From 2c2e07ba04fa05c1a8a94d5ace5963e4596a119c Mon Sep 17 00:00:00 2001 From: Reed Allman Date: Thu, 3 Aug 2017 03:59:19 -0700 Subject: [PATCH] fix bad runner import, remove envconfig & bytefmt --- api/runner/drivers/docker/registry.go | 2 +- glide.lock | 18 +- vendor/code.cloudfoundry.org/bytefmt/LICENSE | 201 ------ vendor/code.cloudfoundry.org/bytefmt/NOTICE | 20 - .../code.cloudfoundry.org/bytefmt/README.md | 15 - vendor/code.cloudfoundry.org/bytefmt/bytes.go | 101 --- .../bytefmt/bytes_test.go | 225 ------ .../bytefmt/formatters_suite_test.go | 13 - .../code.cloudfoundry.org/bytefmt/package.go | 1 - vendor/github.com/iron-io/runner/.gitignore | 4 - vendor/github.com/iron-io/runner/LICENSE.txt | 201 ------ vendor/github.com/iron-io/runner/README.md | 5 - vendor/github.com/iron-io/runner/circle.yml | 33 - .../iron-io/runner/common/backoff.go | 85 --- .../github.com/iron-io/runner/common/clock.go | 23 - .../github.com/iron-io/runner/common/ctx.go | 44 -- .../iron-io/runner/common/environment.go | 38 -- .../iron-io/runner/common/errors.go | 70 -- .../iron-io/runner/common/logging.go | 83 --- .../iron-io/runner/common/stats/aggregator.go | 188 ----- .../runner/common/stats/aggregator_test.go | 95 --- .../iron-io/runner/common/stats/log.go | 45 -- .../iron-io/runner/common/stats/mem.go | 40 -- .../iron-io/runner/common/stats/newrelic.go | 142 ---- .../iron-io/runner/common/stats/riemann.go | 117 ---- .../iron-io/runner/common/stats/stathat.go | 65 -- .../iron-io/runner/common/stats/stats.go | 187 ----- .../iron-io/runner/common/stats/statsd.go | 126 ---- .../iron-io/runner/common/unix_logging.go | 22 - .../iron-io/runner/common/win_logging.go | 12 - .../iron-io/runner/common/writers.go | 204 ------ .../iron-io/runner/common/writers_test.go | 149 ---- .../github.com/iron-io/runner/dind/Dockerfile | 26 - .../github.com/iron-io/runner/dind/README.md | 2 - .../github.com/iron-io/runner/dind/build.sh | 22 - .../iron-io/runner/dind/chaos/Dockerfile | 27 - .../iron-io/runner/dind/chaos/README.md | 1 - .../iron-io/runner/dind/chaos/chaos.sh | 31 - .../iron-io/runner/dind/chaos/entrypoint.sh | 32 - vendor/github.com/iron-io/runner/dind/dind.sh | 49 -- .../iron-io/runner/dind/entrypoint.sh | 24 - .../github.com/iron-io/runner/dind/release.sh | 26 - .../iron-io/runner/drivers/README.md | 1 - .../iron-io/runner/drivers/docker/docker.go | 643 ------------------ .../runner/drivers/docker/docker_client.go | 320 --------- .../runner/drivers/docker/docker_test.go | 133 ---- .../iron-io/runner/drivers/driver.go | 290 -------- .../iron-io/runner/drivers/driver_test.go | 128 ---- .../iron-io/runner/drivers/mock/mocker.go | 64 -- vendor/github.com/iron-io/runner/glide.lock | 104 --- vendor/github.com/iron-io/runner/glide.yaml | 23 - vendor/github.com/iron-io/runner/test.sh | 18 - .../vrischmann/envconfig/.travis.yml | 10 - .../github.com/vrischmann/envconfig/LICENSE | 19 - .../github.com/vrischmann/envconfig/README.md | 151 ---- vendor/github.com/vrischmann/envconfig/doc.go | 199 ------ .../vrischmann/envconfig/envconfig.go | 487 ------------- .../vrischmann/envconfig/envconfig_test.go | 597 ---------------- .../vrischmann/envconfig/example_test.go | 89 --- .../vrischmann/envconfig/keys_test.go | 62 -- .../github.com/vrischmann/envconfig/slice.go | 76 --- .../vrischmann/envconfig/slice_test.go | 47 -- 62 files changed, 6 insertions(+), 6269 deletions(-) delete mode 100644 vendor/code.cloudfoundry.org/bytefmt/LICENSE delete mode 100644 vendor/code.cloudfoundry.org/bytefmt/NOTICE delete mode 100644 
vendor/code.cloudfoundry.org/bytefmt/README.md delete mode 100644 vendor/code.cloudfoundry.org/bytefmt/bytes.go delete mode 100644 vendor/code.cloudfoundry.org/bytefmt/bytes_test.go delete mode 100644 vendor/code.cloudfoundry.org/bytefmt/formatters_suite_test.go delete mode 100644 vendor/code.cloudfoundry.org/bytefmt/package.go delete mode 100644 vendor/github.com/iron-io/runner/.gitignore delete mode 100644 vendor/github.com/iron-io/runner/LICENSE.txt delete mode 100644 vendor/github.com/iron-io/runner/README.md delete mode 100644 vendor/github.com/iron-io/runner/circle.yml delete mode 100644 vendor/github.com/iron-io/runner/common/backoff.go delete mode 100644 vendor/github.com/iron-io/runner/common/clock.go delete mode 100644 vendor/github.com/iron-io/runner/common/ctx.go delete mode 100644 vendor/github.com/iron-io/runner/common/environment.go delete mode 100644 vendor/github.com/iron-io/runner/common/errors.go delete mode 100644 vendor/github.com/iron-io/runner/common/logging.go delete mode 100644 vendor/github.com/iron-io/runner/common/stats/aggregator.go delete mode 100644 vendor/github.com/iron-io/runner/common/stats/aggregator_test.go delete mode 100644 vendor/github.com/iron-io/runner/common/stats/log.go delete mode 100644 vendor/github.com/iron-io/runner/common/stats/mem.go delete mode 100644 vendor/github.com/iron-io/runner/common/stats/newrelic.go delete mode 100644 vendor/github.com/iron-io/runner/common/stats/riemann.go delete mode 100644 vendor/github.com/iron-io/runner/common/stats/stathat.go delete mode 100644 vendor/github.com/iron-io/runner/common/stats/stats.go delete mode 100644 vendor/github.com/iron-io/runner/common/stats/statsd.go delete mode 100644 vendor/github.com/iron-io/runner/common/unix_logging.go delete mode 100644 vendor/github.com/iron-io/runner/common/win_logging.go delete mode 100644 vendor/github.com/iron-io/runner/common/writers.go delete mode 100644 vendor/github.com/iron-io/runner/common/writers_test.go delete mode 100644 vendor/github.com/iron-io/runner/dind/Dockerfile delete mode 100644 vendor/github.com/iron-io/runner/dind/README.md delete mode 100755 vendor/github.com/iron-io/runner/dind/build.sh delete mode 100644 vendor/github.com/iron-io/runner/dind/chaos/Dockerfile delete mode 100644 vendor/github.com/iron-io/runner/dind/chaos/README.md delete mode 100755 vendor/github.com/iron-io/runner/dind/chaos/chaos.sh delete mode 100755 vendor/github.com/iron-io/runner/dind/chaos/entrypoint.sh delete mode 100755 vendor/github.com/iron-io/runner/dind/dind.sh delete mode 100755 vendor/github.com/iron-io/runner/dind/entrypoint.sh delete mode 100755 vendor/github.com/iron-io/runner/dind/release.sh delete mode 100644 vendor/github.com/iron-io/runner/drivers/README.md delete mode 100644 vendor/github.com/iron-io/runner/drivers/docker/docker.go delete mode 100644 vendor/github.com/iron-io/runner/drivers/docker/docker_client.go delete mode 100644 vendor/github.com/iron-io/runner/drivers/docker/docker_test.go delete mode 100644 vendor/github.com/iron-io/runner/drivers/driver.go delete mode 100644 vendor/github.com/iron-io/runner/drivers/driver_test.go delete mode 100644 vendor/github.com/iron-io/runner/drivers/mock/mocker.go delete mode 100644 vendor/github.com/iron-io/runner/glide.lock delete mode 100644 vendor/github.com/iron-io/runner/glide.yaml delete mode 100755 vendor/github.com/iron-io/runner/test.sh delete mode 100644 vendor/github.com/vrischmann/envconfig/.travis.yml delete mode 100644 vendor/github.com/vrischmann/envconfig/LICENSE delete mode 100644 
vendor/github.com/vrischmann/envconfig/README.md delete mode 100644 vendor/github.com/vrischmann/envconfig/doc.go delete mode 100644 vendor/github.com/vrischmann/envconfig/envconfig.go delete mode 100644 vendor/github.com/vrischmann/envconfig/envconfig_test.go delete mode 100644 vendor/github.com/vrischmann/envconfig/example_test.go delete mode 100644 vendor/github.com/vrischmann/envconfig/keys_test.go delete mode 100644 vendor/github.com/vrischmann/envconfig/slice.go delete mode 100644 vendor/github.com/vrischmann/envconfig/slice_test.go diff --git a/api/runner/drivers/docker/registry.go b/api/runner/drivers/docker/registry.go index 3a12710ae..0320104c0 100644 --- a/api/runner/drivers/docker/registry.go +++ b/api/runner/drivers/docker/registry.go @@ -19,8 +19,8 @@ import ( "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" + "github.com/fnproject/fn/api/runner/drivers" docker "github.com/fsouza/go-dockerclient" - "github.com/iron-io/runner/drivers" ) var ( diff --git a/glide.lock b/glide.lock index 1277d35cb..8057485a0 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,6 @@ -hash: 6a94dc42a1efd079d5d8c5f72edb8de5e2b3cace82b034cfe5f1b0d1c30a982e -updated: 2017-08-03T03:21:07.598979406-07:00 +hash: b555054a6f86ac84f6104ad9efabdcd85966c8f6574e485be7337c3ee9f29aa0 +updated: 2017-08-03T03:58:34.544868553-07:00 imports: -- name: code.cloudfoundry.org/bytefmt - version: f4415fafc5619dd75599a54a7c91fb3948ad58bd - name: github.com/amir/raidman version: 1ccc43bfb9c93cb401a4025e49c64ba71e5e668b subpackages: @@ -208,10 +206,6 @@ imports: - api - config - mq -- name: github.com/iron-io/runner - version: 9fe11b16791641c40a0729de8bac7ba28a507145 - subpackages: - - drivers - name: github.com/jmoiron/jsonq version: e874b168d07ecc7808bc950a17998a8aa3141d82 - name: github.com/jmoiron/sqlx @@ -286,14 +280,14 @@ imports: version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c - name: github.com/Shopify/sarama version: 2fd980e23bdcbb8edeb78fc704de0c39a6567ffc -- name: github.com/sirupsen/logrus - version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f - name: github.com/Sirupsen/logrus version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f repo: https://github.com/sirupsen/logrus.git vcs: git subpackages: - hooks/syslog +- name: github.com/sirupsen/logrus + version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f - name: github.com/spf13/afero version: 9be650865eab0c12963d8753212f4f9c66cdcf12 subpackages: @@ -344,6 +338,4 @@ imports: - internal/scram - name: gopkg.in/yaml.v2 version: 25c4ec802a7d637f88d584ab26798e94ad14c13b -testImports: -- name: github.com/vrischmann/envconfig - version: 757beaaeac8d14bcc7ea3f71488d65cf45cf2eff +testImports: [] diff --git a/vendor/code.cloudfoundry.org/bytefmt/LICENSE b/vendor/code.cloudfoundry.org/bytefmt/LICENSE deleted file mode 100644 index f49a4e16e..000000000 --- a/vendor/code.cloudfoundry.org/bytefmt/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
\ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/bytefmt/NOTICE b/vendor/code.cloudfoundry.org/bytefmt/NOTICE deleted file mode 100644 index 8625a7f41..000000000 --- a/vendor/code.cloudfoundry.org/bytefmt/NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. - -This project contains software that is Copyright (c) 2013-2015 Pivotal Software, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This project may include a number of subcomponents with separate -copyright notices and license terms. Your use of these subcomponents -is subject to the terms and conditions of each subcomponent's license, -as noted in the LICENSE file. diff --git a/vendor/code.cloudfoundry.org/bytefmt/README.md b/vendor/code.cloudfoundry.org/bytefmt/README.md deleted file mode 100644 index 44d287d1e..000000000 --- a/vendor/code.cloudfoundry.org/bytefmt/README.md +++ /dev/null @@ -1,15 +0,0 @@ -bytefmt -======= - -**Note**: This repository should be imported as `code.cloudfoundry.org/bytefmt`. - -Human-readable byte formatter. - -Example: - -```go -bytefmt.ByteSize(100.5*bytefmt.MEGABYTE) // returns "100.5M" -bytefmt.ByteSize(uint64(1024)) // returns "1K" -``` - -For documentation, please see http://godoc.org/code.cloudfoundry.org/bytefmt diff --git a/vendor/code.cloudfoundry.org/bytefmt/bytes.go b/vendor/code.cloudfoundry.org/bytefmt/bytes.go deleted file mode 100644 index 73a06e2c8..000000000 --- a/vendor/code.cloudfoundry.org/bytefmt/bytes.go +++ /dev/null @@ -1,101 +0,0 @@ -// Package bytefmt contains helper methods and constants for converting to and from a human-readable byte format. -// -// bytefmt.ByteSize(100.5*bytefmt.MEGABYTE) // "100.5M" -// bytefmt.ByteSize(uint64(1024)) // "1K" -// -package bytefmt - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -const ( - BYTE = 1.0 - KILOBYTE = 1024 * BYTE - MEGABYTE = 1024 * KILOBYTE - GIGABYTE = 1024 * MEGABYTE - TERABYTE = 1024 * GIGABYTE -) - -var bytesPattern *regexp.Regexp = regexp.MustCompile(`(?i)^(-?\d+(?:\.\d+)?)([KMGT]B?|B)$`) - -var invalidByteQuantityError = errors.New("Byte quantity must be a positive integer with a unit of measurement like M, MB, G, or GB") - -// ByteSize returns a human-readable byte string of the form 10M, 12.5K, and so forth. The following units are available: -// T: Terabyte -// G: Gigabyte -// M: Megabyte -// K: Kilobyte -// B: Byte -// The unit that results in the smallest number greater than or equal to 1 is always chosen. 
-func ByteSize(bytes uint64) string { - unit := "" - value := float32(bytes) - - switch { - case bytes >= TERABYTE: - unit = "T" - value = value / TERABYTE - case bytes >= GIGABYTE: - unit = "G" - value = value / GIGABYTE - case bytes >= MEGABYTE: - unit = "M" - value = value / MEGABYTE - case bytes >= KILOBYTE: - unit = "K" - value = value / KILOBYTE - case bytes >= BYTE: - unit = "B" - case bytes == 0: - return "0" - } - - stringValue := fmt.Sprintf("%.1f", value) - stringValue = strings.TrimSuffix(stringValue, ".0") - return fmt.Sprintf("%s%s", stringValue, unit) -} - -// ToMegabytes parses a string formatted by ByteSize as megabytes. -func ToMegabytes(s string) (uint64, error) { - bytes, err := ToBytes(s) - if err != nil { - return 0, err - } - - return bytes / MEGABYTE, nil -} - -// ToBytes parses a string formatted by ByteSize as bytes. -func ToBytes(s string) (uint64, error) { - parts := bytesPattern.FindStringSubmatch(strings.TrimSpace(s)) - if len(parts) < 3 { - return 0, invalidByteQuantityError - } - - value, err := strconv.ParseFloat(parts[1], 64) - if err != nil || value <= 0 { - return 0, invalidByteQuantityError - } - - var bytes uint64 - unit := strings.ToUpper(parts[2]) - switch unit[:1] { - case "T": - bytes = uint64(value * TERABYTE) - case "G": - bytes = uint64(value * GIGABYTE) - case "M": - bytes = uint64(value * MEGABYTE) - case "K": - bytes = uint64(value * KILOBYTE) - case "B": - bytes = uint64(value * BYTE) - } - - return bytes, nil -} diff --git a/vendor/code.cloudfoundry.org/bytefmt/bytes_test.go b/vendor/code.cloudfoundry.org/bytefmt/bytes_test.go deleted file mode 100644 index 6bcf31424..000000000 --- a/vendor/code.cloudfoundry.org/bytefmt/bytes_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package bytefmt_test - -import ( - . "code.cloudfoundry.org/bytefmt" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var _ = Describe("bytefmt", func() { - - Context("ByteSize", func() { - It("Prints in the largest possible unit", func() { - Expect(ByteSize(10 * TERABYTE)).To(Equal("10T")) - Expect(ByteSize(uint64(10.5 * TERABYTE))).To(Equal("10.5T")) - - Expect(ByteSize(10 * GIGABYTE)).To(Equal("10G")) - Expect(ByteSize(uint64(10.5 * GIGABYTE))).To(Equal("10.5G")) - - Expect(ByteSize(100 * MEGABYTE)).To(Equal("100M")) - Expect(ByteSize(uint64(100.5 * MEGABYTE))).To(Equal("100.5M")) - - Expect(ByteSize(100 * KILOBYTE)).To(Equal("100K")) - Expect(ByteSize(uint64(100.5 * KILOBYTE))).To(Equal("100.5K")) - - Expect(ByteSize(1)).To(Equal("1B")) - }) - - It("prints '0' for zero bytes", func() { - Expect(ByteSize(0)).To(Equal("0")) - }) - }) - - Context("ToMegabytes", func() { - It("parses byte amounts with short units (e.g. 
M, G)", func() { - var ( - megabytes uint64 - err error - ) - - megabytes, err = ToMegabytes("5B") - Expect(megabytes).To(Equal(uint64(0))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("5K") - Expect(megabytes).To(Equal(uint64(0))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("5M") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("5m") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("2G") - Expect(megabytes).To(Equal(uint64(2 * 1024))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("3T") - Expect(megabytes).To(Equal(uint64(3 * 1024 * 1024))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("parses byte amounts with long units (e.g MB, GB)", func() { - var ( - megabytes uint64 - err error - ) - - megabytes, err = ToMegabytes("5MB") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("5mb") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("2GB") - Expect(megabytes).To(Equal(uint64(2 * 1024))) - Expect(err).NotTo(HaveOccurred()) - - megabytes, err = ToMegabytes("3TB") - Expect(megabytes).To(Equal(uint64(3 * 1024 * 1024))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("returns an error when the unit is missing", func() { - _, err := ToMegabytes("5") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - - It("returns an error when the unit is unrecognized", func() { - _, err := ToMegabytes("5MBB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - - _, err = ToMegabytes("5BB") - Expect(err).To(HaveOccurred()) - }) - - It("allows whitespace before and after the value", func() { - megabytes, err := ToMegabytes("\t\n\r 5MB ") - Expect(megabytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("returns an error for negative values", func() { - _, err := ToMegabytes("-5MB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - - It("returns an error for zero values", func() { - _, err := ToMegabytes("0TB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - }) - - Context("ToBytes", func() { - It("parses byte amounts with short units (e.g. M, G)", func() { - var ( - bytes uint64 - err error - ) - - bytes, err = ToBytes("5B") - Expect(bytes).To(Equal(uint64(5))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("5K") - Expect(bytes).To(Equal(uint64(5 * KILOBYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("5M") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("5m") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("2G") - Expect(bytes).To(Equal(uint64(2 * GIGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("3T") - Expect(bytes).To(Equal(uint64(3 * TERABYTE))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("parses byte amounts that are float (e.g. 
5.3KB)", func() { - var ( - bytes uint64 - err error - ) - - bytes, err = ToBytes("13.5KB") - Expect(bytes).To(Equal(uint64(13824))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("4.5KB") - Expect(bytes).To(Equal(uint64(4608))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("parses byte amounts with long units (e.g MB, GB)", func() { - var ( - bytes uint64 - err error - ) - - bytes, err = ToBytes("5MB") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("5mb") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("2GB") - Expect(bytes).To(Equal(uint64(2 * GIGABYTE))) - Expect(err).NotTo(HaveOccurred()) - - bytes, err = ToBytes("3TB") - Expect(bytes).To(Equal(uint64(3 * TERABYTE))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("returns an error when the unit is missing", func() { - _, err := ToBytes("5") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - - It("returns an error when the unit is unrecognized", func() { - _, err := ToBytes("5MBB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - - _, err = ToBytes("5BB") - Expect(err).To(HaveOccurred()) - }) - - It("allows whitespace before and after the value", func() { - bytes, err := ToBytes("\t\n\r 5MB ") - Expect(bytes).To(Equal(uint64(5 * MEGABYTE))) - Expect(err).NotTo(HaveOccurred()) - }) - - It("returns an error for negative values", func() { - _, err := ToBytes("-5MB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - - It("returns an error for zero values", func() { - _, err := ToBytes("0TB") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unit of measurement")) - }) - }) -}) diff --git a/vendor/code.cloudfoundry.org/bytefmt/formatters_suite_test.go b/vendor/code.cloudfoundry.org/bytefmt/formatters_suite_test.go deleted file mode 100644 index 64af7ca46..000000000 --- a/vendor/code.cloudfoundry.org/bytefmt/formatters_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package bytefmt_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "testing" -) - -func TestFormatters(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Bytefmt Suite") -} diff --git a/vendor/code.cloudfoundry.org/bytefmt/package.go b/vendor/code.cloudfoundry.org/bytefmt/package.go deleted file mode 100644 index 03429300b..000000000 --- a/vendor/code.cloudfoundry.org/bytefmt/package.go +++ /dev/null @@ -1 +0,0 @@ -package bytefmt // import "code.cloudfoundry.org/bytefmt" diff --git a/vendor/github.com/iron-io/runner/.gitignore b/vendor/github.com/iron-io/runner/.gitignore deleted file mode 100644 index 0c6c03cd5..000000000 --- a/vendor/github.com/iron-io/runner/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -config.json -/runner -vendor/ -.idea/ \ No newline at end of file diff --git a/vendor/github.com/iron-io/runner/LICENSE.txt b/vendor/github.com/iron-io/runner/LICENSE.txt deleted file mode 100644 index 7020bcf1a..000000000 --- a/vendor/github.com/iron-io/runner/LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Iron.io - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/iron-io/runner/README.md b/vendor/github.com/iron-io/runner/README.md deleted file mode 100644 index cb1603b62..000000000 --- a/vendor/github.com/iron-io/runner/README.md +++ /dev/null @@ -1,5 +0,0 @@ -When vendoring this library, make sure to strip vendors: - -``` -glide install --strip-vendor -``` \ No newline at end of file diff --git a/vendor/github.com/iron-io/runner/circle.yml b/vendor/github.com/iron-io/runner/circle.yml deleted file mode 100644 index 95b5ce031..000000000 --- a/vendor/github.com/iron-io/runner/circle.yml +++ /dev/null @@ -1,33 +0,0 @@ -machine: - environment: - CHECKOUT_DIR: $HOME/$CIRCLE_PROJECT_REPONAME - GOPATH: $HOME/go - GOROOT: $HOME/golang/go - PATH: $GOROOT/bin:$PATH - GH_IRON: $GOPATH/src/github.com/iron-io - GO_PROJECT: ../go/src/github.com/iron-io/$CIRCLE_PROJECT_REPONAME - services: - - docker - -checkout: - post: - - mkdir -p "$GH_IRON" - - cp -R "$CHECKOUT_DIR" "$GH_IRON/$CIRCLE_PROJECT_REPONAME" - -dependencies: - pre: - - wget https://storage.googleapis.com/golang/go1.7.linux-amd64.tar.gz - - mkdir -p $HOME/golang - - tar -C $HOME/golang -xvzf go1.7.linux-amd64.tar.gz - - wget https://github.com/Masterminds/glide/releases/download/v0.12.3/glide-v0.12.3-linux-amd64.tar.gz - - tar -C $HOME/bin -xvzf glide-v0.12.3-linux-amd64.tar.gz --strip=1 - override: - - which go && go version - - glide --version - - glide install: - pwd: $GO_PROJECT - -test: - override: - - ./test.sh go: - pwd: $GO_PROJECT \ No newline at end of file diff --git a/vendor/github.com/iron-io/runner/common/backoff.go b/vendor/github.com/iron-io/runner/common/backoff.go deleted file mode 100644 index 226b0c611..000000000 --- a/vendor/github.com/iron-io/runner/common/backoff.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "math" - "math/rand" - "sync" - "time" -) - -type BoxTime struct{} - -func (BoxTime) Now() time.Time { return time.Now() } -func (BoxTime) Sleep(d time.Duration) { time.Sleep(d) } -func (BoxTime) After(d time.Duration) <-chan time.Time { return time.After(d) } - -type Backoff int - -func (b *Backoff) Sleep() { b.RandomSleep(nil, nil) } - -func (b *Backoff) RandomSleep(rng *rand.Rand, clock Clock) { - const ( - maxexp = 7 - interval = 25 * time.Millisecond - ) - - if rng == nil { - rng = defaultRNG - } - if clock == nil { - clock = defaultClock - } - - // 25-50ms, 50-100ms, 100-200ms, 200-400ms, 400-800ms, 800-1600ms, 1600-3200ms, 3200-6400ms - d := time.Duration(math.Pow(2, float64(*b))) * interval - d += (d * time.Duration(rng.Float64())) - - clock.Sleep(d) - - if *b < maxexp { - (*b)++ - } -} - -var ( - defaultRNG = NewRNG(time.Now().UnixNano()) - defaultClock = BoxTime{} -) - -func NewRNG(seed int64) *rand.Rand { - return rand.New(&lockedSource{src: rand.NewSource(seed)}) -} - -// taken from go1.5.1 math/rand/rand.go +233-250 -// bla bla if it puts a hole in the earth don't sue them -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} diff --git a/vendor/github.com/iron-io/runner/common/clock.go b/vendor/github.com/iron-io/runner/common/clock.go deleted file mode 100644 index 1c91cf2f0..000000000 --- a/vendor/github.com/iron-io/runner/common/clock.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import "time" - -type Clock interface { - Now() time.Time - Sleep(time.Duration) - After(time.Duration) <-chan time.Time -} diff --git a/vendor/github.com/iron-io/runner/common/ctx.go b/vendor/github.com/iron-io/runner/common/ctx.go deleted file mode 100644 index 19e2cb1c1..000000000 --- a/vendor/github.com/iron-io/runner/common/ctx.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "context" - - "github.com/Sirupsen/logrus" -) - -// WithLogger stores the logger. -func WithLogger(ctx context.Context, l logrus.FieldLogger) context.Context { - return context.WithValue(ctx, "logger", l) -} - -// Logger returns the structured logger. 
-func Logger(ctx context.Context) logrus.FieldLogger { - l, ok := ctx.Value("logger").(logrus.FieldLogger) - if !ok { - return logrus.StandardLogger() - } - return l -} - -// Attempt at simplifying this whole logger in the context thing -// Could even make this take a generic map, then the logger that gets returned could be used just like the stdlib too, since it's compatible -func LoggerWithFields(ctx context.Context, fields logrus.Fields) (context.Context, logrus.FieldLogger) { - l := Logger(ctx) - l = l.WithFields(fields) - ctx = WithLogger(ctx, l) - return ctx, l -} diff --git a/vendor/github.com/iron-io/runner/common/environment.go b/vendor/github.com/iron-io/runner/common/environment.go deleted file mode 100644 index ca10d0d60..000000000 --- a/vendor/github.com/iron-io/runner/common/environment.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "github.com/iron-io/runner/common/stats" -) - -// An Environment is a long lived object that carries around 'configuration' -// for the program. Other long-lived objects may embed an environment directly -// into their definition. Environments wrap common functionality like logging -// and metrics. For short-lived request-response like tasks use `Context`, -// which wraps an Environment. - -type Environment struct { - stats.Statter -} - -// Initializers are functions that may set up the environment as they like. By default the environment is 'inactive' in the sense that metrics aren't reported. -func NewEnvironment(initializers ...func(e *Environment)) *Environment { - env := &Environment{&stats.NilStatter{}} - for _, init := range initializers { - init(env) - } - return env -} diff --git a/vendor/github.com/iron-io/runner/common/errors.go b/vendor/github.com/iron-io/runner/common/errors.go deleted file mode 100644 index adcf73975..000000000 --- a/vendor/github.com/iron-io/runner/common/errors.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "io" - "net" - "syscall" -) - -// Errors that can be directly exposed to task creators/users. 
-type UserVisibleError interface { - UserVisible() bool -} - -func IsUserVisibleError(err error) bool { - ue, ok := err.(UserVisibleError) - return ok && ue.UserVisible() -} - -type userVisibleError struct { - error -} - -func (u *userVisibleError) UserVisible() bool { return true } - -func UserError(err error) error { - return &userVisibleError{err} -} - -type Temporary interface { - Temporary() bool -} - -func IsTemporary(err error) bool { - v, ok := err.(Temporary) - return (ok && v.Temporary()) || isNet(err) -} - -func isNet(err error) bool { - if _, ok := err.(net.Error); ok { - return true - } - - switch err := err.(type) { - case *net.OpError: - return true - case syscall.Errno: - if err == syscall.ECONNREFUSED { // linux only? maybe ok for prod - return true // connection refused - } - default: - if err == io.ErrUnexpectedEOF || err == io.EOF { - return true - } - } - return false -} diff --git a/vendor/github.com/iron-io/runner/common/logging.go b/vendor/github.com/iron-io/runner/common/logging.go deleted file mode 100644 index 3391fc8fa..000000000 --- a/vendor/github.com/iron-io/runner/common/logging.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "net/url" - "os" - - "github.com/Sirupsen/logrus" -) - -func SetLogLevel(ll string) { - if ll == "" { - ll = "info" - } - logrus.WithFields(logrus.Fields{"level": ll}).Info("Setting log level to") - logLevel, err := logrus.ParseLevel(ll) - if err != nil { - logrus.WithFields(logrus.Fields{"level": ll}).Warn("Could not parse log level, setting to INFO") - logLevel = logrus.InfoLevel - } - logrus.SetLevel(logLevel) -} - -func SetLogDest(to, prefix string) { - logrus.SetOutput(os.Stderr) // in case logrus changes their mind... - if to == "stderr" { - return - } - - // possible schemes: { udp, tcp, file } - // file url must contain only a path, syslog must contain only a host[:port] - // expect: [scheme://][host][:port][/path] - // default scheme to udp:// if none given - - url, err := url.Parse(to) - if url.Host == "" && url.Path == "" { - logrus.WithFields(logrus.Fields{"to": to}).Warn("No scheme on logging url, adding udp://") - // this happens when no scheme like udp:// is present - to = "udp://" + to - url, err = url.Parse(to) - } - if err != nil { - logrus.WithError(err).WithFields(logrus.Fields{"to": to}).Error("could not parse logging URI, defaulting to stderr") - return - } - - // File URL must contain only `url.Path`. 
Syslog location must contain only `url.Host` - if (url.Host == "" && url.Path == "") || (url.Host != "" && url.Path != "") { - logrus.WithFields(logrus.Fields{"to": to, "uri": url}).Error("invalid logging location, defaulting to stderr") - return - } - - switch url.Scheme { - case "udp", "tcp": - err = NewSyslogHook(url, prefix) - if err != nil { - logrus.WithFields(logrus.Fields{"uri": url, "to": to}).WithError(err).Error("unable to connect to syslog, defaulting to stderr") - return - } - case "file": - f, err := os.OpenFile(url.Path, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666) - if err != nil { - logrus.WithError(err).WithFields(logrus.Fields{"to": to, "path": url.Path}).Error("cannot open file, defaulting to stderr") - return - } - logrus.SetOutput(f) - default: - logrus.WithFields(logrus.Fields{"scheme": url.Scheme, "to": to}).Error("unknown logging location scheme, defaulting to stderr") - } -} diff --git a/vendor/github.com/iron-io/runner/common/stats/aggregator.go b/vendor/github.com/iron-io/runner/common/stats/aggregator.go deleted file mode 100644 index fa5e4bc30..000000000 --- a/vendor/github.com/iron-io/runner/common/stats/aggregator.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stats - -import ( - "sync" - "time" -) - -type reporter interface { - report([]*collectedStat) -} - -type collectedStat struct { - Name string - Counters map[string]int64 - Values map[string]float64 - Gauges map[string]int64 - Timers map[string]time.Duration - - avgCounts map[string]uint64 -} - -func newCollectedStatUnescaped(name string) *collectedStat { - return &collectedStat{ - Name: name, - Counters: map[string]int64{}, - Values: map[string]float64{}, - Gauges: map[string]int64{}, - Timers: map[string]time.Duration{}, - avgCounts: map[string]uint64{}, - } -} - -// What do you call an alligator in a vest? - -// Aggregator collects a stats and merges them together if they've been added -// previously. Useful for reporters that have low throughput ie stathat. 
-type Aggregator struct { - // Holds all of our stats based on stat.Name - sl sync.RWMutex - stats map[string]*statHolder - - reporters []reporter -} - -func newAggregator(reporters []reporter) *Aggregator { - return &Aggregator{ - stats: make(map[string]*statHolder), - reporters: reporters, - } -} - -type statHolder struct { - cl sync.RWMutex // Lock on Counters - vl sync.RWMutex // Lock on Values - s *collectedStat -} - -func newStatHolder(st *collectedStat) *statHolder { - return &statHolder{s: st} -} - -type kind int16 - -const ( - counterKind kind = iota - valueKind - gaugeKind - durationKind -) - -func (a *Aggregator) add(component, key string, kind kind, value interface{}) { - a.sl.RLock() - stat, ok := a.stats[component] - a.sl.RUnlock() - if !ok { - a.sl.Lock() - stat, ok = a.stats[component] - if !ok { - stat = newStatHolder(newCollectedStatUnescaped(component)) - a.stats[component] = stat - } - a.sl.Unlock() - } - - if kind == counterKind || kind == gaugeKind { - var mapPtr map[string]int64 - if kind == counterKind { - mapPtr = stat.s.Counters - } else { - mapPtr = stat.s.Gauges - } - value := value.(int64) - stat.cl.Lock() - mapPtr[key] += value - stat.cl.Unlock() - } - - /* TODO: this ends up ignoring tags so yeah gg - / lets just calculate a running average for now. Can do percentiles later - / Recalculated Average - / - / currentAverage * currentCount + newValue - / ------------------------------------------ - / (currentCount +1) - / - */ - if kind == valueKind || kind == durationKind { - var typedValue int64 - if kind == valueKind { - typedValue = value.(int64) - } else { - typedValue = int64(value.(time.Duration)) - } - - stat.vl.Lock() - switch kind { - case valueKind: - oldAverage := stat.s.Values[key] - count := stat.s.avgCounts[key] - newAverage := (oldAverage*float64(count) + float64(typedValue)) / (float64(count + 1)) - stat.s.avgCounts[key] = count + 1 - stat.s.Values[key] = newAverage - case durationKind: - oldAverage := float64(stat.s.Timers[key]) - count := stat.s.avgCounts[key] - newAverage := (oldAverage*float64(count) + float64(typedValue)) / (float64(count + 1)) - stat.s.avgCounts[key] = count + 1 - stat.s.Timers[key] = time.Duration(newAverage) - } - stat.vl.Unlock() - } -} - -func (a *Aggregator) dump() []*collectedStat { - a.sl.Lock() - bucket := a.stats - // Clear out the maps, effectively resetting our average - a.stats = make(map[string]*statHolder) - a.sl.Unlock() - - stats := make([]*collectedStat, 0, len(bucket)) - for _, v := range bucket { - stats = append(stats, v.s) - } - return stats -} - -func (a *Aggregator) report(st []*collectedStat) { - stats := a.dump() - stats = append(stats, st...) 
- for _, r := range a.reporters { - r.report(stats) - } -} - -func (r *Aggregator) Inc(component string, stat string, value int64, rate float32) { - r.add(component, stat, counterKind, value) -} - -func (r *Aggregator) Gauge(component string, stat string, value int64, rate float32) { - r.add(component, stat, gaugeKind, value) -} - -func (r *Aggregator) Measure(component string, stat string, value int64, rate float32) { - r.add(component, stat, valueKind, value) -} - -func (r *Aggregator) Time(component string, stat string, value time.Duration, rate float32) { - r.add(component, stat, durationKind, value) -} - -func (r *Aggregator) NewTimer(component string, stat string, rate float32) *Timer { - return newTimer(r, component, stat, rate) -} diff --git a/vendor/github.com/iron-io/runner/common/stats/aggregator_test.go b/vendor/github.com/iron-io/runner/common/stats/aggregator_test.go deleted file mode 100644 index b2af47714..000000000 --- a/vendor/github.com/iron-io/runner/common/stats/aggregator_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stats - -import ( - "fmt" - "math/rand" - "testing" -) - -func TestAggregator(t *testing.T) { - ag := newAggregator([]reporter{}) - var sum int64 = 0 - var times int64 = 0 - for i := 0; i < 100; i++ { - ag.add("mq push", "messages", counterKind, int64(1)) - ag.add("mq push", "latency", valueKind, int64(i)) - ag.add("mq pull", "latency", valueKind, int64(i)) - sum += int64(i) - times += 1 - } - - for _, stat := range ag.dump() { - for k, v := range stat.Values { - if v != float64(sum)/float64(times) { - t.Error("key:", k, "Expected", sum/times, "got", v) - } - } - - for k, v := range stat.Counters { - if v != times { - t.Error("key:", k, "Expected", times, "got", v) - } - } - } - if len(ag.stats) != 0 { - t.Error("expected stats map to be clear, got", len(ag.stats)) - } -} - -type testStat struct { - component string - key string - kind kind - value int64 -} - -func BenchmarkAggregatorAdd(b *testing.B) { - ag := &Aggregator{ - stats: make(map[string]*statHolder, 1000), - } - - s := createStatList(1000) - - sl := len(s) - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - e := s[rand.Intn(sl)] - ag.add(e.component, e.key, e.kind, e.value) - } - }) -} - -func createStatList(n int) []*testStat { - var stats []*testStat - for i := 0; i < n; i++ { - st := testStat{ - component: "aggregator_test", - key: fmt.Sprintf("latency.%d", i), - kind: counterKind, - value: 1, - } - - if rand.Float32() < 0.5 { - st.key = fmt.Sprintf("test.%d", i) - st.kind = valueKind - st.value = 15999 - } - stats = append(stats, &st) - } - return stats -} diff --git a/vendor/github.com/iron-io/runner/common/stats/log.go b/vendor/github.com/iron-io/runner/common/stats/log.go deleted file mode 100644 index c7114206e..000000000 --- a/vendor/github.com/iron-io/runner/common/stats/log.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache 
License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stats - -import ( - "time" - - "github.com/Sirupsen/logrus" -) - -type LogReporter struct { -} - -func NewLogReporter() *LogReporter { - return (&LogReporter{}) -} - -func (lr *LogReporter) report(stats []*collectedStat) { - for _, s := range stats { - f := make(logrus.Fields) - for k, v := range s.Counters { - f[k] = v - } - for k, v := range s.Values { - f[k] = v - } - for k, v := range s.Timers { - f[k] = time.Duration(v) - } - - logrus.WithFields(f).Info(s.Name) - } -} diff --git a/vendor/github.com/iron-io/runner/common/stats/mem.go b/vendor/github.com/iron-io/runner/common/stats/mem.go deleted file mode 100644 index 69ad09b98..000000000 --- a/vendor/github.com/iron-io/runner/common/stats/mem.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stats - -import ( - "runtime" - "time" -) - -func StartReportingMemoryAndGC(reporter Statter, d time.Duration) { - ticker := time.Tick(d) - for { - select { - case <-ticker: - var ms runtime.MemStats - runtime.ReadMemStats(&ms) - - prefix := "runtime" - - reporter.Measure(prefix, "allocated", int64(ms.Alloc), 1.0) - reporter.Measure(prefix, "allocated.heap", int64(ms.HeapAlloc), 1.0) - reporter.Time(prefix, "gc.pause", time.Duration(ms.PauseNs[(ms.NumGC+255)%256]), 1.0) - - // GC CPU percentage. - reporter.Measure(prefix, "gc.cpufraction", int64(ms.GCCPUFraction*100), 1.0) - } - } -} diff --git a/vendor/github.com/iron-io/runner/common/stats/newrelic.go b/vendor/github.com/iron-io/runner/common/stats/newrelic.go deleted file mode 100644 index 7f0110074..000000000 --- a/vendor/github.com/iron-io/runner/common/stats/newrelic.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package stats - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" - - "github.com/Sirupsen/logrus" -) - -type NewRelicAgentConfig struct { - Host string `json:"host"` - Version string `json:"version"` - Pid int `json:"pid"` -} - -// examples: https://docs.newrelic.com/docs/plugins/plugin-developer-resources/developer-reference/metric-data-plugin-api#examples -type newRelicRequest struct { - Agent *agent `json:"agent"` - Components []*component `json:"components"` -} - -type NewRelicReporterConfig struct { - Agent *NewRelicAgentConfig - LicenseKey string `json:"license_key"` -} - -type NewRelicReporter struct { - Agent *agent - LicenseKey string -} - -func NewNewRelicReporter(version string, licenseKey string) *NewRelicReporter { - r := &NewRelicReporter{} - r.Agent = newNewRelicAgent(version) - r.LicenseKey = licenseKey - return r -} - -func (r *NewRelicReporter) report(stats []*collectedStat) { - client := &http.Client{} - req := &newRelicRequest{} - req.Agent = r.Agent - comp := newComponent() - comp.Name = "IronMQ" - comp.Duration = 60 - comp.GUID = "io.iron.ironmq" - // TODO - NR has a fixed 3 level heirarchy? and we just use 2? - req.Components = []*component{comp} - - // now add metrics - for _, s := range stats { - for k, v := range s.Counters { - comp.Metrics[fmt.Sprintf("Component/%s %s", s.Name, k)] = v - } - for k, v := range s.Values { - comp.Metrics[fmt.Sprintf("Component/%s %s", s.Name, k)] = int64(v) - } - for k, v := range s.Timers { - comp.Metrics[fmt.Sprintf("Component/%s %s", s.Name, k)] = int64(v) - } - } - - metricsJson, err := json.Marshal(req) - if err != nil { - logrus.WithError(err).Error("error encoding json for NewRelicReporter") - } - - jsonAsString := string(metricsJson) - - httpRequest, err := http.NewRequest("POST", - "https://platform-api.newrelic.com/platform/v1/metrics", - strings.NewReader(jsonAsString)) - if err != nil { - logrus.WithError(err).Error("error creating New Relic request") - return - } - httpRequest.Header.Set("X-License-Key", r.LicenseKey) - httpRequest.Header.Set("Content-Type", "application/json") - httpRequest.Header.Set("Accept", "application/json") - httpResponse, err := client.Do(httpRequest) - if err != nil { - logrus.WithError(err).Error("error sending http request in NewRelicReporter") - return - } - defer httpResponse.Body.Close() - body, err := ioutil.ReadAll(httpResponse.Body) - if err != nil { - logrus.WithError(err).Error("error reading response body") - } else { - logrus.Debugln("response", "code", httpResponse.Status, "body", string(body)) - } -} - -type agent struct { - Host string `json:"host"` - Version string `json:"version"` - Pid int `json:"pid"` -} - -func newNewRelicAgent(Version string) *agent { - var err error - agent := &agent{ - Version: Version, - } - agent.Pid = os.Getpid() - if agent.Host, err = os.Hostname(); err != nil { - logrus.WithError(err).Error("Can not get hostname") - return nil - } - return agent -} - -type component struct { - Name string `json:"name"` - GUID string `json:"guid"` - Duration int `json:"duration"` - Metrics map[string]int64 `json:"metrics"` -} - -func newComponent() *component { - c := &component{} - c.Metrics = make(map[string]int64) - return c -} diff --git a/vendor/github.com/iron-io/runner/common/stats/riemann.go b/vendor/github.com/iron-io/runner/common/stats/riemann.go deleted file mode 100644 index 154c3d72a..000000000 --- a/vendor/github.com/iron-io/runner/common/stats/riemann.go +++ /dev/null @@ -1,117 +0,0 @@ -// +build riemann - -// 
Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stats - -import ( - "os" - "time" - - "github.com/Sirupsen/logrus" - "github.com/amir/raidman" -) - -type RiemannClient struct { - client *raidman.Client - attributes map[string]string -} - -const ( - StateNormal = "normal" -) - -func (rc *RiemannClient) Report([]*Stat) {} - -func (rc *RiemannClient) Add(s *Stat) { - var events []*raidman.Event - - t := time.Now().UnixNano() - - for k, v := range rc.attributes { - s.Tags[k] = v - } - - for k, v := range s.Counters { - events = append(events, &raidman.Event{ - Ttl: 5.0, - Time: t, - State: StateNormal, - Service: s.Name + " " + k, - Metric: v, - Attributes: s.Tags, - }) - } - - for k, v := range s.Values { - events = append(events, &raidman.Event{ - Ttl: 5.0, - Time: t, - State: StateNormal, - Service: s.Name + " " + k, - Metric: v, - Attributes: s.Tags, - }) - } - - rc.report(events) -} - -func (rc *RiemannClient) report(events []*raidman.Event) { - err := rc.client.SendMulti(events) - if err != nil { - logrus.WithError(err).Error("error sending to Riemann") - } -} - -func (rc *RiemannClient) heartbeat() { - events := []*raidman.Event{ - &raidman.Event{ - Ttl: 5.0, - Time: time.Now().UnixNano(), - State: StateNormal, - Service: "heartbeat", - Metric: 1.0, - Attributes: rc.attributes, - }, - } - rc.report(events) -} - -func newRiemann(config Config) *RiemannClient { - c, err := raidman.Dial("tcp", config.Riemann.RiemannHost) - if err != nil { - logrus.WithError(err).Error("error dialing Riemann") - os.Exit(1) - } - - client := &RiemannClient{ - client: c, - attributes: map[string]string{}, - } - - for k, v := range config.Tags { - client.attributes[k] = v - } - - // Send out a heartbeat every second - go func(rc *RiemannClient) { - for _ = range time.Tick(1 * time.Second) { - rc.heartbeat() - } - }(client) - - return client -} diff --git a/vendor/github.com/iron-io/runner/common/stats/stathat.go b/vendor/github.com/iron-io/runner/common/stats/stathat.go deleted file mode 100644 index 1947a3912..000000000 --- a/vendor/github.com/iron-io/runner/common/stats/stathat.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package stats - -import ( - "net/http" - "net/url" - "strconv" - - "github.com/Sirupsen/logrus" -) - -func postStatHat(key, stat string, values url.Values) { - values.Set("stat", stat) - values.Set("ezkey", key) - resp, err := http.PostForm("http://api.stathat.com/ez", values) - if err != nil { - logrus.WithError(err).Error("couldn't post to StatHat") - return - } - if resp.StatusCode != 200 { - logrus.Errorln("bad status posting to StatHat", "status_code", resp.StatusCode) - } - resp.Body.Close() -} - -type StatHatReporterConfig struct { - Email string - Prefix string -} - -func (shr *StatHatReporterConfig) report(stats []*collectedStat) { - for _, s := range stats { - for k, v := range s.Counters { - n := shr.Prefix + " " + s.Name + " " + k - values := url.Values{} - values.Set("count", strconv.FormatInt(v, 10)) - postStatHat(shr.Email, n, values) - } - for k, v := range s.Values { - n := shr.Prefix + " " + s.Name + " " + k - values := url.Values{} - values.Set("value", strconv.FormatFloat(v, 'f', 3, 64)) - postStatHat(shr.Email, n, values) - } - for k, v := range s.Timers { - n := shr.Prefix + " " + s.Name + " " + k - values := url.Values{} - values.Set("value", strconv.FormatInt(int64(v), 10)) - postStatHat(shr.Email, n, values) - } - } -} diff --git a/vendor/github.com/iron-io/runner/common/stats/stats.go b/vendor/github.com/iron-io/runner/common/stats/stats.go deleted file mode 100644 index babb1f170..000000000 --- a/vendor/github.com/iron-io/runner/common/stats/stats.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package stats - -import ( - "encoding/json" - "errors" - "net/http" - "strings" - "time" - - "github.com/Sirupsen/logrus" -) - -type HTTPSubHandler interface { - HTTPHandler(relativeUrl []string, w http.ResponseWriter, r *http.Request) -} - -type Config struct { - Interval float64 `json:"interval" envconfig:"STATS_INTERVAL"` // seconds - History int // minutes - - Log string `json:"log" envconfig:"STATS_LOG"` - StatHat *StatHatReporterConfig - NewRelic *NewRelicReporterConfig - Statsd *StatsdConfig - GCStats int `json:"gc_stats" envconfig:"GC_STATS"` // seconds -} - -type Statter interface { - Inc(component string, stat string, value int64, rate float32) - Gauge(component string, stat string, value int64, rate float32) - Measure(component string, stat string, value int64, rate float32) - Time(component string, stat string, value time.Duration, rate float32) - NewTimer(component string, stat string, rate float32) *Timer -} - -type MultiStatter struct { - statters []Statter -} - -func (s *MultiStatter) Inc(component string, stat string, value int64, rate float32) { - for _, st := range s.statters { - st.Inc(component, stat, value, rate) - } -} - -func (s *MultiStatter) Gauge(component string, stat string, value int64, rate float32) { - for _, st := range s.statters { - st.Gauge(component, stat, value, rate) - } -} - -func (s *MultiStatter) Measure(component string, stat string, value int64, rate float32) { - for _, st := range s.statters { - st.Measure(component, stat, value, rate) - } -} - -func (s *MultiStatter) Time(component string, stat string, value time.Duration, rate float32) { - for _, st := range s.statters { - st.Time(component, stat, value, rate) - } -} - -func (s *MultiStatter) NewTimer(component string, stat string, rate float32) *Timer { - return newTimer(s, component, stat, rate) -} - -var badDecode error = errors.New("bad stats decode") - -func New(config Config) Statter { - s := new(MultiStatter) - - if config.Interval == 0.0 { - config.Interval = 10.0 // convenience - } - - var reporters []reporter - if config.StatHat != nil && config.StatHat.Email != "" { - reporters = append(reporters, config.StatHat) - } - - if config.NewRelic != nil && config.NewRelic.LicenseKey != "" { - // NR wants version? - // can get it out of the namespace? roll it here? 
- reporters = append(reporters, NewNewRelicReporter("1.0", config.NewRelic.LicenseKey)) - } - - if config.Log != "" { - reporters = append(reporters, NewLogReporter()) - } - - if len(reporters) > 0 { - ag := newAggregator(reporters) - s.statters = append(s.statters, ag) - go func() { - for range time.Tick(time.Duration(config.Interval * float64(time.Second))) { - ag.report(nil) - } - }() - } - - if config.Statsd != nil && config.Statsd.StatsdUdpTarget != "" { - std, err := NewStatsd(config.Statsd) - if err == nil { - s.statters = append(s.statters, std) - } else { - logrus.WithError(err).Error("Couldn't create statsd reporter") - } - } - - if len(reporters) == 0 && config.Statsd == nil && config.History == 0 { - return &NilStatter{} - } - - if config.GCStats >= 0 { - if config.GCStats == 0 { - config.GCStats = 1 - } - go StartReportingMemoryAndGC(s, time.Duration(config.GCStats)*time.Second) - } - - return s -} - -func HTTPReturnJson(w http.ResponseWriter, result interface{}) { - w.Header().Set("Content-Type", "application/json") - res, err := json.Marshal(result) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } else { - w.Write(res) - } -} - -// Convert a string to a stat name by replacing '.' with '_', lowercasing the -// string and trimming it. Doesn't do any validation, so do try this out -// locally before sending stats. -func AsStatField(input string) string { - return strings.Replace(strings.ToLower(strings.TrimSpace(input)), ".", "_", -1) -} - -// statsd like API on top of the map manipulation API. -type Timer struct { - statter Statter - component string - stat string - start time.Time - rate float32 - measured bool -} - -func newTimer(st Statter, component, stat string, rate float32) *Timer { - return &Timer{st, component, stat, time.Now(), rate, false} -} - -func (timer *Timer) Measure() { - if timer.measured { - return - } - - timer.measured = true - timer.statter.Time(timer.component, timer.stat, time.Since(timer.start), timer.rate) -} - -type NilStatter struct{} - -func (n *NilStatter) Inc(component string, stat string, value int64, rate float32) {} -func (n *NilStatter) Gauge(component string, stat string, value int64, rate float32) {} -func (n *NilStatter) Measure(component string, stat string, value int64, rate float32) {} -func (n *NilStatter) Time(component string, stat string, value time.Duration, rate float32) {} -func (r *NilStatter) NewTimer(component string, stat string, rate float32) *Timer { - return newTimer(r, component, stat, rate) -} diff --git a/vendor/github.com/iron-io/runner/common/stats/statsd.go b/vendor/github.com/iron-io/runner/common/stats/statsd.go deleted file mode 100644 index 1ff1c22e0..000000000 --- a/vendor/github.com/iron-io/runner/common/stats/statsd.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package stats - -import ( - "bytes" - "fmt" - "net" - "strings" - "time" - - "github.com/cactus/go-statsd-client/statsd" -) - -type StatsdConfig struct { - StatsdUdpTarget string `json:"target" mapstructure:"target" envconfig:"STATSD_TARGET"` - Interval int64 `json:"interval" envconfig:"STATSD_INTERVAL"` - Prefix string `json:"prefix" envconfig:"STATSD_PREFIX"` -} - -type keyCreator interface { - // The return value of Key *MUST* never have a '.' at the end. - Key(stat string) string -} - -type theStatsdReporter struct { - keyCreator - client statsd.Statter -} - -type prefixKeyCreator struct { - parent keyCreator - prefixes []string -} - -func (pkc *prefixKeyCreator) Key(stat string) string { - prefix := strings.Join(pkc.prefixes, ".") - - if pkc.parent != nil { - prefix = pkc.parent.Key(prefix) - } - - if stat == "" { - return prefix - } - - if prefix == "" { - return stat - } - - return prefix + "." + stat -} - -func whoami() string { - a, _ := net.InterfaceAddrs() - for i := range a { - // is a textual representation of an IPv4 address - z, _, err := net.ParseCIDR(a[i].String()) - if a[i].Network() == "ip+net" && err == nil && z.To4() != nil { - if !bytes.Equal(z, net.ParseIP("127.0.0.1")) { - return strings.Replace(fmt.Sprintf("%v", z), ".", "_", -1) - } - } - } - return "127_0_0_1" // shrug -} - -// The config.Prefix is sent before each message and can be used to set API -// keys. The prefix is used as the key prefix. -// If config is nil, creates a noop reporter. -// -// st, e := NewStatsd(config, "ironmq") -// st.Inc("enqueue", 1) -> Actually records to key ironmq.enqueue. -func NewStatsd(config *StatsdConfig) (*theStatsdReporter, error) { - var client statsd.Statter - var err error - if config != nil { - // 512 for now since we are sending to hostedgraphite over the internet. - config.Prefix += "." + whoami() - client, err = statsd.NewBufferedClient(config.StatsdUdpTarget, config.Prefix, time.Duration(config.Interval)*time.Second, 512) - } else { - client, err = statsd.NewNoopClient() - } - if err != nil { - return nil, err - } - - return &theStatsdReporter{keyCreator: &prefixKeyCreator{}, client: client}, nil -} - -func (sr *theStatsdReporter) Inc(component, stat string, value int64, rate float32) { - sr.client.Inc(sr.keyCreator.Key(component+"."+stat), value, rate) -} - -func (sr *theStatsdReporter) Measure(component, stat string, delta int64, rate float32) { - sr.client.Timing(sr.keyCreator.Key(component+"."+stat), delta, rate) -} - -func (sr *theStatsdReporter) Time(component, stat string, delta time.Duration, rate float32) { - sr.client.TimingDuration(sr.keyCreator.Key(component+"."+stat), delta, rate) -} - -func (sr *theStatsdReporter) Gauge(component, stat string, value int64, rate float32) { - sr.client.Gauge(sr.keyCreator.Key(component+"."+stat), value, rate) -} - -func (sr *theStatsdReporter) NewTimer(component string, stat string, rate float32) *Timer { - return newTimer(sr, component, stat, rate) -} - -// We need some kind of all-or-nothing sampler where multiple stats can be -// given the same rate and they are either all logged on that run or none of -// them are. The statsd library we use ends up doing its own rate calculation -// which is going to impede doing something like this. 
diff --git a/vendor/github.com/iron-io/runner/common/unix_logging.go b/vendor/github.com/iron-io/runner/common/unix_logging.go deleted file mode 100644 index 32b4c69d1..000000000 --- a/vendor/github.com/iron-io/runner/common/unix_logging.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !windows,!nacl,!plan9 - -package common - -import ( - "io/ioutil" - "net/url" - - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/syslog" -) - -func NewSyslogHook(url *url.URL, prefix string) error { - syslog, err := logrus_syslog.NewSyslogHook(url.Scheme, url.Host, 0, prefix) - if err != nil { - return err - } - logrus.AddHook(syslog) - // TODO we could support multiple destinations... - logrus.SetOutput(ioutil.Discard) - return nil -} diff --git a/vendor/github.com/iron-io/runner/common/win_logging.go b/vendor/github.com/iron-io/runner/common/win_logging.go deleted file mode 100644 index 1bb74544f..000000000 --- a/vendor/github.com/iron-io/runner/common/win_logging.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!darwin - -package common - -import ( - "errors" - "net/url" -) - -func NewSyslogHook(url *url.URL, prefix string) error { - return errors.New("Syslog not supported on this system.") -} diff --git a/vendor/github.com/iron-io/runner/common/writers.go b/vendor/github.com/iron-io/runner/common/writers.go deleted file mode 100644 index d18f09eca..000000000 --- a/vendor/github.com/iron-io/runner/common/writers.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "bytes" - "errors" - "io" -) - -// lineWriter will break apart a stream of data into individual lines. -// Downstream writer will be called for each complete new line. When Flush -// is called, a newline will be appended if there isn't one at the end. -// Not thread-safe -type LineWriter struct { - b *bytes.Buffer - w io.Writer -} - -func NewLineWriter(w io.Writer) *LineWriter { - return &LineWriter{ - w: w, - b: bytes.NewBuffer(make([]byte, 0, 1024)), - } -} - -func (li *LineWriter) Write(p []byte) (int, error) { - n, err := li.b.Write(p) - if err != nil { - return n, err - } - if n != len(p) { - return n, errors.New("short write") - } - - for { - b := li.b.Bytes() - i := bytes.IndexByte(b, '\n') - if i < 0 { - break - } - - l := b[:i+1] - ns, err := li.w.Write(l) - if err != nil { - return ns, err - } - li.b.Next(len(l)) - } - - return n, nil -} - -func (li *LineWriter) Flush() (int, error) { - b := li.b.Bytes() - if len(b) == 0 { - return 0, nil - } - - if b[len(b)-1] != '\n' { - b = append(b, '\n') - } - return li.w.Write(b) -} - -// HeadLinesWriter stores upto the first N lines in a buffer that can be -// retrieved via Head(). -type HeadLinesWriter struct { - buffer bytes.Buffer - max int -} - -func NewHeadLinesWriter(max int) *HeadLinesWriter { - return &HeadLinesWriter{ - buffer: bytes.Buffer{}, - max: max, - } -} - -// Writes start failing once the writer has reached capacity. 
-// In such cases the return value is the actual count written (may be zero) and io.ErrShortWrite. -func (h *HeadLinesWriter) Write(p []byte) (n int, err error) { - var afterNewLine int - for h.max > 0 && afterNewLine < len(p) { - idx := bytes.IndexByte(p[afterNewLine:], '\n') - if idx == -1 { - h.buffer.Write(p[afterNewLine:]) - afterNewLine = len(p) - } else { - h.buffer.Write(p[afterNewLine : afterNewLine+idx+1]) - afterNewLine = afterNewLine + idx + 1 - h.max-- - } - } - - if afterNewLine == len(p) { - return afterNewLine, nil - } - - return afterNewLine, io.ErrShortWrite -} - -// The returned bytes alias the buffer, the same restrictions as -// bytes.Buffer.Bytes() apply. -func (h *HeadLinesWriter) Head() []byte { - return h.buffer.Bytes() -} - -// TailLinesWriter stores upto the last N lines in a buffer that can be retrieved -// via Tail(). The truncation is only performed when more bytes are received -// after '\n', so the buffer contents for both these writes are identical. -// -// tail writer that captures last 3 lines. -// 'a\nb\nc\nd\n' -> 'b\nc\nd\n' -// 'a\nb\nc\nd' -> 'b\nc\nd' -type TailLinesWriter struct { - buffer bytes.Buffer - max int - newlineEncountered bool - // Tail is not idempotent without this. - tailCalled bool -} - -func NewTailLinesWriter(max int) *TailLinesWriter { - return &TailLinesWriter{ - buffer: bytes.Buffer{}, - max: max, - } -} - -// Write always succeeds! This is because all len(p) bytes are written to the -// buffer before it is truncated. -func (t *TailLinesWriter) Write(p []byte) (n int, err error) { - if t.tailCalled { - return 0, errors.New("Tail() has already been called.") - } - - var afterNewLine int - for afterNewLine < len(p) { - // This is at the top of the loop so it does not operate on trailing - // newlines. That is handled by Tail() where we have full knowledge that it - // is indeed the true trailing newline (if any). - if t.newlineEncountered { - if t.max > 0 { - // we still have capacity - t.max-- - } else { - // chomp a newline. - t.chompNewline() - } - } - - idx := bytes.IndexByte(p[afterNewLine:], '\n') - if idx == -1 { - t.buffer.Write(p[afterNewLine:]) - afterNewLine = len(p) - t.newlineEncountered = false - } else { - t.buffer.Write(p[afterNewLine : afterNewLine+idx+1]) - afterNewLine = afterNewLine + idx + 1 - t.newlineEncountered = true - } - - } - return len(p), nil -} - -func (t *TailLinesWriter) chompNewline() { - b := t.buffer.Bytes() - idx := bytes.IndexByte(b, '\n') - if idx >= 0 { - t.buffer.Next(idx + 1) - } else { - // pretend a trailing newline exists. In the call in Write() this will - // never be hit. - t.buffer.Truncate(0) - } -} - -// The returned bytes alias the buffer, the same restrictions as -// bytes.Buffer.Bytes() apply. -// -// Once Tail() is called, further Write()s error. -func (t *TailLinesWriter) Tail() []byte { - if !t.tailCalled { - t.tailCalled = true - if t.max <= 0 { - t.chompNewline() - } - } - return t.buffer.Bytes() -} diff --git a/vendor/github.com/iron-io/runner/common/writers_test.go b/vendor/github.com/iron-io/runner/common/writers_test.go deleted file mode 100644 index 2691c9bc9..000000000 --- a/vendor/github.com/iron-io/runner/common/writers_test.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "bytes" - "fmt" - "io" - "testing" -) - -type testSliceWriter struct { - b [][]byte -} - -func (tsw *testSliceWriter) Write(p []byte) (n int, err error) { - l := make([]byte, len(p)) - copy(l, p) - tsw.b = append(tsw.b, l) - return len(p), nil -} - -func TestLineWriter(t *testing.T) { - tsw := &testSliceWriter{} - lw := NewLineWriter(tsw) - - lineCount := 7 - lw.Write([]byte("0 line\n1 line\n2 line\n\n4 line")) - lw.Write([]byte("+more\n5 line\n")) - lw.Write([]byte("6 line")) - - lw.Flush() - - if len(tsw.b) != lineCount { - t.Errorf("Expected %v individual rows; got %v", lineCount, len(tsw.b)) - } - - for x := 0; x < len(tsw.b); x++ { - l := fmt.Sprintf("%v line\n", x) - if x == 3 { - if len(tsw.b[x]) != 1 { - t.Errorf("Expected slice with only newline; got %v", tsw.b[x]) - } - continue - } else if x == 4 { - l = "4 line+more\n" - } - if !bytes.Equal(tsw.b[x], []byte(l)) { - t.Errorf("Expected slice %s equal to %s", []byte(l), tsw.b[x]) - } - } -} - -func TestHeadWriter(t *testing.T) { - data := []byte("the quick\n brown\n fox jumped\n over the\n lazy dog.") - w := NewHeadLinesWriter(3) - _, err := w.Write(data[:4]) - if err != nil { - t.Errorf("Expected nil error on small write") - } - - if !bytes.Equal(w.Head(), []byte("the ")) { - t.Errorf("Expected 4 bytes in head, got '%s'", w.Head()) - } - - n, err := w.Write(data[4:16]) - if n != len(data[4:16]) || err != nil { - t.Errorf("HeadWriter Write() does not satisfy contract about failing writes.") - } - - if !bytes.Equal(w.Head(), []byte("the quick\n brown")) { - t.Errorf("unexpected contents of head, got '%s'", w.Head()) - } - - n, err = w.Write(data[16:]) - if n != (29-16) || err != io.ErrShortWrite { - t.Errorf("HeadWriter Write() does not satisfy contract about failing writes.") - } - if !bytes.Equal(w.Head(), data[:29]) { - t.Errorf("unexpected contents of head, got '%s'", w.Head()) - } -} - -func testTail(t *testing.T, n int, output []byte, writes ...[]byte) { - w := NewTailLinesWriter(n) - for _, slice := range writes { - written, err := w.Write(slice) - if written != len(slice) || err != nil { - t.Errorf("Tail Write() should always succeed, but failed, input=%s, input length = %d, written=%d, err=%s", slice, len(slice), written, err) - } - } - if !bytes.Equal(w.Tail(), output) { - t.Errorf("Output did not match for tail writer of length %d: Expected '%s', got '%s'", n, output, w.Tail()) - } -} - -func TestTailWriter(t *testing.T) { - inputs := [][]byte{[]byte("a\nb\n"), []byte("gh"), []byte("\n")} - testTail(t, 2, []byte("b\ngh\n"), inputs...) -} - -func TestZeroAndOneTailWriter(t *testing.T) { - // zero line writer, with only single line added to it should return empty buffer. 
- testTail(t, 0, []byte(""), []byte("Hello World\n")) - testTail(t, 0, []byte(""), []byte("Hello World")) - - b1 := []byte("Hello World") - testTail(t, 1, b1, b1) - - b1 = []byte("Hello World\n") - testTail(t, 1, b1, b1) - - b2 := []byte("Yeah!\n") - testTail(t, 1, b2, b1, b2) - - b1 = []byte("Flat write") - b2 = []byte("Yeah!\n") - j := bytes.Join([][]byte{b1, b2}, []byte{}) - testTail(t, 1, j, b1, b2) -} - -func TestTailWriterTrailing(t *testing.T) { - input1 := []byte("a\nb\nc\nd\ne") - input2 := []byte("a\nb\nc\nd\ne\n") - w1 := NewTailLinesWriter(4) - w1.Write(input1) - w2 := NewTailLinesWriter(4) - w2.Write(input2) - if !bytes.Equal(w1.Tail(), []byte("b\nc\nd\ne")) { - t.Errorf("Tail not working correctly, got '%s'", w1.Tail()) - } - - t2 := w2.Tail() - if !bytes.Equal(w1.Tail(), t2[:len(t2)-1]) { - t.Errorf("Tailwriter does not transition correctly over trailing newline. '%s', '%s'", w1.Tail(), t2) - } -} diff --git a/vendor/github.com/iron-io/runner/dind/Dockerfile b/vendor/github.com/iron-io/runner/dind/Dockerfile deleted file mode 100644 index 4a2565036..000000000 --- a/vendor/github.com/iron-io/runner/dind/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2016 Iron.io -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -FROM docker:1.13.1-dind - -RUN apk update && apk upgrade && apk add --no-cache ca-certificates - -COPY entrypoint.sh /usr/local/bin/ -COPY dind.sh /usr/local/bin/ - -ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] - -# USAGE: Add a CMD to your own Dockerfile to use this (NOT an ENTRYPOINT, so that this is called) -# CMD ["./runner"] diff --git a/vendor/github.com/iron-io/runner/dind/README.md b/vendor/github.com/iron-io/runner/dind/README.md deleted file mode 100644 index 227bacbb5..000000000 --- a/vendor/github.com/iron-io/runner/dind/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This is the base image for all Titan's docker-in-docker images. - diff --git a/vendor/github.com/iron-io/runner/dind/build.sh b/vendor/github.com/iron-io/runner/dind/build.sh deleted file mode 100755 index 6b84dbd58..000000000 --- a/vendor/github.com/iron-io/runner/dind/build.sh +++ /dev/null @@ -1,22 +0,0 @@ - -# Copyright 2016 Iron.io -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -set -ex - -docker build -t iron/dind:latest . - -cd go-dind -docker build -t iron/go-dind:latest . 
diff --git a/vendor/github.com/iron-io/runner/dind/chaos/Dockerfile b/vendor/github.com/iron-io/runner/dind/chaos/Dockerfile deleted file mode 100644 index a8f1926f2..000000000 --- a/vendor/github.com/iron-io/runner/dind/chaos/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2016 Iron.io -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# iron/dind-chaos -FROM docker:1.12-rc-dind - -RUN apk update && apk upgrade && apk add --no-cache ca-certificates - -COPY entrypoint.sh /usr/local/bin/ -COPY chaos.sh /usr/local/bin/ - -ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] - -# USAGE: Add a CMD to your own Dockerfile to use this (NOT an ENTRYPOINT, so that this is called) -# CMD ["./runner"] diff --git a/vendor/github.com/iron-io/runner/dind/chaos/README.md b/vendor/github.com/iron-io/runner/dind/chaos/README.md deleted file mode 100644 index 5a4270962..000000000 --- a/vendor/github.com/iron-io/runner/dind/chaos/README.md +++ /dev/null @@ -1 +0,0 @@ -dind docker to periodically kill docker to test against diff --git a/vendor/github.com/iron-io/runner/dind/chaos/chaos.sh b/vendor/github.com/iron-io/runner/dind/chaos/chaos.sh deleted file mode 100755 index 84042c513..000000000 --- a/vendor/github.com/iron-io/runner/dind/chaos/chaos.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh - -# Copyright 2016 Iron.io -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -ex - -sleep 600 # 10 minutes - -for i in 1..1000; do - pkill -9 dockerd - pkill -9 docker-containerd - # remove pid file since we killed docker hard - rm /var/run/docker.pid - sleep 30 - docker daemon \ - --host=unix:///var/run/docker.sock \ - --host=tcp://0.0.0.0:2375 & - sleep 300 # 5 minutes -done diff --git a/vendor/github.com/iron-io/runner/dind/chaos/entrypoint.sh b/vendor/github.com/iron-io/runner/dind/chaos/entrypoint.sh deleted file mode 100755 index 26401dc45..000000000 --- a/vendor/github.com/iron-io/runner/dind/chaos/entrypoint.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh - -# Copyright 2016 Iron.io -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -set -ex - -# modified from: https://github.com/docker-library/docker/blob/866c3fbd87e8eeed524fdf19ba2d63288ad49cd2/1.11/dind/dockerd-entrypoint.sh -# this will run either overlay or aufs as the docker fs driver, if the OS has both, overlay is preferred. - -docker daemon \ - --host=unix:///var/run/docker.sock \ - --host=tcp://0.0.0.0:2375 & - -# wait for daemon to initialize -sleep 10 - -/usr/local/bin/chaos.sh & - -exec "$@" diff --git a/vendor/github.com/iron-io/runner/dind/dind.sh b/vendor/github.com/iron-io/runner/dind/dind.sh deleted file mode 100755 index 3fe976601..000000000 --- a/vendor/github.com/iron-io/runner/dind/dind.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh - -# Copyright 2016 Iron.io -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -ex -# modified from: https://github.com/docker-library/docker/blob/866c3fbd87e8eeed524fdf19ba2d63288ad49cd2/1.11/dind/dockerd-entrypoint.sh -# this will run either overlay or aufs as the docker fs driver, if the OS has both, overlay is preferred. -# rewrite overlay to use overlay2 (docker 1.12, linux >=4.x required), see https://docs.docker.com/engine/userguide/storagedriver/selectadriver/#overlay-vs-overlay2 - -fsdriver=$(grep -Eh -w -m1 "overlay|aufs" /proc/filesystems | cut -f2) - -if [ $fsdriver == "overlay" ]; then - fsdriver="overlay2" -fi - -#https://docs.docker.com/engine/userguide/storagedriver/overlayfs-driver/#configure-docker-with-the-overlay-or-overlay2-storage-driver -sub_opt="" -case "$(uname -r)" in - *.el7*) sub_opt="--storage-opt overlay2.override_kernel_check=1" ;; -esac - -cmd="dockerd \ - --host=unix:///var/run/docker.sock \ - --host=tcp://0.0.0.0:2375 \ - --storage-driver=$fsdriver - $sub_opt" - -# nanny and restart on crashes -until eval $cmd; do - echo "Docker crashed with exit code $?. Respawning.." >&2 - # if we just restart it won't work, so start it (it wedges up) and - # then kill the wedgie and restart it again and ta da... yea, seriously - pidfile=/var/run/docker/libcontainerd/docker-containerd.pid - kill -9 $(cat $pidfile) - rm $pidfile - sleep 1 -done diff --git a/vendor/github.com/iron-io/runner/dind/entrypoint.sh b/vendor/github.com/iron-io/runner/dind/entrypoint.sh deleted file mode 100755 index 3c719e11a..000000000 --- a/vendor/github.com/iron-io/runner/dind/entrypoint.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/sh - -# Copyright 2016 Iron.io -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -ex - -/usr/local/bin/dind.sh & - -# wait for daemon to initialize -sleep 3 - -exec "$@" diff --git a/vendor/github.com/iron-io/runner/dind/release.sh b/vendor/github.com/iron-io/runner/dind/release.sh deleted file mode 100755 index 7d8f8d0f8..000000000 --- a/vendor/github.com/iron-io/runner/dind/release.sh +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2016 Iron.io -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -./build.sh - -docker run --rm -v "$PWD":/app treeder/bump patch -version=`cat VERSION` -echo "version $version" - -docker tag iron/dind:latest iron/dind:$version - -docker push iron/dind:latest -docker push iron/dind:$version diff --git a/vendor/github.com/iron-io/runner/drivers/README.md b/vendor/github.com/iron-io/runner/drivers/README.md deleted file mode 100644 index e178215dc..000000000 --- a/vendor/github.com/iron-io/runner/drivers/README.md +++ /dev/null @@ -1 +0,0 @@ -This package is intended as a general purpose container abstraction library. With the same code, you can run on Docker, Rkt, etc. diff --git a/vendor/github.com/iron-io/runner/drivers/docker/docker.go b/vendor/github.com/iron-io/runner/drivers/docker/docker.go deleted file mode 100644 index c64d6a418..000000000 --- a/vendor/github.com/iron-io/runner/drivers/docker/docker.go +++ /dev/null @@ -1,643 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package docker - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "path" - "strings" - "time" - - "github.com/Sirupsen/logrus" - manifest "github.com/docker/distribution/manifest/schema1" - "github.com/fsouza/go-dockerclient" - "github.com/heroku/docker-registry-client/registry" - "github.com/iron-io/runner/common" - "github.com/iron-io/runner/common/stats" - "github.com/iron-io/runner/drivers" -) - -const hubURL = "https://registry.hub.docker.com" - -var registryClient = &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 10 * time.Second, - KeepAlive: 2 * time.Minute, - }).Dial, - TLSClientConfig: &tls.Config{ - ClientSessionCache: tls.NewLRUClientSessionCache(8192), - }, - TLSHandshakeTimeout: 10 * time.Second, - MaxIdleConnsPerHost: 32, // TODO tune; we will likely be making lots of requests to same place - Proxy: http.ProxyFromEnvironment, - IdleConnTimeout: 90 * time.Second, - MaxIdleConns: 512, - ExpectContinueTimeout: 1 * time.Second, - }, -} - -// A drivers.ContainerTask should implement the Auther interface if it would -// like to use not-necessarily-public docker images for any or all task -// invocations. -type Auther interface { - // DockerAuth should return docker auth credentials that will authenticate - // against a docker registry for a given drivers.ContainerTask.Image(). An - // error may be returned which will cause the task not to be run, this can be - // useful for an implementer to do things like testing auth configurations - // before returning them; e.g. if the implementer would like to impose - // certain restrictions on images or if credentials must be acquired right - // before runtime and there's an error doing so. If these credentials don't - // work, the docker pull will fail and the task will be set to error status. - DockerAuth() (docker.AuthConfiguration, error) -} - -type runResult struct { - error - StatusValue string -} - -func (r *runResult) Error() string { - if r.error == nil { - return "" - } - return r.error.Error() -} - -func (r *runResult) Status() string { return r.StatusValue } -func (r *runResult) UserVisible() bool { return common.IsUserVisibleError(r.error) } - -type DockerDriver struct { - conf drivers.Config - docker dockerClient // retries on *docker.Client, restricts ad hoc *docker.Client usage / retries - hostname string - - *common.Environment -} - -// implements drivers.Driver -func NewDocker(env *common.Environment, conf drivers.Config) *DockerDriver { - hostname, err := os.Hostname() - if err != nil { - logrus.WithError(err).Fatal("couldn't resolve hostname") - } - - return &DockerDriver{ - conf: conf, - docker: newClient(env), - hostname: hostname, - Environment: env, - } -} - -// CheckRegistry will return a sizer, which can be used to check the size of an -// image if the returned error is nil. If the error returned is nil, then -// authentication against the given credentials was successful, if the -// configuration does not specify a config.ServerAddress, -// https://hub.docker.com will be tried. CheckRegistry is a package level -// method since rkt can also use docker images, we may be interested in using -// rkt w/o a docker driver configured; also, we don't have to tote around a -// driver in any tasker that may be interested in registry information (2/2 -// cases thus far). 
-func CheckRegistry(image string, config docker.AuthConfiguration) (Sizer, error) { - registry, repo, tag := drivers.ParseImage(image) - - reg, err := registryForConfig(config, registry) - if err != nil { - return nil, err - } - - mani, err := reg.Manifest(repo, tag) - if err != nil { - logrus.WithFields(logrus.Fields{"username": config.Username, "server": config.ServerAddress, "image": image}).WithError(err).Error("Credentials not authorized, trying next.") - //if !isAuthError(err) { - // // TODO we might retry this, since if this was the registry that was supposed to - // // auth the task will erroneously be set to 'error' - //} - - return nil, err - } - - return &sizer{mani, reg, repo}, nil -} - -// Sizer returns size information. This interface is liable to contain more -// than a size at some point, change as needed. -type Sizer interface { - Size() (int64, error) -} - -type sizer struct { - mani *manifest.SignedManifest - reg *registry.Registry - repo string -} - -func (s *sizer) Size() (int64, error) { - var sum int64 - for _, r := range s.mani.References() { - desc, err := s.reg.LayerMetadata(s.repo, r.Digest) - if err != nil { - return 0, err - } - sum += desc.Size - } - return sum, nil -} - -func registryURL(addr string) (string, error) { - if addr == "" || strings.Contains(addr, "hub.docker.com") || strings.Contains(addr, "index.docker.io") { - return hubURL, nil - } - - url, err := url.Parse(addr) - if err != nil { - // TODO we could error the task out from this with a user error but since - // we have a list of auths to check, just return the error so as to be - // skipped... horrible api as it is - logrus.WithFields(logrus.Fields{"auth_addr": addr}).WithError(err).Error("error parsing server address url, skipping") - return "", err - } - - if url.Scheme == "" { - url.Scheme = "https" - } - url.Path = strings.TrimSuffix(url.Path, "/") - url.Path = strings.TrimPrefix(url.Path, "/v2") - url.Path = strings.TrimPrefix(url.Path, "/v1") // just try this, if it fails it fails, not supporting v1 - return url.String(), nil -} - -func isAuthError(err error) bool { - // AARGH! - if urlError, ok := err.(*url.Error); ok { - if httpError, ok := urlError.Err.(*registry.HttpStatusError); ok { - if httpError.Response.StatusCode == 401 { - return true - } - } - } - - return false -} - -func registryForConfig(config docker.AuthConfiguration, reg string) (*registry.Registry, error) { - if reg == "" { - reg = config.ServerAddress - } - - var err error - config.ServerAddress, err = registryURL(reg) - if err != nil { - return nil, err - } - - // Use this instead of registry.New to avoid the Ping(). 
- transport := registry.WrapTransport(registryClient.Transport, reg, config.Username, config.Password) - r := ®istry.Registry{ - URL: config.ServerAddress, - Client: &http.Client{ - Transport: transport, - }, - Logf: registry.Quiet, - } - return r, nil -} - -func (drv *DockerDriver) Prepare(ctx context.Context, task drivers.ContainerTask) (drivers.Cookie, error) { - var cmd []string - if task.Command() != "" { - // NOTE: this is hyper-sensitive and may not be correct like this even, but it passes old tests - // task.Command() in swapi is always "sh /mnt/task/.runtask" so fields is safe - cmd = strings.Fields(task.Command()) - logrus.WithFields(logrus.Fields{"task_id": task.Id(), "cmd": cmd, "len": len(cmd)}).Debug("docker command") - } - - envvars := make([]string, 0, len(task.EnvVars())) - for name, val := range task.EnvVars() { - envvars = append(envvars, name+"="+val) - } - - containerName := newContainerID(task) - container := docker.CreateContainerOptions{ - Name: containerName, - Config: &docker.Config{ - Env: envvars, - Cmd: cmd, - Memory: int64(drv.conf.Memory), - CPUShares: drv.conf.CPUShares, - Hostname: drv.hostname, - Image: task.Image(), - Volumes: map[string]struct{}{}, - Labels: task.Labels(), - OpenStdin: true, - AttachStdin: true, - StdinOnce: true, - }, - HostConfig: &docker.HostConfig{}, - Context: ctx, - } - - volumes := task.Volumes() - for _, mapping := range volumes { - hostDir := mapping[0] - containerDir := mapping[1] - container.Config.Volumes[containerDir] = struct{}{} - mapn := fmt.Sprintf("%s:%s", hostDir, containerDir) - container.HostConfig.Binds = append(container.HostConfig.Binds, mapn) - logrus.WithFields(logrus.Fields{"volumes": mapn, "task_id": task.Id()}).Debug("setting volumes") - } - - if wd := task.WorkDir(); wd != "" { - logrus.WithFields(logrus.Fields{"wd": wd, "task_id": task.Id()}).Debug("setting work dir") - container.Config.WorkingDir = wd - } - - err := drv.ensureImage(ctx, task) - if err != nil { - return nil, err - } - - createTimer := drv.NewTimer("docker", "create_container", 1.0) - _, err = drv.docker.CreateContainer(container) - createTimer.Measure() - if err != nil { - // since we retry under the hood, if the container gets created and retry fails, we can just ignore error - if err != docker.ErrContainerAlreadyExists { - logrus.WithFields(logrus.Fields{"task_id": task.Id(), "command": container.Config.Cmd, "memory": container.Config.Memory, - "cpu_shares": container.Config.CPUShares, "hostname": container.Config.Hostname, "name": container.Name, - "image": container.Config.Image, "volumes": container.Config.Volumes, "binds": container.HostConfig.Binds, "container": containerName, - }).WithError(err).Error("Could not create container") - - if ce := containerConfigError(err); ce != nil { - return nil, common.UserError(fmt.Errorf("Failed to create container from task configuration '%s'", ce)) - } - return nil, err - } - } - - // discard removal error - return &cookie{id: containerName, task: task, drv: drv}, nil -} - -func (drv *DockerDriver) IsAlive(ctx context.Context) bool { - err := drv.docker.Ping() - return err == nil -} - -type cookie struct { - id string - task drivers.ContainerTask - drv *DockerDriver -} - -func (c *cookie) Close() error { return c.drv.removeContainer(c.id) } - -func (c *cookie) Run(ctx context.Context) (drivers.RunResult, error) { - return c.drv.run(ctx, c.id, c.task) -} - -func (drv *DockerDriver) removeContainer(container string) error { - removeTimer := drv.NewTimer("docker", "remove_container", 1.0) - defer 
removeTimer.Measure() - err := drv.docker.RemoveContainer(docker.RemoveContainerOptions{ - ID: container, Force: true, RemoveVolumes: true}) - - if err != nil { - logrus.WithError(err).WithFields(logrus.Fields{"container": container}).Error("error removing container") - } - return nil -} - -func (drv *DockerDriver) ensureImage(ctx context.Context, task drivers.ContainerTask) error { - reg, _, _ := drivers.ParseImage(task.Image()) - - // ask for docker creds before looking for image, as the tasker may need to - // validate creds even if the image is downloaded. - - var config docker.AuthConfiguration // default, tries docker hub w/o user/pass - if task, ok := task.(Auther); ok { - var err error - config, err = task.DockerAuth() - if err != nil { - return err - } - } - - if reg != "" { - config.ServerAddress = reg - } - - // see if we already have it, if not, pull it - _, err := drv.docker.InspectImage(task.Image()) - if err == docker.ErrNoSuchImage { - err = drv.pullImage(ctx, task, config) - } - - return err -} - -func (drv *DockerDriver) pullImage(ctx context.Context, task drivers.ContainerTask, config docker.AuthConfiguration) error { - log := common.Logger(ctx) - - reg, repo, tag := drivers.ParseImage(task.Image()) - globalRepo := path.Join(reg, repo) - - pullTimer := drv.NewTimer("docker", "pull_image", 1.0) - defer pullTimer.Measure() - - drv.Inc("docker", "pull_image_count."+stats.AsStatField(task.Image()), 1, 1) - - if reg != "" { - config.ServerAddress = reg - } - - var err error - config.ServerAddress, err = registryURL(config.ServerAddress) - if err != nil { - return err - } - - log.WithFields(logrus.Fields{"registry": config.ServerAddress, "username": config.Username, "image": task.Image()}).Info("Pulling image") - - err = drv.docker.PullImage(docker.PullImageOptions{Repository: globalRepo, Tag: tag, Context: ctx}, config) - if err != nil { - drv.Inc("task", "error.pull."+stats.AsStatField(task.Image()), 1, 1) - log.WithFields(logrus.Fields{"registry": config.ServerAddress, "username": config.Username, "image": task.Image()}).WithError(err).Error("Failed to pull image") - - // TODO need to inspect for hub or network errors and pick. - return common.UserError(fmt.Errorf("Failed to pull image '%s': %s", task.Image(), err)) - - // TODO what about a case where credentials were good, then credentials - // were invalidated -- do we need to keep the credential cache docker - // driver side and after pull for this case alone? - } - - return nil -} - -// Run executes the docker container. If task runs, drivers.RunResult will be returned. If something fails outside the task (ie: Docker), it will return error. -// The docker driver will attempt to cast the task to a Auther. If that succeeds, private image support is available. See the Auther interface for how to implement this. 
-func (drv *DockerDriver) run(ctx context.Context, container string, task drivers.ContainerTask) (drivers.RunResult, error) { - log := common.Logger(ctx) - timeout := task.Timeout() - - var cancel context.CancelFunc - if timeout <= 0 { - ctx, cancel = context.WithCancel(ctx) - } else { - ctx, cancel = context.WithTimeout(ctx, timeout) - } - defer cancel() // do this so that after Run exits, nanny and collect stop - var complete bool - defer func() { complete = true }() // run before cancel is called - ctx = context.WithValue(ctx, completeKey, &complete) - - go drv.nanny(ctx, container) - go drv.collectStats(ctx, container, task) - - mwOut, mwErr := task.Logger() - - timer := drv.NewTimer("docker", "attach_container", 1) - waiter, err := drv.docker.AttachToContainerNonBlocking(docker.AttachToContainerOptions{ - Container: container, OutputStream: mwOut, ErrorStream: mwErr, - Stream: true, Logs: true, Stdout: true, Stderr: true, - Stdin: true, InputStream: task.Input()}) - timer.Measure() - if err != nil { - return nil, err - } - - err = drv.startTask(ctx, container) - if err != nil { - return nil, err - } - - taskTimer := drv.NewTimer("docker", "container_runtime", 1) - - // can discard error, inspect will tell us about the task and wait will retry under the hood - drv.docker.WaitContainerWithContext(container, ctx) - taskTimer.Measure() - - waiter.Close() - err = waiter.Wait() - if err != nil { - // TODO need to make sure this error isn't just a context error or something we can ignore - log.WithError(err).Error("attach to container returned error, task may be missing logs") - } - - status, err := drv.status(ctx, container) - return &runResult{ - StatusValue: status, - error: err, - }, nil -} - -const completeKey = "complete" - -// watch for cancel or timeout and kill process. -func (drv *DockerDriver) nanny(ctx context.Context, container string) { - select { - case <-ctx.Done(): - if *(ctx.Value(completeKey).(*bool)) { - return - } - drv.cancel(container) - } -} - -func (drv *DockerDriver) cancel(container string) { - stopTimer := drv.NewTimer("docker", "stop_container", 1.0) - err := drv.docker.StopContainer(container, 30) - stopTimer.Measure() - if err != nil { - logrus.WithError(err).WithFields(logrus.Fields{"container": container, "errType": fmt.Sprintf("%T", err)}).Error("something managed to escape our retries web, could not kill container") - } -} - -func (drv *DockerDriver) collectStats(ctx context.Context, container string, task drivers.ContainerTask) { - done := make(chan bool) - defer close(done) - dstats := make(chan *docker.Stats, 1) - go func() { - // NOTE: docker automatically streams every 1s. we can skip or avg samples if we'd like but - // the memory overhead is < 1MB for 3600 stat points so this seems fine, seems better to stream - // (internal docker api streams) than open/close stream for 1 sample over and over. 
- // must be called in goroutine, docker.Stats() blocks - err := drv.docker.Stats(docker.StatsOptions{ - ID: container, - Stats: dstats, - Stream: true, - Done: done, // A flag that enables stopping the stats operation - }) - - if err != nil && err != io.ErrClosedPipe { - logrus.WithError(err).WithFields(logrus.Fields{"container": container, "task_id": task.Id()}).Error("error streaming docker stats for task") - } - }() - - for { - select { - case <-ctx.Done(): - return - case ds, ok := <-dstats: - if !ok { - return - } - task.WriteStat(cherryPick(ds)) - } - } -} - -func cherryPick(ds *docker.Stats) drivers.Stat { - // TODO cpu % is as a % of the whole system... cpu is weird since we're sharing it - // across a bunch of containers and it scales based on how many we're sharing with, - // do we want users to see as a % of system? - systemDelta := float64(ds.CPUStats.SystemCPUUsage - ds.PreCPUStats.SystemCPUUsage) - cores := float64(len(ds.CPUStats.CPUUsage.PercpuUsage)) - var cpuUser, cpuKernel, cpuTotal float64 - if systemDelta > 0 { - // TODO we could leave these in docker format and let hud/viz tools do this instead of us... like net is, could do same for mem, too. thoughts? - cpuUser = (float64(ds.CPUStats.CPUUsage.UsageInUsermode-ds.PreCPUStats.CPUUsage.UsageInUsermode) / systemDelta) * cores * 100.0 - cpuKernel = (float64(ds.CPUStats.CPUUsage.UsageInKernelmode-ds.PreCPUStats.CPUUsage.UsageInKernelmode) / systemDelta) * cores * 100.0 - cpuTotal = (float64(ds.CPUStats.CPUUsage.TotalUsage-ds.PreCPUStats.CPUUsage.TotalUsage) / systemDelta) * cores * 100.0 - } - - var rx, tx float64 - for _, v := range ds.Networks { - rx += float64(v.RxBytes) - tx += float64(v.TxBytes) - } - - var blkRead, blkWrite uint64 - for _, bioEntry := range ds.BlkioStats.IOServiceBytesRecursive { - switch strings.ToLower(bioEntry.Op) { - case "read": - blkRead = blkRead + bioEntry.Value - case "write": - blkWrite = blkWrite + bioEntry.Value - } - } - - return drivers.Stat{ - Timestamp: ds.Read, - Metrics: map[string]uint64{ - // source: https://godoc.org/github.com/fsouza/go-dockerclient#Stats - // ex (for future expansion): {"read":"2016-08-03T18:08:05Z","pids_stats":{},"network":{},"networks":{"eth0":{"rx_bytes":508,"tx_packets":6,"rx_packets":6,"tx_bytes":508}},"memory_stats":{"stats":{"cache":16384,"pgpgout":281,"rss":8826880,"pgpgin":2440,"total_rss":8826880,"hierarchical_memory_limit":536870912,"total_pgfault":3809,"active_anon":8843264,"total_active_anon":8843264,"total_pgpgout":281,"total_cache":16384,"pgfault":3809,"total_pgpgin":2440},"max_usage":8953856,"usage":8953856,"limit":536870912},"blkio_stats":{"io_service_bytes_recursive":[{"major":202,"op":"Read"},{"major":202,"op":"Write"},{"major":202,"op":"Sync"},{"major":202,"op":"Async"},{"major":202,"op":"Total"}],"io_serviced_recursive":[{"major":202,"op":"Read"},{"major":202,"op":"Write"},{"major":202,"op":"Sync"},{"major":202,"op":"Async"},{"major":202,"op":"Total"}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[47641874],"usage_in_usermode":30000000,"total_usage":47641874},"system_cpu_usage":8880800500000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[44946186],"usage_in_usermode":30000000,"total_usage":44946186},"system_cpu_usage":8880799510000000,"throttling_data":{}}} - // mostly stolen values from docker stats cli api... 
- - // net - "net_rx": uint64(rx), - "net_tx": uint64(tx), - // mem - "mem_limit": ds.MemoryStats.Limit, - "mem_usage": ds.MemoryStats.Usage, - // i/o - "disk_read": blkRead, - "disk_write": blkWrite, - // cpu - "cpu_user": uint64(cpuUser), - "cpu_total": uint64(cpuTotal), - "cpu_kernel": uint64(cpuKernel), - }, - } -} - -// Introduces some randomness to prevent container name clashes where task ID remains the same. -func newContainerID(task drivers.ContainerTask) string { - return fmt.Sprintf("task-%d-%s", time.Now().UnixNano(), task.Id()) -} - -func (drv *DockerDriver) startTask(ctx context.Context, container string) error { - log := common.Logger(ctx) - startTimer := drv.NewTimer("docker", "start_container", 1.0) - log.WithFields(logrus.Fields{"container": container}).Debug("Starting container execution") - err := drv.docker.StartContainerWithContext(container, nil, ctx) - startTimer.Measure() - if err != nil { - dockerErr, ok := err.(*docker.Error) - _, containerAlreadyRunning := err.(*docker.ContainerAlreadyRunning) - if containerAlreadyRunning || (ok && dockerErr.Status == 304) { - // 304=container already started -- so we can ignore error - } else { - return err - } - } - return nil -} - -func (drv *DockerDriver) status(ctx context.Context, container string) (status string, err error) { - log := common.Logger(ctx) - - cinfo, err := drv.docker.InspectContainer(container) - if err != nil { - // this is pretty sad, but better to say we had an error than to not. - // task has run to completion and logs will be uploaded, user can decide - log.WithFields(logrus.Fields{"container": container}).WithError(err).Error("Inspecting container") - return drivers.StatusError, err - } - - exitCode := cinfo.State.ExitCode - log.WithFields(logrus.Fields{ - "exit_code": exitCode, - "container_running": cinfo.State.Running, - "container_status": cinfo.State.Status, - "container_finished": cinfo.State.FinishedAt, - "container_error": cinfo.State.Error, - }).Info("container status") - - select { // do this after inspect so we can see exit code - case <-ctx.Done(): // check if task was canceled or timed out - switch ctx.Err() { - case context.DeadlineExceeded: - return drivers.StatusTimeout, nil - case context.Canceled: - return drivers.StatusCancelled, nil - } - default: - } - - if cinfo.State.Running { - log.Warn("getting status of task that is still running, need to fix this") - return drivers.StatusError, errors.New("task in running state but not timed out. weird") - } - - switch exitCode { - default: - return drivers.StatusError, common.UserError(fmt.Errorf("exit code %d", exitCode)) - case 0: - return drivers.StatusSuccess, nil - case 137: // OOM - drv.Inc("docker", "oom", 1, 1) - if !cinfo.State.OOMKilled { - // It is possible that the host itself is running out of memory and - // the host kernel killed one of the container processes. - // See: https://github.com/docker/docker/issues/15621 - // TODO reed: isn't an OOM an OOM? 
this is wasting space imo - log.WithFields(logrus.Fields{"container": container}).Info("Setting task as OOM killed, but docker disagreed.") - drv.Inc("docker", "possible_oom_false_alarm", 1, 1.0) - } - - return drivers.StatusKilled, drivers.ErrOutOfMemory - } -} diff --git a/vendor/github.com/iron-io/runner/drivers/docker/docker_client.go b/vendor/github.com/iron-io/runner/drivers/docker/docker_client.go deleted file mode 100644 index 35b2db5c0..000000000 --- a/vendor/github.com/iron-io/runner/drivers/docker/docker_client.go +++ /dev/null @@ -1,320 +0,0 @@ -// +build go1.7 - -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package docker - -import ( - "context" - "crypto/tls" - "encoding/json" - "fmt" - "net" - "net/http" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient" - "github.com/iron-io/runner/common" -) - -const ( - retryTimeout = 10 * time.Minute -) - -// wrap docker client calls so we can retry 500s, kind of sucks but fsouza doesn't -// bake in retries we can use internally, could contribute it at some point, would -// be much more convenient if we didn't have to do this, but it's better than ad hoc retries. -// also adds timeouts to many operations, varying by operation -// TODO could generate this, maybe not worth it, may not change often -type dockerClient interface { - // Each of these are github.com/fsouza/go-dockerclient methods - - AttachToContainerNonBlocking(opts docker.AttachToContainerOptions) (docker.CloseWaiter, error) - WaitContainerWithContext(id string, ctx context.Context) (int, error) - StartContainerWithContext(id string, hostConfig *docker.HostConfig, ctx context.Context) error - CreateContainer(opts docker.CreateContainerOptions) (*docker.Container, error) - RemoveContainer(opts docker.RemoveContainerOptions) error - PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error - InspectImage(name string) (*docker.Image, error) - InspectContainer(id string) (*docker.Container, error) - StopContainer(id string, timeout uint) error - Stats(opts docker.StatsOptions) error - Ping() error -} - -// TODO: switch to github.com/docker/engine-api -func newClient(env *common.Environment) dockerClient { - // TODO this was much easier, don't need special settings at the moment - // docker, err := docker.NewClient(conf.Docker) - client, err := docker.NewClientFromEnv() - if err != nil { - logrus.WithError(err).Fatal("couldn't create docker client") - } - - t := &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 10 * time.Second, - KeepAlive: 1 * time.Minute, - }).Dial, - TLSClientConfig: &tls.Config{ - ClientSessionCache: tls.NewLRUClientSessionCache(8192), - }, - TLSHandshakeTimeout: 10 * time.Second, - MaxIdleConnsPerHost: 512, - Proxy: http.ProxyFromEnvironment, - MaxIdleConns: 512, - IdleConnTimeout: 90 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - - client.HTTPClient = &http.Client{Transport: t} - - if err := client.Ping(); err != nil { - 
logrus.WithError(err).Fatal("couldn't connect to docker daemon") - } - - client.SetTimeout(120 * time.Second) - - // get 2 clients, one with a small timeout, one with no timeout to use contexts - - clientNoTimeout, err := docker.NewClientFromEnv() - if err != nil { - logrus.WithError(err).Fatal("couldn't create other docker client") - } - - clientNoTimeout.HTTPClient = &http.Client{Transport: t} - - if err := clientNoTimeout.Ping(); err != nil { - logrus.WithError(err).Fatal("couldn't connect to other docker daemon") - } - - return &dockerWrap{client, clientNoTimeout, env} -} - -type dockerWrap struct { - docker *docker.Client - dockerNoTimeout *docker.Client - *common.Environment -} - -func (d *dockerWrap) retry(ctx context.Context, f func() error) error { - var b common.Backoff - for { - select { - case <-ctx.Done(): - d.Inc("task", "fail.docker", 1, 1) - logrus.WithError(ctx.Err()).Warnf("retrying on docker errors timed out, restart docker or rotate this instance?") - return ctx.Err() - default: - } - - err := filter(f()) - if common.IsTemporary(err) || isDocker50x(err) { - logrus.WithError(err).Warn("docker temporary error, retrying") - b.Sleep() - d.Inc("task", "error.docker", 1, 1) - continue - } - if err != nil { - d.Inc("task", "error.docker", 1, 1) - } - return err - } -} - -func isDocker50x(err error) bool { - derr, ok := err.(*docker.Error) - return ok && derr.Status >= 500 -} - -func containerConfigError(err error) error { - derr, ok := err.(*docker.Error) - if ok && derr.Status == 400 { - // derr.Message is a JSON response from docker, which has a "message" field we want to extract if possible. - var v struct { - Msg string `json:"message"` - } - - err := json.Unmarshal([]byte(derr.Message), &v) - if err != nil { - // If message was not valid JSON, the raw body is still better than nothing. - return fmt.Errorf("%s", derr.Message) - } - return fmt.Errorf("%s", v.Msg) - } - - return nil -} - -type temporary struct { - error -} - -func (t *temporary) Temporary() bool { return true } - -func temp(err error) error { - return &temporary{err} -} - -// some 500s are totally cool -func filter(err error) error { - // "API error (500): {\"message\":\"service endpoint with name task-57d722ecdecb9e7be16aff17 already exists\"}\n" -> ok since container exists - switch { - default: - return err - case err == nil: - return err - case strings.Contains(err.Error(), "service endpoint with name"): - } - logrus.WithError(err).Warn("filtering error") - return nil -} - -func filterNoSuchContainer(err error) error { - if err == nil { - return nil - } - _, containerNotFound := err.(*docker.NoSuchContainer) - dockerErr, ok := err.(*docker.Error) - if containerNotFound || (ok && dockerErr.Status == 404) { - logrus.WithError(err).Error("filtering error") - return nil - } - return err -} - -func filterNotRunning(err error) error { - if err == nil { - return nil - } - - _, containerNotRunning := err.(*docker.ContainerNotRunning) - dockerErr, ok := err.(*docker.Error) - if containerNotRunning || (ok && dockerErr.Status == 304) { - logrus.WithError(err).Error("filtering error") - return nil - } - - return err -} - -func (d *dockerWrap) AttachToContainerNonBlocking(opts docker.AttachToContainerOptions) (w docker.CloseWaiter, err error) { - ctx, cancel := context.WithTimeout(context.Background(), retryTimeout) - defer cancel() - err = d.retry(ctx, func() error { - w, err = d.docker.AttachToContainerNonBlocking(opts) - if err != nil { - // always retry if attach errors, task is running, we want logs! 
- err = temp(err) - } - return err - }) - return w, err -} - -func (d *dockerWrap) WaitContainerWithContext(id string, ctx context.Context) (code int, err error) { - err = d.retry(ctx, func() error { - code, err = d.dockerNoTimeout.WaitContainerWithContext(id, ctx) - return err - }) - return code, filterNoSuchContainer(err) -} - -func (d *dockerWrap) StartContainerWithContext(id string, hostConfig *docker.HostConfig, ctx context.Context) (err error) { - err = d.retry(ctx, func() error { - err = d.dockerNoTimeout.StartContainerWithContext(id, hostConfig, ctx) - if _, ok := err.(*docker.NoSuchContainer); ok { - // for some reason create will sometimes return successfully then say no such container here. wtf. so just retry like normal - return temp(err) - } - return err - }) - return err -} - -func (d *dockerWrap) CreateContainer(opts docker.CreateContainerOptions) (c *docker.Container, err error) { - err = d.retry(opts.Context, func() error { - c, err = d.dockerNoTimeout.CreateContainer(opts) - return err - }) - return c, err -} - -func (d *dockerWrap) PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) (err error) { - err = d.retry(opts.Context, func() error { - err = d.dockerNoTimeout.PullImage(opts, auth) - return err - }) - return err -} - -func (d *dockerWrap) RemoveContainer(opts docker.RemoveContainerOptions) (err error) { - ctx, cancel := context.WithTimeout(context.Background(), retryTimeout) - defer cancel() - err = d.retry(ctx, func() error { - err = d.docker.RemoveContainer(opts) - return err - }) - return filterNoSuchContainer(err) -} - -func (d *dockerWrap) InspectImage(name string) (i *docker.Image, err error) { - ctx, cancel := context.WithTimeout(context.Background(), retryTimeout) - defer cancel() - err = d.retry(ctx, func() error { - i, err = d.docker.InspectImage(name) - return err - }) - return i, err -} - -func (d *dockerWrap) InspectContainer(id string) (c *docker.Container, err error) { - ctx, cancel := context.WithTimeout(context.Background(), retryTimeout) - defer cancel() - err = d.retry(ctx, func() error { - c, err = d.docker.InspectContainer(id) - return err - }) - return c, err -} - -func (d *dockerWrap) StopContainer(id string, timeout uint) (err error) { - ctx, cancel := context.WithTimeout(context.Background(), retryTimeout) - defer cancel() - err = d.retry(ctx, func() error { - err = d.docker.StopContainer(id, timeout) - return err - }) - return filterNotRunning(filterNoSuchContainer(err)) -} - -func (d *dockerWrap) Stats(opts docker.StatsOptions) (err error) { - // we can't retry this one this way since the callee closes the - // stats chan, need a fancier retry mechanism where we can swap out - // channels, but stats isn't crucial so... be lazy for now - return d.docker.Stats(opts) - - //err = d.retry(func() error { - //err = d.docker.Stats(opts) - //return err - //}) - //return err -} - -func (d *dockerWrap) Ping() error { - return d.docker.Ping() -} diff --git a/vendor/github.com/iron-io/runner/drivers/docker/docker_test.go b/vendor/github.com/iron-io/runner/drivers/docker/docker_test.go deleted file mode 100644 index 7c94fd025..000000000 --- a/vendor/github.com/iron-io/runner/drivers/docker/docker_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package docker - -import ( - "bytes" - "context" - "io" - "os" - "strings" - "testing" - "time" - - "github.com/iron-io/runner/common" - "github.com/iron-io/runner/drivers" - "github.com/vrischmann/envconfig" -) - -type taskDockerTest struct { - id string - input io.Reader - output io.Writer -} - -func (f *taskDockerTest) Command() string { return "" } -func (f *taskDockerTest) EnvVars() map[string]string { - return map[string]string{} -} -func (f *taskDockerTest) Labels() map[string]string { return nil } -func (f *taskDockerTest) Id() string { return f.id } -func (f *taskDockerTest) Group() string { return "" } -func (f *taskDockerTest) Image() string { return "iron/hello" } -func (f *taskDockerTest) Timeout() time.Duration { return 30 * time.Second } -func (f *taskDockerTest) Logger() (stdout, stderr io.Writer) { return f.output, nil } -func (f *taskDockerTest) WriteStat(drivers.Stat) { /* TODO */ } -func (f *taskDockerTest) Volumes() [][2]string { return [][2]string{} } -func (f *taskDockerTest) WorkDir() string { return "" } -func (f *taskDockerTest) Close() {} -func (f *taskDockerTest) Input() io.Reader { return f.input } - -func TestDockerIsAlive(t *testing.T) { - - env := common.NewEnvironment(func(e *common.Environment) {}) - dkr := NewDocker(env, drivers.Config{}) - ctx := context.Background() - - isAlive := dkr.IsAlive(ctx) - if !isAlive { - t.Fatal("Docker is not working") - } -} - -func TestRunnerDocker(t *testing.T) { - env := common.NewEnvironment(func(e *common.Environment) {}) - dkr := NewDocker(env, drivers.Config{}) - ctx := context.Background() - - task := &taskDockerTest{"test-docker", nil, nil} - - cookie, err := dkr.Prepare(ctx, task) - if err != nil { - t.Fatal("Couldn't prepare task test") - } - defer cookie.Close() - - result, err := cookie.Run(ctx) - if err != nil { - t.Fatal(err) - } - - if result.Status() != "success" { - t.Fatal("Test should successfully run the image") - } -} - -func TestRunnerDockerStdin(t *testing.T) { - env := common.NewEnvironment(func(e *common.Environment) {}) - dkr := NewDocker(env, drivers.Config{}) - ctx := context.Background() - - input := `{"name": "test"}` - var output bytes.Buffer - - task := &taskDockerTest{"test-docker-stdin", bytes.NewBufferString(input), &output} - - cookie, err := dkr.Prepare(ctx, task) - if err != nil { - t.Fatal("Couldn't prepare task test") - } - defer cookie.Close() - - result, err := cookie.Run(ctx) - if err != nil { - t.Fatal(err) - } - - if result.Status() != "success" { - t.Error("Test should successfully run the image") - } - - expect := "Hello test!" 
- got := output.String() - if !strings.Contains(got, expect) { - t.Errorf("Test expected output to contain '%s', got '%s'", expect, got) - } -} - -func TestConfigLoadMemory(t *testing.T) { - if err := os.Setenv("MEMORY_PER_JOB", "128M"); err != nil { - t.Fatalf("Could not set MEMORY_PER_JOB: %v", err) - } - - var conf drivers.Config - if err := envconfig.Init(&conf); err != nil { - t.Fatalf("Could not read config: %v", err) - } - - if conf.Memory != 128*1024*1024 { - t.Fatalf("Memory read from config should match 128M, got %d", conf.Memory) - } -} diff --git a/vendor/github.com/iron-io/runner/drivers/driver.go b/vendor/github.com/iron-io/runner/drivers/driver.go deleted file mode 100644 index 82d6da24c..000000000 --- a/vendor/github.com/iron-io/runner/drivers/driver.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Interface for all container drivers - -package drivers - -import ( - "context" - "errors" - "io" - "strings" - "time" - - "code.cloudfoundry.org/bytefmt" -) - -// A DriverCookie identifies a unique request to run a task. -// -// Clients should always call Close() on a DriverCookie after they are done -// with it. -type Cookie interface { - io.Closer - - // Run should execute task on the implementation. - // RunResult captures the result of task execution. This means if task - // execution fails due to a problem in the task, Run() MUST return a valid - // RunResult and nil as the error. The RunResult's Error() and Status() - // should be used to indicate failure. - // If the implementation itself suffers problems (lost of network, out of - // disk etc.), a nil RunResult and an error message is preferred. - // - // Run() MUST monitor the context. task cancellation is indicated by - // cancelling the context. - Run(ctx context.Context) (RunResult, error) -} - -type Driver interface { - // Prepare can be used in order to do any preparation that a specific driver - // may need to do before running the task, and can be useful to put - // preparation that the task can recover from into (i.e. if pulling an image - // fails because a registry is down, the task doesn't need to be failed). It - // returns a cookie that can be used to execute the task. - // Callers should Close the cookie regardless of whether they run it. - // - // The returned cookie should respect the task's timeout when it is run. - Prepare(ctx context.Context, task ContainerTask) (Cookie, error) - - IsAlive(ctx context.Context) bool -} - -// RunResult indicates only the final state of the task. -type RunResult interface { - // Error is an actionable/checkable error from the container. - error - - // Status should return the current status of the task. - // Only valid options are {"error", "success", "timeout", "killed", "cancelled"}. - Status() string -} - -// The ContainerTask interface guides task execution across a wide variety of -// container oriented runtimes. -// This interface is unstable. 
-// -// FIXME: This interface is large, and it is currently a little Docker specific. -type ContainerTask interface { - // Command returns the command to run within the container. - Command() string - // EnvVars returns environment variable key-value pairs. - EnvVars() map[string]string - // Input feeds the container with data - Input() io.Reader - // Labels returns container label key-value pairs. - Labels() map[string]string - Id() string - // Image returns the runtime specific image to run. - Image() string - // Timeout specifies the maximum time a task is allowed to run. Return 0 to let it run forever. - Timeout() time.Duration - // Driver will write output log from task execution to these writers. Must be - // non-nil. Use io.Discard if log is irrelevant. - Logger() (stdout, stderr io.Writer) - // WriteStat writes a single Stat, implementation need not be thread safe. - WriteStat(Stat) - // Volumes returns an array of 2-element tuples indicating storage volume mounts. - // The first element is the path on the host, and the second element is the - // path in the container. - Volumes() [][2]string - // WorkDir returns the working directory to use for the task. Empty string - // leaves it unset. - WorkDir() string - - // Close is used to perform cleanup after task execution. - // Close should be safe to call multiple times. - Close() -} - -// Stat is a bucket of stats from a driver at a point in time for a certain task. -type Stat struct { - Timestamp time.Time - Metrics map[string]uint64 -} - -// Set of acceptable errors coming from container engines to TaskRunner -var ( - // ErrOutOfMemory for OOM in container engine - ErrOutOfMemory = userError(errors.New("out of memory error")) -) - -// TODO agent.UserError should be elsewhere -func userError(err error) error { return &ue{err} } - -type ue struct { - error -} - -func (u *ue) UserVisible() bool { return true } - -// TODO: ensure some type is applied to these statuses. -const ( - // task statuses - StatusRunning = "running" - StatusSuccess = "success" - StatusError = "error" - StatusTimeout = "timeout" - StatusKilled = "killed" - StatusCancelled = "cancelled" -) - -// Allows us to implement custom unmarshaling of JSON and envconfig. 
-type Memory uint64 - -func (m *Memory) Unmarshal(s string) error { - temp, err := bytefmt.ToBytes(s) - if err != nil { - return err - } - - *m = Memory(temp) - return nil -} - -func (m *Memory) UnmarshalJSON(p []byte) error { - temp, err := bytefmt.ToBytes(string(p)) - if err != nil { - return err - } - - *m = Memory(temp) - return nil -} - -type Config struct { - Docker string `json:"docker" envconfig:"default=unix:///var/run/docker.sock,DOCKER"` - Memory Memory `json:"memory" envconfig:"default=256M,MEMORY_PER_JOB"` - CPUShares int64 `json:"cpu_shares" envconfig:"default=2,CPU_SHARES"` -} - -// for tests -func DefaultConfig() Config { - return Config{ - Docker: "unix:///var/run/docker.sock", - Memory: 256 * 1024 * 1024, - CPUShares: 0, - } -} - -func average(samples []Stat) (Stat, bool) { - l := len(samples) - if l == 0 { - return Stat{}, false - } else if l == 1 { - return samples[0], true - } - - s := Stat{ - Metrics: samples[0].Metrics, // Recycle Metrics map from first sample - } - t := samples[0].Timestamp.UnixNano() / int64(l) - for _, sample := range samples[1:] { - t += sample.Timestamp.UnixNano() / int64(l) - for k, v := range sample.Metrics { - s.Metrics[k] += v - } - } - - s.Timestamp = time.Unix(0, t) - for k, v := range s.Metrics { - s.Metrics[k] = v / uint64(l) - } - return s, true -} - -// Decimate will down sample to a max number of points in a given sample by -// averaging samples together. i.e. max=240, if we have 240 samples, return -// them all, if we have 480 samples, every 2 samples average them (and time -// distance), and return 240 samples. This is relatively naive and if len(in) > -// max, <= max points will be returned, not necessarily max: length(out) = -// ceil(length(in)/max) -- feel free to fix this, setting a relatively high max -// will allow good enough granularity at higher lengths, i.e. for max of 1 hour -// tasks, sampling every 1s, decimate will return 15s samples if max=240. -// Large gaps in time between samples (a factor > (last-start)/max) will result -// in a shorter list being returned to account for lost samples. -// Decimate will modify the input list for efficiency, it is not copy safe. -// Input must be sorted by timestamp or this will fail gloriously. 
-func Decimate(maxSamples int, stats []Stat) []Stat { - if len(stats) <= maxSamples { - return stats - } else if maxSamples <= 0 { // protect from nefarious input - return nil - } - - start := stats[0].Timestamp - window := stats[len(stats)-1].Timestamp.Sub(start) / time.Duration(maxSamples) - - nextEntry, current := 0, start // nextEntry is the index tracking next Stats record location - for x := 0; x < len(stats); { - isLastEntry := nextEntry == maxSamples-1 // Last bin is larger than others to handle imprecision - - var samples []Stat - for offset := 0; x+offset < len(stats); offset++ { // Iterate through samples until out of window - if !isLastEntry && stats[x+offset].Timestamp.After(current.Add(window)) { - break - } - samples = stats[x : x+offset+1] - } - - x += len(samples) // Skip # of samples for next window - if entry, ok := average(samples); ok { // Only record Stat if 1+ samples exist - stats[nextEntry] = entry - nextEntry++ - } - - current = current.Add(window) - } - return stats[:nextEntry] // Return slice of []Stats that was modified with averages -} - -// https://github.com/fsouza/go-dockerclient/blob/master/misc.go#L166 -func parseRepositoryTag(repoTag string) (repository string, tag string) { - parts := strings.SplitN(repoTag, "@", 2) - repoTag = parts[0] - n := strings.LastIndex(repoTag, ":") - if n < 0 { - return repoTag, "" - } - if tag := repoTag[n+1:]; !strings.Contains(tag, "/") { - return repoTag[:n], tag - } - return repoTag, "" -} - -func ParseImage(image string) (registry, repo, tag string) { - repo, tag = parseRepositoryTag(image) - // Officially sanctioned at https://github.com/docker/docker/blob/master/registry/session.go#L319 to deal with "Official Repositories". - // Without this, token auth fails. - // Registries must exist at root (https://github.com/docker/docker/issues/7067#issuecomment-54302847) - // This cannot support the `library/` shortcut for private registries. - parts := strings.Split(repo, "/") - switch len(parts) { - case 1: - repo = "library/" + repo - case 2: - if strings.Contains(repo, ".") { - registry = parts[0] - repo = parts[1] - } - case 3: - registry = parts[0] - repo = parts[1] + "/" + parts[2] - } - - if tag == "" { - tag = "latest" - } - - return registry, repo, tag -} diff --git a/vendor/github.com/iron-io/runner/drivers/driver_test.go b/vendor/github.com/iron-io/runner/drivers/driver_test.go deleted file mode 100644 index 2148b0a32..000000000 --- a/vendor/github.com/iron-io/runner/drivers/driver_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package drivers - -import ( - "testing" - "time" -) - -func TestAverage(t *testing.T) { - start := time.Date(2016, 8, 11, 0, 0, 0, 0, time.UTC) - stats := make([]Stat, 10) - for i := 0; i < len(stats); i++ { - stats[i] = Stat{ - Timestamp: start.Add(time.Duration(i) * time.Minute), - Metrics: map[string]uint64{"x": uint64(i)}, - } - } - - res, ok := average(stats) - if !ok { - t.Error("Expected good record") - } - - expectedV := uint64(4) - if v, ok := res.Metrics["x"]; !ok || v != expectedV { - t.Error("Actual average didn't match expected", "actual", v, "expected", expectedV) - } - - expectedT := time.Unix(1470873870, 0) - if res.Timestamp != expectedT { - t.Error("Actual average didn't match expected", "actual", res.Timestamp, "expected", expectedT) - } -} - -func TestDecimate(t *testing.T) { - start := time.Now() - stats := make([]Stat, 480) - for i := range stats { - stats[i] = Stat{ - Timestamp: start.Add(time.Duration(i) * time.Second), - Metrics: map[string]uint64{"x": uint64(i)}, - } - // t.Log(stats[i]) - } - - stats = Decimate(240, stats) - if len(stats) != 240 { - t.Error("decimate function bad", len(stats)) - } - - //for i := range stats { - //t.Log(stats[i]) - //} - - stats = make([]Stat, 700) - for i := range stats { - stats[i] = Stat{ - Timestamp: start.Add(time.Duration(i) * time.Second), - Metrics: map[string]uint64{"x": uint64(i)}, - } - } - stats = Decimate(240, stats) - if len(stats) != 240 { - t.Error("decimate function bad", len(stats)) - } - - stats = make([]Stat, 300) - for i := range stats { - stats[i] = Stat{ - Timestamp: start.Add(time.Duration(i) * time.Second), - Metrics: map[string]uint64{"x": uint64(i)}, - } - } - stats = Decimate(240, stats) - if len(stats) != 240 { - t.Error("decimate function bad", len(stats)) - } - - stats = make([]Stat, 300) - for i := range stats { - if i == 150 { - // leave 1 large gap - start = start.Add(20 * time.Minute) - } - stats[i] = Stat{ - Timestamp: start.Add(time.Duration(i) * time.Second), - Metrics: map[string]uint64{"x": uint64(i)}, - } - } - stats = Decimate(240, stats) - if len(stats) != 49 { - t.Error("decimate function bad", len(stats)) - } -} - -func TestParseImage(t *testing.T) { - cases := map[string][]string{ - "iron/hello": {"", "iron/hello", "latest"}, - "iron/hello:v1": {"", "iron/hello", "v1"}, - "my.registry/hello": {"my.registry", "hello", "latest"}, - "my.registry/hello:v1": {"my.registry", "hello", "v1"}, - "mongo": {"", "library/mongo", "latest"}, - "mongo:v1": {"", "library/mongo", "v1"}, - "quay.com/iron/hello": {"quay.com", "iron/hello", "latest"}, - "quay.com:8080/iron/hello:v2": {"quay.com:8080", "iron/hello", "v2"}, - "localhost.localdomain:5000/samalba/hipache:latest": {"localhost.localdomain:5000", "samalba/hipache", "latest"}, - } - - for in, out := range cases { - reg, repo, tag := ParseImage(in) - if reg != out[0] || repo != out[1] || tag != out[2] { - t.Errorf("Test input %q wasn't parsed as expected. Expected %q, got %q", in, out, []string{reg, repo, tag}) - } - } -} diff --git a/vendor/github.com/iron-io/runner/drivers/mock/mocker.go b/vendor/github.com/iron-io/runner/drivers/mock/mocker.go deleted file mode 100644 index 7f03cb655..000000000 --- a/vendor/github.com/iron-io/runner/drivers/mock/mocker.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2016 Iron.io -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mock - -import ( - "context" - "fmt" - - "github.com/iron-io/runner/drivers" -) - -func New() drivers.Driver { - return &Mocker{} -} - -type Mocker struct { - count int -} - -func (m *Mocker) Prepare(context.Context, drivers.ContainerTask) (drivers.Cookie, error) { - return &cookie{m}, nil -} - -func (m *Mocker) IsAlive(ctx context.Context) bool { - return true -} - -type cookie struct { - m *Mocker -} - -func (c *cookie) Close() error { return nil } - -func (c *cookie) Run(ctx context.Context) (drivers.RunResult, error) { - c.m.count++ - if c.m.count%100 == 0 { - return nil, fmt.Errorf("Mocker error! Bad.") - } - return &runResult{ - error: nil, - StatusValue: "success", - }, nil -} - -type runResult struct { - error - StatusValue string -} - -func (runResult *runResult) Status() string { - return runResult.StatusValue -} diff --git a/vendor/github.com/iron-io/runner/glide.lock b/vendor/github.com/iron-io/runner/glide.lock deleted file mode 100644 index b21519518..000000000 --- a/vendor/github.com/iron-io/runner/glide.lock +++ /dev/null @@ -1,104 +0,0 @@ -hash: d7c3318fa4d64560e9149d2ebaf0e863232ab42ffbdf13473d4260a23d2ad2b8 -updated: 2017-04-05T13:35:00.148390772-07:00 -imports: -- name: code.cloudfoundry.org/bytefmt - version: a75017a21993c80187c7fa4f3c1ec22ddd6a8cd5 -- name: github.com/amir/raidman - version: c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985 - subpackages: - - proto -- name: github.com/Azure/go-ansiterm - version: fa152c58bc15761d0200cb75fe958b89a9d4888e - subpackages: - - winterm -- name: github.com/cactus/go-statsd-client - version: 91c326c3f7bd20f0226d3d1c289dd9f8ce28d33d - subpackages: - - statsd -- name: github.com/docker/distribution - version: 99cb7c0946d2f5a38015443e515dc916295064d7 - subpackages: - - context - - digest - - manifest - - manifest/schema1 - - manifest/schema2 - - reference - - uuid -- name: github.com/docker/docker - version: 092cba3727bb9b4a2f0e922cd6c0f93ea270e363 - subpackages: - - api/types - - api/types/blkiodev - - api/types/container - - api/types/filters - - api/types/mount - - api/types/network - - api/types/registry - - api/types/strslice - - api/types/swarm - - api/types/versions - - opts - - pkg/archive - - pkg/fileutils - - pkg/homedir - - pkg/idtools - - pkg/ioutils - - pkg/jsonlog - - pkg/jsonmessage - - pkg/longpath - - pkg/pools - - pkg/promise - - pkg/stdcopy - - pkg/system - - pkg/term - - pkg/term/windows -- name: github.com/docker/go-connections - version: 4ccf312bf1d35e5dbda654e57a9be4c3f3cd0366 - subpackages: - - nat -- name: github.com/docker/go-units - version: e30f1e79f3cd72542f2026ceec18d3bd67ab859c -- name: github.com/docker/libtrust - version: fa567046d9b14f6aa788882a950d69651d230b21 -- name: github.com/fsouza/go-dockerclient - version: e24e809e9db395f1e3c85af1b88f2002023610f5 -- name: github.com/golang/protobuf - version: 2402d76f3d41f928c7902a765dfc872356dd3aad - subpackages: - - proto -- name: github.com/gorilla/context - version: 14f550f51af52180c2eefed15e5fd18d63c0a64a -- name: github.com/gorilla/mux - version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf -- name: github.com/hashicorp/go-cleanhttp 
- version: ad28ea4487f05916463e2423a55166280e8254b5 -- name: github.com/heroku/docker-registry-client - version: 95467b6cacee2a06f112a3cf7e47a70fad6000cf - subpackages: - - registry -- name: github.com/Microsoft/go-winio - version: 24a3e3d3fc7451805e09d11e11e95d9a0a4f205e -- name: github.com/opencontainers/runc - version: 8893fa693bf9bf29e5a156369bc51b887df43924 - subpackages: - - libcontainer/system - - libcontainer/user -- name: github.com/Sirupsen/logrus - version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f - subpackages: - - hooks/syslog -- name: golang.org/x/net - version: 4876518f9e71663000c348837735820161a42df7 - subpackages: - - context - - context/ctxhttp - - proxy -- name: golang.org/x/sys - version: d5645953809d8b4752afb2c3224b1f1ad73dfa70 - subpackages: - - unix - - windows -testImports: -- name: github.com/vrischmann/envconfig - version: 757beaaeac8d14bcc7ea3f71488d65cf45cf2eff diff --git a/vendor/github.com/iron-io/runner/glide.yaml b/vendor/github.com/iron-io/runner/glide.yaml deleted file mode 100644 index eb3259700..000000000 --- a/vendor/github.com/iron-io/runner/glide.yaml +++ /dev/null @@ -1,23 +0,0 @@ -package: github.com/iron-io/runner -import: -- package: github.com/Sirupsen/logrus - version: ^0.10.0 - subpackages: - - hooks/syslog -- package: github.com/amir/raidman -- package: github.com/cactus/go-statsd-client - version: ^3.1.0 - subpackages: - - statsd -- package: github.com/docker/docker - version: ^1.13.1 -- package: github.com/fsouza/go-dockerclient - version: master -- package: github.com/heroku/docker-registry-client - version: master - subpackages: - - registry -- package: code.cloudfoundry.org/bytefmt -testImport: -- package: github.com/vrischmann/envconfig - version: 757beaaeac8d14bcc7ea3f71488d65cf45cf2eff diff --git a/vendor/github.com/iron-io/runner/test.sh b/vendor/github.com/iron-io/runner/test.sh deleted file mode 100755 index 76e26f3b4..000000000 --- a/vendor/github.com/iron-io/runner/test.sh +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2016 Iron.io -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -export LOG_LEVEL=debug - -rm -rf vendor/github.com/heroku/docker-registry-client/vendor -go test -v $(go list ./... 
| grep -v /vendor/ | grep -v /examples/) \ No newline at end of file diff --git a/vendor/github.com/vrischmann/envconfig/.travis.yml b/vendor/github.com/vrischmann/envconfig/.travis.yml deleted file mode 100644 index 3d57ed3df..000000000 --- a/vendor/github.com/vrischmann/envconfig/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip diff --git a/vendor/github.com/vrischmann/envconfig/LICENSE b/vendor/github.com/vrischmann/envconfig/LICENSE deleted file mode 100644 index e5bf20cdb..000000000 --- a/vendor/github.com/vrischmann/envconfig/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Vincent Rischmann - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/vrischmann/envconfig/README.md b/vendor/github.com/vrischmann/envconfig/README.md deleted file mode 100644 index 1de571086..000000000 --- a/vendor/github.com/vrischmann/envconfig/README.md +++ /dev/null @@ -1,151 +0,0 @@ -envconfig -========= - -[![Build Status](https://travis-ci.org/vrischmann/envconfig.svg?branch=master)](https://travis-ci.org/vrischmann/envconfig) -[![GoDoc](https://godoc.org/github.com/vrischmann/envconfig?status.svg)](https://godoc.org/github.com/vrischmann/envconfig) - -envconfig is a library which allows you to parse your configuration from environment variables and fill an arbitrary struct. - -See [the example](https://godoc.org/github.com/vrischmann/envconfig#example-Init) to understand how to use it, it's pretty simple. - -Supported types ---------------- - - * Almost all standard types plus `time.Duration` are supported by default. - * Slices and arrays - * Arbitrary structs - * Custom types via the [Unmarshaler](https://godoc.org/github.com/vrischmann/envconfig/#Unmarshaler) interface. - -How does it work ----------------- - -*envconfig* takes the hierarchy of your configuration struct and the names of the fields to create a environment variable key. - -For example: - -```go -var conf struct { - Name string - Shard struct { - Host string - Port int - } -} -``` - -This will check for those 3 keys: - - * NAME or name - * SHARD\_HOST, or shard\_host - * SHARD\_PORT, or shard\_port - -Flexible key naming -------------------- - -*envconfig* supports having underscores in the key names where there is a _word boundary_. 
Now, that term is not super explicit, so let me show you an example: - -```go -var conf struct { - Cassandra struct { - SSLCert string - SslKey string - } -} -``` - -This will check all of the following keys: - - * CASSANDRA\_SSL\_CERT, CASSANDRA\_SSLCERT, cassandra\_ssl\_cert, cassandra\_sslcert - * CASSANDRA\_SSL\_KEY, CASSANDRA\_SSLKEY, cassandra\_ssl\_key, cassandra\_sslkey - -If that is not good enough, look just below. - -Custom environment variable names ---------------------------------- - -*envconfig* supports custom environment variable names: - -```go -var conf struct { - Name string `envconfig:"myName"` -} -``` - -Default values --------------- - -*envconfig* supports default values: - -```go -var conf struct { - Name string `envconfig:"default=Vincent"` -} -``` - -Optional values ---------------- - -*envconfig* supports optional values: - -```go -var conf struct { - Name string `envconfig:"optional"` - Age int `envconfig:"-"` -} -``` - -The two syntax are equivalent. - -Combining multiple options in one tag -------------------------------------- - -You can of course combine multiple options: - -```go -var conf struct { - Name string `envconfig:"default=Vincent,myName"` -} -``` - -Slices or arrays ----------------- - -With slices or arrays, the same naming is applied for the slice. To put multiple elements into the slice or array, you need to separate -them with a *,* (will probably be configurable in the future, or at least have a way to escape) - -For example: - -```go -var conf struct { - Ports []int -} -``` - -This will check for the key __PORTS__: - - * if your variable is *9000* the slice will contain only 9000 - * if your variable is *9000,100* the slice will contain 9000 and 100 - -For slices of structs, it's a little more complicated. The same splitting of slice elements is done with a *comma*, however, each token must follow -a specific format like this: `{,,...}` - -For example: - -```go -var conf struct { - Shards []struct { - Name string - Port int - } -} -``` - -This will check for the key __SHARDS__. Example variable content: `{foobar,9000},{barbaz,20000}` - -This will result in two struct defined in the *Shards* slice. - -Future work ------------ - - * support for time.Time values with a layout defined via a field tag - * support for complex types diff --git a/vendor/github.com/vrischmann/envconfig/doc.go b/vendor/github.com/vrischmann/envconfig/doc.go deleted file mode 100644 index 0e8d7c883..000000000 --- a/vendor/github.com/vrischmann/envconfig/doc.go +++ /dev/null @@ -1,199 +0,0 @@ -/* -Package envconfig implements a configuration reader which reads each value from an environment variable. - -The basic idea is that you define a configuration struct, like this: - - var conf struct { - Addr string - Port int - Auth struct { - Key string - Endpoint string - } - Partitions []int - Shards []struct { - Name string - Id int - } - } - -Once you have that, you need to initialize the configuration: - - if err := envconfig.Init(&conf); err != nil { - log.Fatalln(err) - } - -Then it's just a matter of setting the environment variables when calling your binary: - - ADDR=localhost PORT=6379 AUTH_KEY=foobar ./mybinary - -Layout of the conf struct - -Your conf struct must follow the following rules: - - no unexported fields by default (can turn off with Options.AllowUnexported) - - only supported types (no map fields for example) - -Naming of the keys - -By default, envconfig generates all possible keys based on the field chain according to a flexible naming scheme. 
- -The field chain is how you access your field in the configuration struct. For example: - - var conf struct { - Shard struct { - Name string - } - } - -With that struct, you access the name field via the chain *Shard.Name* - -The default naming scheme takes that and transforms it into the following: - - SHARD_NAME - - shard_name - -It can handles more complicated cases, with multiple words in one field name. It needs to be in the correct case though, for example: - - var conf struct { - Cassandra struct { - SSLCert string - SslKey string - } - } - -With that struct, you access the name field via the chain *Cassandra.SSLCert* or *Cassandra.SslKey* - -The default naming scheme takes that and transforms it into the following: - - CASSANDRA_SSL_CERT, cassandra_ssl_cert, CASSANDRA_SSLCERT, cassandra_sslcert - - CASSANDRA_SSL_KEY, cassandra_ssl_key, CASSANDRA_SSLKEY, cassandra_sslkey - -And, if that is not good enough for you, you always have the option to use a custom key: - - var conf struct { - Cassandra struct { - Name string `envconfig:"cassandraMyName"` - } - } - -Now envconfig will only ever checks the environment variable _cassandraMyName_. - - -Content of the variables - -There are three types of content for a single variable: - - for simple types, a single string representing the value, and parseable into the type. - - for slices or arrays, a comma-separated list of strings. Each string must be parseable into the element type of the slice or array. - - for structs, a comma-separated list of specially formatted strings representing structs. - -Example of a valid slice value: - foo,bar,baz - -The format for a struct is as follow: - - prefixed with { - - suffixed with } - - contains a comma-separated list of field values, in the order in which they are defined in the struct - -Example of a valid struct value: - type MyStruct struct { - Name string - Id int - Timeout time.Duration - } - - {foobar,10,120s} - -Example of a valid slice of struct values: - {foobar,10,120s},{barbaz,20,50s} - -Special case for bytes slices - -For bytes slices, you generally don't want to type out a comma-separated list of byte values. - -For this use case, we support base64 encoded values. - -Here's an example: - - var conf struct { - Data []byte - } - - os.Setenv("DATA", "Rk9PQkFS") - -This will decode DATA to FOOBAR and put that into conf.Data. - -Optional values - -Sometimes you don't absolutely need a value. Here's how we tell envconfig a value is optional: - - var conf struct { - Name string `envconfig:"optional"` - Age int `envconfig:"-"` - } - -The two syntax are equivalent. - -Default values - -Often times you have configuration keys which almost never changes, but you still want to be able to change them. - -In such cases, you might want to provide a default value. - -Here's to do this with envconfig: - - var conf struct { - Timeout time.Duration `envconfig:"default=1m"` - } - -Combining options - -You can of course combine multiple options. The syntax is simple enough, separate each option with a comma. - -For example: - - var conf struct { - Timeout time.Duration `envconfig:"default=1m,myTimeout"` - } - -This would give you the default timeout of 1 minute, and lookup the myTimeout environment variable. - -Supported types - -envconfig supports the following list of types: - - - bool - - string - - intX - - uintX - - floatX - - time.Duration - - pointers to all of the above types - -Notably, we don't (yet) support complex types simply because I had no use for it yet. 
- -Custom unmarshaler - -When the standard types are not enough, you will want to use a custom unmarshaler for your types. - -You do this by implementing Unmarshaler on your type. Here's an example: - - type connectionType uint - - const ( - tlsConnection connectionType = iota - insecureConnection - ) - - func (t *connectionType) Unmarshal(s string) error { - switch s { - case "tls": - *t = tlsConnection - case "insecure": - *t = insecureConnection - default: - return fmt.Errorf("unable to unmarshal %s to a connection type", s) - } - - return nil - } - -*/ -package envconfig diff --git a/vendor/github.com/vrischmann/envconfig/envconfig.go b/vendor/github.com/vrischmann/envconfig/envconfig.go deleted file mode 100644 index a096f88a0..000000000 --- a/vendor/github.com/vrischmann/envconfig/envconfig.go +++ /dev/null @@ -1,487 +0,0 @@ -package envconfig - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "os" - "reflect" - "sort" - "strconv" - "strings" - "time" - "unicode" -) - -var ( - // ErrUnexportedField is the error returned by the Init* functions when a field of the config struct is not exported and the option AllowUnexported is not used. - ErrUnexportedField = errors.New("envconfig: unexported field") - // ErrNotAPointer is the error returned by the Init* functions when the configuration object is not a pointer. - ErrNotAPointer = errors.New("envconfig: value is not a pointer") - // ErrInvalidValueKind is the error returned by the Init* functions when the configuration object is not a struct. - ErrInvalidValueKind = errors.New("envconfig: invalid value kind, only works on structs") - // ErrDefaultUnsupportedOnSlice is the error returned by the Init* functions when there is a default tag on a slice. - // The `default` tag is unsupported on slices because slice parsing uses , as the separator, as does the envconfig tags separator. - ErrDefaultUnsupportedOnSlice = errors.New("envconfig: default tag unsupported on slice") -) - -type context struct { - name string - customName string - defaultVal string - parents []reflect.Value - optional, leaveNil bool - allowUnexported bool -} - -// Unmarshaler is the interface implemented by objects that can unmarshal -// a environment variable string of themselves. -type Unmarshaler interface { - Unmarshal(s string) error -} - -// Options is used to customize the behavior of envconfig. Use it with InitWithOptions. -type Options struct { - // Prefix allows specifying a prefix for each key. - Prefix string - - // AllOptional determines whether to not throw errors by default for any key - // that is not found. AllOptional=true means errors will not be thrown. - AllOptional bool - - // LeaveNil specifies whether to not create new pointers for any pointer fields - // found within the passed config. Rather, it behaves such that if and only if - // there is a) a non-empty field in the value or b) a non-empty value that - // the pointer is pointing to will a new pointer be created. By default, - // LeaveNil=false will create all pointers in all structs if they are nil. - // - // var X struct { - // A *struct{ - // B string - // } - // } - // envconfig.InitWithOptions(&X, Options{LeaveNil: true}) - // - // $ ./program - // - // X.A == nil - // - // $ A_B="string" ./program - // - // X.A.B="string" // A will not be nil - LeaveNil bool - - // AllowUnexported allows unexported fields to be present in the passed config. - AllowUnexported bool -} - -// Init reads the configuration from environment variables and populates the conf object. 
conf must be a pointer -func Init(conf interface{}) error { - return InitWithOptions(conf, Options{}) -} - -// InitWithPrefix reads the configuration from environment variables and populates the conf object. conf must be a pointer. -// Each key read will be prefixed with the prefix string. -func InitWithPrefix(conf interface{}, prefix string) error { - return InitWithOptions(conf, Options{Prefix: prefix}) -} - -// InitWithOptions reads the configuration from environment variables and populates the conf object. -// conf must be a pointer. -func InitWithOptions(conf interface{}, opts Options) error { - value := reflect.ValueOf(conf) - if value.Kind() != reflect.Ptr { - return ErrNotAPointer - } - - elem := value.Elem() - - ctx := context{ - name: opts.Prefix, - optional: opts.AllOptional, - leaveNil: opts.LeaveNil, - allowUnexported: opts.AllowUnexported, - } - switch elem.Kind() { - case reflect.Ptr: - if elem.IsNil() { - elem.Set(reflect.New(elem.Type().Elem())) - } - _, err := readStruct(elem.Elem(), &ctx) - return err - case reflect.Struct: - _, err := readStruct(elem, &ctx) - return err - default: - return ErrInvalidValueKind - } -} - -type tag struct { - customName string - optional bool - skip bool - defaultVal string -} - -func parseTag(s string) *tag { - var t tag - - tokens := strings.Split(s, ",") - for _, v := range tokens { - switch { - case v == "-": - t.skip = true - case v == "optional": - t.optional = true - case strings.HasPrefix(v, "default="): - t.defaultVal = strings.TrimPrefix(v, "default=") - default: - t.customName = v - } - } - - return &t -} - -func readStruct(value reflect.Value, ctx *context) (nonNil bool, err error) { - var parents []reflect.Value - - for i := 0; i < value.NumField(); i++ { - field := value.Field(i) - name := value.Type().Field(i).Name - - tag := parseTag(value.Type().Field(i).Tag.Get("envconfig")) - if tag.skip || !field.CanSet() { - if !field.CanSet() && !ctx.allowUnexported { - return false, ErrUnexportedField - } - continue - } - - parents = ctx.parents - - doRead: - switch field.Kind() { - case reflect.Ptr: - // it's a pointer, create a new value and restart the switch - if field.IsNil() { - field.Set(reflect.New(field.Type().Elem())) - parents = append(parents, field) // track parent pointers to deallocate if no children are filled in - } - field = field.Elem() - goto doRead - case reflect.Struct: - var nonNilIn bool - nonNilIn, err = readStruct(field, &context{ - name: combineName(ctx.name, name), - optional: ctx.optional || tag.optional, - defaultVal: tag.defaultVal, - parents: parents, - leaveNil: ctx.leaveNil, - allowUnexported: ctx.allowUnexported, - }) - nonNil = nonNil || nonNilIn - default: - var ok bool - ok, err = setField(field, &context{ - name: combineName(ctx.name, name), - customName: tag.customName, - optional: ctx.optional || tag.optional, - defaultVal: tag.defaultVal, - parents: parents, - leaveNil: ctx.leaveNil, - allowUnexported: ctx.allowUnexported, - }) - nonNil = nonNil || ok - } - - if err != nil { - return false, err - } - } - - if !nonNil && ctx.leaveNil { // re-zero - for _, p := range parents { - p.Set(reflect.Zero(p.Type())) - } - } - - return nonNil, err -} - -var byteSliceType = reflect.TypeOf([]byte(nil)) - -func setField(value reflect.Value, ctx *context) (ok bool, err error) { - str, err := readValue(ctx) - if err != nil { - return false, err - } - - if len(str) == 0 && ctx.optional { - return false, nil - } - - isSliceNotUnmarshaler := value.Kind() == reflect.Slice && !isUnmarshaler(value.Type()) - switch 
{ - case isSliceNotUnmarshaler && value.Type() == byteSliceType: - return true, parseBytesValue(value, str) - - case isSliceNotUnmarshaler: - return true, setSliceField(value, str, ctx) - - default: - return true, parseValue(value, str, ctx) - } -} - -func setSliceField(value reflect.Value, str string, ctx *context) error { - if ctx.defaultVal != "" { - return ErrDefaultUnsupportedOnSlice - } - - elType := value.Type().Elem() - tnz := newSliceTokenizer(str) - - slice := reflect.MakeSlice(value.Type(), value.Len(), value.Cap()) - - for tnz.scan() { - token := tnz.text() - - el := reflect.New(elType).Elem() - - if err := parseValue(el, token, ctx); err != nil { - return err - } - - slice = reflect.Append(slice, el) - } - - value.Set(slice) - - return tnz.Err() -} - -var ( - durationType = reflect.TypeOf(new(time.Duration)).Elem() - unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() -) - -func isDurationField(t reflect.Type) bool { - return t.AssignableTo(durationType) -} - -func isUnmarshaler(t reflect.Type) bool { - return t.Implements(unmarshalerType) || reflect.PtrTo(t).Implements(unmarshalerType) -} - -func parseValue(v reflect.Value, str string, ctx *context) (err error) { - vtype := v.Type() - - // Special case when the type is a map: we need to make the map - switch vtype.Kind() { - case reflect.Map: - v.Set(reflect.MakeMap(vtype)) - } - - // Special case for Unmarshaler - if isUnmarshaler(vtype) { - return parseWithUnmarshaler(v, str) - } - - // Special case for time.Duration - if isDurationField(vtype) { - return parseDuration(v, str) - } - - kind := vtype.Kind() - switch kind { - case reflect.Bool: - err = parseBoolValue(v, str) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - err = parseIntValue(v, str) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - err = parseUintValue(v, str) - case reflect.Float32, reflect.Float64: - err = parseFloatValue(v, str) - case reflect.Ptr: - v.Set(reflect.New(vtype.Elem())) - return parseValue(v.Elem(), str, ctx) - case reflect.String: - v.SetString(str) - case reflect.Struct: - err = parseStruct(v, str, ctx) - default: - return fmt.Errorf("envconfig: kind %v not supported", kind) - } - - return -} - -func parseWithUnmarshaler(v reflect.Value, str string) error { - var u = v.Addr().Interface().(Unmarshaler) - return u.Unmarshal(str) -} - -func parseDuration(v reflect.Value, str string) error { - d, err := time.ParseDuration(str) - if err != nil { - return err - } - - v.SetInt(int64(d)) - - return nil -} - -// NOTE(vincent): this is only called when parsing structs inside a slice. 
-func parseStruct(value reflect.Value, token string, ctx *context) error { - tokens := strings.Split(token[1:len(token)-1], ",") - if len(tokens) != value.NumField() { - return fmt.Errorf("envconfig: struct token has %d fields but struct has %d", len(tokens), value.NumField()) - } - - for i := 0; i < value.NumField(); i++ { - field := value.Field(i) - t := tokens[i] - - if err := parseValue(field, t, ctx); err != nil { - return err - } - } - - return nil -} - -func parseBoolValue(v reflect.Value, str string) error { - val, err := strconv.ParseBool(str) - if err != nil { - return err - } - v.SetBool(val) - - return nil -} - -func parseIntValue(v reflect.Value, str string) error { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return err - } - v.SetInt(val) - - return nil -} - -func parseUintValue(v reflect.Value, str string) error { - val, err := strconv.ParseUint(str, 10, 64) - if err != nil { - return err - } - v.SetUint(val) - - return nil -} - -func parseFloatValue(v reflect.Value, str string) error { - val, err := strconv.ParseFloat(str, 64) - if err != nil { - return err - } - v.SetFloat(val) - - return nil -} - -func parseBytesValue(v reflect.Value, str string) error { - val, err := base64.StdEncoding.DecodeString(str) - if err != nil { - return err - } - v.SetBytes(val) - - return nil -} - -func combineName(parentName, name string) string { - if parentName == "" { - return name - } - - return parentName + "." + name -} - -func readValue(ctx *context) (string, error) { - keys := makeAllPossibleKeys(ctx) - - var str string - - for _, key := range keys { - str = os.Getenv(key) - if str != "" { - break - } - } - - if str != "" { - return str, nil - } - - if ctx.defaultVal != "" { - return ctx.defaultVal, nil - } - - if ctx.optional { - return "", nil - } - - return "", fmt.Errorf("envconfig: keys %s not found", strings.Join(keys, ", ")) -} - -func makeAllPossibleKeys(ctx *context) (res []string) { - if ctx.customName != "" { - return []string{ctx.customName} - } - - tmp := make(map[string]struct{}) - { - n := []rune(ctx.name) - - var buf bytes.Buffer // this is the buffer where we put extra underscores on "word" boundaries - var buf2 bytes.Buffer // this is the buffer with the standard naming scheme - - wroteUnderscore := false - for i, r := range ctx.name { - if r == '.' 
{ - buf.WriteRune('_') - buf2.WriteRune('_') - wroteUnderscore = true - continue - } - - prevOrNextLower := i+1 < len(n) && i-1 > 0 && (unicode.IsLower(n[i+1]) || unicode.IsLower(n[i-1])) - if i > 0 && unicode.IsUpper(r) && prevOrNextLower && !wroteUnderscore { - buf.WriteRune('_') - } - - buf.WriteRune(r) - buf2.WriteRune(r) - - wroteUnderscore = false - } - - tmp[strings.ToLower(buf.String())] = struct{}{} - tmp[strings.ToUpper(buf.String())] = struct{}{} - tmp[strings.ToLower(buf2.String())] = struct{}{} - tmp[strings.ToUpper(buf2.String())] = struct{}{} - } - - for k := range tmp { - res = append(res, k) - } - - sort.Strings(res) - - return -} diff --git a/vendor/github.com/vrischmann/envconfig/envconfig_test.go b/vendor/github.com/vrischmann/envconfig/envconfig_test.go deleted file mode 100644 index 104370d22..000000000 --- a/vendor/github.com/vrischmann/envconfig/envconfig_test.go +++ /dev/null @@ -1,597 +0,0 @@ -package envconfig_test - -import ( - "fmt" - "os" - "strconv" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/vrischmann/envconfig" -) - -func TestParseSimpleConfig(t *testing.T) { - var conf struct { - Name string - Log struct { - Path string - } - } - - err := envconfig.Init(&conf) - require.Equal(t, "envconfig: keys NAME, name not found", err.Error()) - - os.Setenv("NAME", "foobar") - err = envconfig.Init(&conf) - require.Equal(t, "envconfig: keys LOG_PATH, log_path not found", err.Error()) - - os.Setenv("LOG_PATH", "/var/log/foobar") - err = envconfig.Init(&conf) - require.Nil(t, err) - - require.Equal(t, "foobar", conf.Name) - require.Equal(t, "/var/log/foobar", conf.Log.Path) - - // Clean up at the end of the test - some tests share the same key and we don't values to be seen by those tests - os.Setenv("NAME", "") - os.Setenv("LOG_PATH", "") -} - -func TestParseIntegerConfig(t *testing.T) { - var conf struct { - Port int - Long uint64 - Version uint8 - } - - timestamp := time.Now().UnixNano() - - os.Setenv("PORT", "80") - os.Setenv("LONG", fmt.Sprintf("%d", timestamp)) - os.Setenv("VERSION", "2") - - err := envconfig.Init(&conf) - require.Nil(t, err) - - require.Equal(t, 80, conf.Port) - require.Equal(t, uint64(timestamp), conf.Long) - require.Equal(t, uint8(2), conf.Version) -} - -func TestParseBoolConfig(t *testing.T) { - var conf struct { - DoIt bool - } - - os.Setenv("DOIT", "true") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, true, conf.DoIt) -} - -func TestParseBytesConfig(t *testing.T) { - var conf struct { - Data []byte - } - - os.Setenv("DATA", "Rk9PQkFS") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, []byte("FOOBAR"), conf.Data) -} - -func TestParseFloatConfig(t *testing.T) { - var conf struct { - Delta float32 - DeltaV float64 - } - - os.Setenv("DELTA", "0.02") - os.Setenv("DELTAV", "400.20000000001") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, float32(0.02), conf.Delta) - require.Equal(t, float64(400.20000000001), conf.DeltaV) -} - -func TestParseSliceConfig(t *testing.T) { - var conf struct { - Names []string - Ports []int - Shards []struct { - Name string - Addr string - } - } - - os.Setenv("NAMES", "foobar,barbaz") - os.Setenv("PORTS", "900,100") - os.Setenv("SHARDS", "{foobar,localhost:2929},{barbaz,localhost:2828}") - - err := envconfig.Init(&conf) - require.Nil(t, err) - - require.Equal(t, 2, len(conf.Names)) - require.Equal(t, "foobar", conf.Names[0]) - require.Equal(t, "barbaz", conf.Names[1]) - require.Equal(t, 2, 
len(conf.Ports)) - require.Equal(t, 900, conf.Ports[0]) - require.Equal(t, 100, conf.Ports[1]) - require.Equal(t, 2, len(conf.Shards)) - require.Equal(t, "foobar", conf.Shards[0].Name) - require.Equal(t, "localhost:2929", conf.Shards[0].Addr) - require.Equal(t, "barbaz", conf.Shards[1].Name) - require.Equal(t, "localhost:2828", conf.Shards[1].Addr) -} - -func TestParseStructSliceWrongData(t *testing.T) { - var conf struct { - Shards []struct { - Name string - Addr string - } - } - - os.Setenv("SHARDS", "foobar") - - err := envconfig.Init(&conf) - require.Equal(t, "envconfig: struct token has 1 fields but struct has 2", err.Error()) -} - -func TestParseStructSliceWrongValue(t *testing.T) { - var conf struct { - Shards []struct { - Name string - Port int32 - } - } - - os.Setenv("SHARDS", "{foobar,barbaz}") - - err := envconfig.Init(&conf) - require.Equal(t, `strconv.ParseInt: parsing "barbaz": invalid syntax`, err.Error()) -} - -func TestParseWrongValues(t *testing.T) { - var conf struct{ OK bool } - os.Setenv("OK", "foobar") - err := envconfig.Init(&conf) - require.Equal(t, `strconv.ParseBool: parsing "foobar": invalid syntax`, err.Error()) - - var conf2 struct{ Port int } - os.Setenv("PORT", "foobar") - err = envconfig.Init(&conf2) - require.Equal(t, `strconv.ParseInt: parsing "foobar": invalid syntax`, err.Error()) - - var conf3 struct{ Port uint } - os.Setenv("PORT", "foobar") - err = envconfig.Init(&conf3) - require.Equal(t, `strconv.ParseUint: parsing "foobar": invalid syntax`, err.Error()) - - var conf4 struct{ Port float32 } - os.Setenv("PORT", "foobar") - err = envconfig.Init(&conf4) - require.Equal(t, `strconv.ParseFloat: parsing "foobar": invalid syntax`, err.Error()) - - var conf5 struct{ Data []byte } - os.Setenv("DATA", "foobar") - err = envconfig.Init(&conf5) - require.Equal(t, "illegal base64 data at input byte 4", err.Error()) -} - -func TestDurationConfig(t *testing.T) { - var conf struct { - Timeout time.Duration - } - - os.Setenv("TIMEOUT", "1m") - - err := envconfig.Init(&conf) - require.Nil(t, err) - - require.Equal(t, time.Minute*1, conf.Timeout) -} - -func TestInvalidDurationConfig(t *testing.T) { - var conf struct { - Timeout time.Duration - } - - os.Setenv("TIMEOUT", "foo") - - err := envconfig.Init(&conf) - require.NotNil(t, err) -} - -func TestAllPointerConfig(t *testing.T) { - var conf struct { - Name *string - Port *int - Delta *float32 - DeltaV *float64 - Hosts *[]string - Shards *[]*struct { - Name *string - Addr *string - } - Master *struct { - Name *string - Addr *string - } - Timeout *time.Duration - } - - os.Setenv("NAME", "foobar") - os.Setenv("PORT", "9000") - os.Setenv("DELTA", "40.01") - os.Setenv("DELTAV", "200.00001") - os.Setenv("HOSTS", "localhost,free.fr") - os.Setenv("SHARDS", "{foobar,localhost:2828},{barbaz,localhost:2929}") - os.Setenv("MASTER_NAME", "master") - os.Setenv("MASTER_ADDR", "localhost:2727") - os.Setenv("TIMEOUT", "1m") - - err := envconfig.Init(&conf) - require.Nil(t, err) - - require.Equal(t, "foobar", *conf.Name) - require.Equal(t, 9000, *conf.Port) - require.Equal(t, float32(40.01), *conf.Delta) - require.Equal(t, 200.00001, *conf.DeltaV) - require.Equal(t, 2, len(*conf.Hosts)) - require.Equal(t, "localhost", (*conf.Hosts)[0]) - require.Equal(t, "free.fr", (*conf.Hosts)[1]) - require.Equal(t, 2, len(*conf.Shards)) - require.Equal(t, "foobar", *(*conf.Shards)[0].Name) - require.Equal(t, "localhost:2828", *(*conf.Shards)[0].Addr) - require.Equal(t, "barbaz", *(*conf.Shards)[1].Name) - require.Equal(t, "localhost:2929", 
*(*conf.Shards)[1].Addr) - require.Equal(t, "master", *conf.Master.Name) - require.Equal(t, "localhost:2727", *conf.Master.Addr) - require.Equal(t, time.Minute*1, *conf.Timeout) -} - -type logMode uint - -const ( - logFile logMode = iota + 1 - logStdout -) - -func (m *logMode) Unmarshal(s string) error { - switch strings.ToLower(s) { - case "file": - *m = logFile - case "stdout": - *m = logStdout - default: - return fmt.Errorf("unable to unmarshal %s", s) - } - - return nil -} - -func TestUnmarshaler(t *testing.T) { - var conf struct { - LogMode logMode - } - - os.Setenv("LOGMODE", "file") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, logFile, conf.LogMode) - - var conf2 struct { - LogMode *logMode - } - - err = envconfig.Init(&conf2) - require.Nil(t, err) - require.Equal(t, logFile, *conf2.LogMode) -} - -func TestParseOptionalConfig(t *testing.T) { - var conf struct { - Name string `envconfig:"optional"` - Flag bool `envconfig:"optional"` - Timeout time.Duration `envconfig:"optional"` - Port int `envconfig:"optional"` - Port2 uint `envconfig:"optional"` - Delta float32 `envconfig:"optional"` - DeltaV float64 `envconfig:"optional"` - Slice []string `envconfig:"optional"` - Struct struct { - A string - B int - } `envconfig:"optional"` - } - - os.Setenv("NAME", "") - os.Setenv("FLAG", "") - os.Setenv("TIMEOUT", "") - os.Setenv("PORT", "") - os.Setenv("PORT2", "") - os.Setenv("DELTA", "") - os.Setenv("DELTAV", "") - os.Setenv("SLICE", "") - os.Setenv("STRUCT_A", "") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, "", conf.Name) - - os.Setenv("NAME", "foobar") - os.Setenv("SLICE", "a,b") - os.Setenv("STRUCT_A", "foobar") - os.Setenv("STRUCT_B", "1") - - err = envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, "foobar", conf.Name) - require.Equal(t, []string{"a", "b"}, conf.Slice) - require.Equal(t, "foobar", conf.Struct.A) - require.Equal(t, 1, conf.Struct.B) -} - -func TestParseSkippableConfig(t *testing.T) { - var conf struct { - Flag bool `envconfig:"-"` - } - - os.Setenv("FLAG", "true") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, false, conf.Flag) -} - -func TestParseCustomNameConfig(t *testing.T) { - var conf struct { - Name string `envconfig:"customName"` - } - - os.Setenv("customName", "foobar") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, "foobar", conf.Name) -} - -func TestParseOptionalStruct(t *testing.T) { - var conf struct { - Master struct { - Name string - } `envconfig:"optional"` - } - - os.Setenv("MASTER_NAME", "") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, "", conf.Master.Name) -} - -func TestParsePrefixedStruct(t *testing.T) { - var conf struct { - Name string - } - - os.Setenv("NAME", "") - os.Setenv("FOO_NAME", "") - - os.Setenv("NAME", "bad") - err := envconfig.InitWithPrefix(&conf, "FOO") - require.NotNil(t, err) - - os.Setenv("FOO_NAME", "good") - err = envconfig.InitWithPrefix(&conf, "FOO") - require.Nil(t, err) - require.Equal(t, "good", conf.Name) -} - -func TestUnexportedField(t *testing.T) { - var conf struct { - name string - } - - os.Setenv("NAME", "foobar") - - err := envconfig.Init(&conf) - require.Equal(t, envconfig.ErrUnexportedField, err) - - err = envconfig.InitWithOptions(&conf, envconfig.Options{AllowUnexported: true}) - require.Equal(t, nil, err) -} - -func TestNestedUnexportedField(t *testing.T) { - var conf struct { - Foo struct { - Bar struct { - baz string - } - } - } - - os.Setenv("FOO_BAR_BAZ", 
"foobar") - - err := envconfig.Init(&conf) - require.Equal(t, envconfig.ErrUnexportedField, err) - - err = envconfig.InitWithOptions(&conf, envconfig.Options{AllowUnexported: true}) - require.Equal(t, nil, err) -} - -type sliceWithUnmarshaler []int - -func (sl *sliceWithUnmarshaler) Unmarshal(s string) error { - tokens := strings.Split(s, ".") - for _, tok := range tokens { - tmp, err := strconv.Atoi(tok) - if err != nil { - return err - } - - *sl = append(*sl, tmp) - } - - return nil -} - -func TestSliceTypeWithUnmarshaler(t *testing.T) { - var conf struct { - Data sliceWithUnmarshaler - } - - os.Setenv("DATA", "1.2.3") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, 3, len(conf.Data)) - require.Equal(t, 1, conf.Data[0]) - require.Equal(t, 2, conf.Data[1]) - require.Equal(t, 3, conf.Data[2]) -} - -func TestParseDefaultVal(t *testing.T) { - var conf struct { - MySQL struct { - Master struct { - Address string `envconfig:"default=localhost"` - Port int `envconfig:"default=3306"` - } - Timeout time.Duration `envconfig:"default=1m,myTimeout"` - LocalTimeout time.Duration `envconfig:"myTimeout2,default=1m"` - } - } - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, "localhost", conf.MySQL.Master.Address) - require.Equal(t, 3306, conf.MySQL.Master.Port) - require.Equal(t, time.Minute*1, conf.MySQL.Timeout) - - os.Setenv("myTimeout", "2m") - os.Setenv("myTimeout2", "20m") - - err = envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, "localhost", conf.MySQL.Master.Address) - require.Equal(t, 3306, conf.MySQL.Master.Port) - require.Equal(t, time.Minute*2, conf.MySQL.Timeout) - require.Equal(t, time.Minute*20, conf.MySQL.LocalTimeout) -} - -func TestDefaultSlice(t *testing.T) { - // See https://github.com/vrischmann/envconfig/pull/15 - // - // The way people think about the following default value, is that the slice will be [a,b] - // However this never worked because we split the entire envconfig tag on , therefore default is just `a` here. - // The proper thing to do is to introduce a new format in the tag that doesn't have this limitation, but we don't have that yet. - // For now, we simply return an error indicating default is not unsupported on slices. 
- - var conf struct { - Hosts []string `envconfig:"default=a,b"` - } - - err := envconfig.Init(&conf) - require.NotNil(t, err) - require.Equal(t, envconfig.ErrDefaultUnsupportedOnSlice, err) -} - -func TestInitNotAPointer(t *testing.T) { - err := envconfig.Init("foobar") - require.Equal(t, envconfig.ErrNotAPointer, err) -} - -func TestInitPointerToAPointer(t *testing.T) { - type Conf struct { - Name string - } - var tmp *Conf - - os.Setenv("NAME", "foobar") - - err := envconfig.Init(&tmp) - require.Nil(t, err) - require.Equal(t, "foobar", tmp.Name) -} - -func TestInitInvalidValueKind(t *testing.T) { - sl := []string{"foo", "bar"} - err := envconfig.Init(&sl) - require.Equal(t, envconfig.ErrInvalidValueKind, err) -} - -func TestInvalidFieldValueKind(t *testing.T) { - var conf struct { - Foo interface{} - } - - os.Setenv("FOO", "lalala") - - err := envconfig.Init(&conf) - require.Equal(t, "envconfig: kind interface not supported", err.Error()) -} - -func TestInvalidSliceElementValueKind(t *testing.T) { - var conf struct { - Foo []interface{} - } - - os.Setenv("FOO", "lalala") - - err := envconfig.Init(&conf) - require.Equal(t, "envconfig: kind interface not supported", err.Error()) -} - -func TestParseEmptyTag(t *testing.T) { - var conf struct { - Name string `envconfig:""` - } - - os.Setenv("NAME", "foobar") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, "foobar", conf.Name) -} - -func TestLeaveNil(t *testing.T) { - var conf struct { - MySQL *struct { - Name string - } - } - - err := envconfig.InitWithOptions(&conf, envconfig.Options{ - AllOptional: true, - LeaveNil: true, - }) - require.Nil(t, err) - require.Nil(t, conf.MySQL) -} - -type myMapType map[string]int - -func (t *myMapType) Unmarshal(s string) error { - (*t)[s] = 1 - return nil -} - -func TestParseMapType(t *testing.T) { - var conf struct { - Map myMapType - } - - os.Setenv("MAP", "a") - - err := envconfig.Init(&conf) - require.Nil(t, err) - require.Equal(t, 1, conf.Map["a"]) -} diff --git a/vendor/github.com/vrischmann/envconfig/example_test.go b/vendor/github.com/vrischmann/envconfig/example_test.go deleted file mode 100644 index 93473523d..000000000 --- a/vendor/github.com/vrischmann/envconfig/example_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package envconfig_test - -import ( - "fmt" - "os" - "time" - - "github.com/vrischmann/envconfig" -) - -func ExampleInit() { - var conf struct { - MySQL struct { - Host string - Port int - Database struct { - User string - Password string - Name string - } - Params struct { - Charset string `envconfig:"-"` - } - } - Log struct { - Path string `envconfig:"default=/var/log/mylog.log"` - Rotate bool `envconfig:"logRotate"` - } - NbWorkers int - Timeout time.Duration - Cassandra struct { - SSLCert string - SSLKey string - } - } - - os.Setenv("MYSQL_HOST", "localhost") - os.Setenv("MYSQL_PORT", "3306") - os.Setenv("MYSQL_DATABASE_USER", "root") - os.Setenv("MYSQL_DATABASE_PASSWORD", "foobar") - os.Setenv("MYSQL_DATABASE_NAME", "default") - os.Setenv("logRotate", "true") - os.Setenv("NBWORKERS", "10") - os.Setenv("TIMEOUT", "120s") - os.Setenv("CASSANDRA_SSL_CERT", "/etc/cassandra/ssl.crt") - os.Setenv("CASSANDRA_SSL_KEY", "/etc/cassandra/ssl.key") - - if err := envconfig.Init(&conf); err != nil { - fmt.Printf("err=%s\n", err) - } - - fmt.Println(conf.MySQL.Database.User) - fmt.Println(conf.Log.Rotate) - fmt.Println(conf.Timeout) - fmt.Println(conf.Log.Path) - fmt.Println(conf.Cassandra.SSLCert) - fmt.Println(conf.Cassandra.SSLKey) - // Output: - // root - // true - // 
2m0s - // /var/log/mylog.log - // /etc/cassandra/ssl.crt - // /etc/cassandra/ssl.key -} - -func ExampleInitWithPrefix() { - var conf struct { - Name string - } - - os.Setenv("NAME", "") - os.Setenv("FOO_NAME", "") - - os.Setenv("NAME", "foobar") - - err := envconfig.InitWithPrefix(&conf, "FOO") - fmt.Println(err) - - os.Setenv("FOO_NAME", "foobar") - err = envconfig.InitWithPrefix(&conf, "FOO") - fmt.Println(err) - - fmt.Println(conf.Name) - // Output: - // envconfig: keys FOO_NAME, foo_name not found - // - // foobar -} diff --git a/vendor/github.com/vrischmann/envconfig/keys_test.go b/vendor/github.com/vrischmann/envconfig/keys_test.go deleted file mode 100644 index 56ac46f47..000000000 --- a/vendor/github.com/vrischmann/envconfig/keys_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package envconfig - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestMakeAllPossibleKeys(t *testing.T) { - fieldName := "CassandraSslCert" - keys := makeAllPossibleKeys(&context{ - name: fieldName, - }) - - require.Equal(t, 4, len(keys)) - require.Equal(t, "CASSANDRASSLCERT", keys[0]) - require.Equal(t, "CASSANDRA_SSL_CERT", keys[1]) - require.Equal(t, "cassandra_ssl_cert", keys[2]) - require.Equal(t, "cassandrasslcert", keys[3]) - - fieldName = "CassandraSSLCert" - keys = makeAllPossibleKeys(&context{ - name: fieldName, - }) - - require.Equal(t, 4, len(keys)) - require.Equal(t, "CASSANDRASSLCERT", keys[0]) - require.Equal(t, "CASSANDRA_SSL_CERT", keys[1]) - require.Equal(t, "cassandra_ssl_cert", keys[2]) - require.Equal(t, "cassandrasslcert", keys[3]) - - fieldName = "Cassandra.SslCert" - keys = makeAllPossibleKeys(&context{ - name: fieldName, - }) - - require.Equal(t, 4, len(keys)) - require.Equal(t, "CASSANDRA_SSLCERT", keys[0]) - require.Equal(t, "CASSANDRA_SSL_CERT", keys[1]) - require.Equal(t, "cassandra_ssl_cert", keys[2]) - require.Equal(t, "cassandra_sslcert", keys[3]) - - fieldName = "Cassandra.SSLCert" - keys = makeAllPossibleKeys(&context{ - name: fieldName, - }) - - require.Equal(t, 4, len(keys)) - require.Equal(t, "CASSANDRA_SSLCERT", keys[0]) - require.Equal(t, "CASSANDRA_SSL_CERT", keys[1]) - require.Equal(t, "cassandra_ssl_cert", keys[2]) - require.Equal(t, "cassandra_sslcert", keys[3]) - - fieldName = "Name" - keys = makeAllPossibleKeys(&context{ - name: fieldName, - }) - - require.Equal(t, 2, len(keys)) - require.Equal(t, "NAME", keys[0]) - require.Equal(t, "name", keys[1]) -} diff --git a/vendor/github.com/vrischmann/envconfig/slice.go b/vendor/github.com/vrischmann/envconfig/slice.go deleted file mode 100644 index daa7095cd..000000000 --- a/vendor/github.com/vrischmann/envconfig/slice.go +++ /dev/null @@ -1,76 +0,0 @@ -package envconfig - -import ( - "bufio" - "bytes" - "io" - "strings" -) - -type sliceTokenizer struct { - err error - r *bufio.Reader - buf bytes.Buffer - inBraces bool -} - -var eof = rune(0) - -func newSliceTokenizer(str string) *sliceTokenizer { - return &sliceTokenizer{ - r: bufio.NewReader(strings.NewReader(str)), - } -} - -func (t *sliceTokenizer) scan() bool { - for { - if t.err == io.EOF && t.buf.Len() == 0 { - return false - } - - ch := t.readRune() - if ch == eof { - return true - } - - if ch == '{' { - t.inBraces = true - } - if ch == '}' { - t.inBraces = false - } - - if ch == ',' && !t.inBraces { - return true - } - - // NOTE(vincent): we ignore the WriteRune error here because there is NO WAY - // for WriteRune to return an error. - // Yep. Seriously. 
Look here http://golang.org/src/bytes/buffer.go?s=7661:7714#L227 - _, _ = t.buf.WriteRune(ch) - } -} - -func (t *sliceTokenizer) readRune() rune { - ch, _, err := t.r.ReadRune() - if err != nil { - t.err = err - return eof - } - - return ch -} - -func (t *sliceTokenizer) text() string { - str := t.buf.String() - t.buf.Reset() - - return str -} - -func (t *sliceTokenizer) Err() error { - if t.err == io.EOF { - return nil - } - return t.err -} diff --git a/vendor/github.com/vrischmann/envconfig/slice_test.go b/vendor/github.com/vrischmann/envconfig/slice_test.go deleted file mode 100644 index 0debf7fdf..000000000 --- a/vendor/github.com/vrischmann/envconfig/slice_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package envconfig - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSliceTokenizer(t *testing.T) { - str := "foobar,barbaz" - tnz := newSliceTokenizer(str) - - b := tnz.scan() - require.Nil(t, tnz.Err()) - require.Equal(t, true, b) - - require.Equal(t, "foobar", tnz.text()) - - b = tnz.scan() - require.Nil(t, tnz.Err()) - require.Equal(t, true, b) - require.Equal(t, "barbaz", tnz.text()) - - b = tnz.scan() - require.Nil(t, tnz.Err()) - require.Equal(t, false, b) -} - -func TestSliceOfStructsTokenizer(t *testing.T) { - str := "{foobar,100},{barbaz,200}" - tnz := newSliceTokenizer(str) - - b := tnz.scan() - require.Nil(t, tnz.Err()) - require.Equal(t, true, b) - - require.Equal(t, "{foobar,100}", tnz.text()) - - b = tnz.scan() - require.Nil(t, tnz.Err()) - require.Equal(t, true, b) - require.Equal(t, "{barbaz,200}", tnz.text()) - - b = tnz.scan() - require.Nil(t, tnz.Err()) - require.Equal(t, false, b) -}
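For reference, the brace-aware tokenizer exercised by these tests is what makes the {a,b},{c,d} slice-of-structs environment format work end to end: top-level commas separate elements while commas inside braces stay part of a single struct token. Below is a minimal sketch assuming an illustrative SHARDS variable and Shard fields; it mirrors TestParseSliceConfig rather than adding any new behavior.

    // Hypothetical sketch of the slice-of-structs format split by the
    // tokenizer above. The SHARDS value and Shard fields are illustrative.
    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/vrischmann/envconfig"
    )

    func main() {
        var conf struct {
            Shards []struct {
                Name string
                Addr string
            }
        }

        // Each {...} group becomes one struct; fields appear in declaration order.
        os.Setenv("SHARDS", "{alpha,localhost:2929},{beta,localhost:2828}")

        if err := envconfig.Init(&conf); err != nil {
            log.Fatal(err)
        }
        for _, s := range conf.Shards {
            fmt.Println(s.Name, s.Addr)
        }
        // alpha localhost:2929
        // beta localhost:2828
    }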