Clean e2e test example (#1535)

* a template for clean e2e tests

* vendor: update ginkgo,gomega,testify

* e2e template: Remove Before/AfterSuite and rename file

* gofmt

* don't ignore .odo
Tomas Kral
2019-04-01 15:09:14 +02:00
committed by OpenShift Merge Robot
parent ffa2756f7c
commit 7c81a212d8
400 changed files with 15162 additions and 4136 deletions

3
.gitignore vendored
View File

@@ -69,6 +69,3 @@ tags
# VSCode specific
.vscode
# local .odo config directory
.odo
*/**/.odo

44
glide.lock generated
View File

@@ -1,5 +1,5 @@
hash: 6622a97829592723ea2ae941c3f3de57b32ec4439bb1109fc059bc893552b23a
updated: 2019-01-17T12:55:58.278901676-05:00
hash: 9609a0d65ff7c6c3e7c5a70aa1f46aaffe4229aa698db538ea82d8f2de88e6a9
updated: 2019-03-28T15:05:18.897257+01:00
imports:
- name: github.com/alexbrainman/sspi
version: e580b900e9f5657daa5473021296289be6da2661
@@ -30,7 +30,7 @@ imports:
- digestset
- reference
- name: github.com/docker/docker
version: fcdab531a6d9b133eaa3f2a3f6c11fd02a97de75
version: da810a85109d7bd7ccd8f2d622556cbcfc00386b
repo: https://github.com/openshift/moby-moby.git
subpackages:
- api/types
@@ -108,7 +108,7 @@ imports:
- ptypes/duration
- ptypes/timestamp
- name: github.com/google/btree
version: 4030bb1f1f0c35b30ca7009e9ebd06849dd45306
version: 20236160a414454a9c64b6c8829381c6f4bddcaa
- name: github.com/google/gofuzz
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
- name: github.com/googleapis/gnostic
@@ -131,6 +131,13 @@ imports:
- simplelru
- name: github.com/hinshun/vt10x
version: d55458df857cfff952e4bf72ce1b567c5a4e141c
- name: github.com/hpcloud/tail
version: a1dbeea552b7c8df4b542c66073e393de198a800
subpackages:
- ratelimiter
- util
- watch
- winfile
- name: github.com/imdario/mergo
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
- name: github.com/inconshreveable/mousetrap
@@ -140,7 +147,7 @@ imports:
- name: github.com/kballard/go-shellquote
version: 95032a82bc518f77982ea72343cc1ade730072f0
- name: github.com/kr/pty
version: db8e3cd836b82e82e0a9c8edc6896967dd31374f
version: 521317be5ebc228a0f0ede099fa2a0b5ece22e49
- name: github.com/kubernetes-incubator/service-catalog
version: 9d82b390b96d565e9301fdfe4ac822311e765a3c
subpackages:
@@ -167,7 +174,7 @@ imports:
- name: github.com/mattn/go-colorable
version: efa589957cd060542a26d2dd7832fd6a6c6c3ade
- name: github.com/mattn/go-isatty
version: 3fb116b820352b7f0c281308a4d6250c22d94e27
version: c2a7a6ca930a4cd0bc33a3f298eb71960732a3a7
- name: github.com/mattn/go-runewidth
version: 703b5e6b11ae25aeb2af9ebb5d5fdf8fa2575211
- name: github.com/matttproud/golang_protobuf_extensions
@@ -187,7 +194,7 @@ imports:
- name: github.com/olekukonko/tablewriter
version: d4647c9c7a84d847478d890b816b7d8b62b0b279
- name: github.com/onsi/ginkgo
version: 9eda700730cba42af70d53180f9dcce9266bc2bc
version: eea6ad008b96acdaa524f5b409513bf062b500ad
subpackages:
- config
- internal/codelocation
@@ -207,7 +214,7 @@ imports:
- reporters/stenographer/support/go-isatty
- types
- name: github.com/onsi/gomega
version: 003f63b7f4cff3fc95357005358af2de0f5fe152
version: 90e289841c1ed79b7a598a7cd9959750cb5e89e2
subpackages:
- format
- gbytes
@@ -230,7 +237,7 @@ imports:
- specs-go
- specs-go/v1
- name: github.com/openshift/api
version: 5ad8479f64f1b60ee9c62ce8ef1fe6638838725e
version: 8741ff068a473be041d7bafb4502c12e4f10aab5
subpackages:
- apps/v1
- authorization/v1
@@ -406,7 +413,7 @@ imports:
- name: github.com/spf13/pflag
version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
- name: github.com/stretchr/testify
version: f35b8ab0b5a2cef36673838d662e249dd9c94686
version: ffdc059bfe9ce6a4e144ba849dbedead332c6053
subpackages:
- assert
- require
@@ -427,6 +434,7 @@ imports:
- http2/hpack
- idna
- lex/httplex
- websocket
- name: golang.org/x/sys
version: 95c6576299259db960f6c5b9b69ea52422860fce
subpackages:
@@ -466,6 +474,8 @@ imports:
subpackages:
- core
- terminal
- name: gopkg.in/fsnotify/fsnotify.v1
version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
- name: gopkg.in/inf.v0
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
- name: gopkg.in/square/go-jose.v2
@@ -474,6 +484,8 @@ imports:
- cipher
- json
- jwt
- name: gopkg.in/tomb.v1
version: c131134a1947e9afd9cecfe11f4c6dff0732ae58
- name: gopkg.in/yaml.v2
version: 670d4cfef0544295bc27a114dbac37980d83185a
- name: k8s.io/api
@@ -512,12 +524,12 @@ imports:
- storage/v1alpha1
- storage/v1beta1
- name: k8s.io/apiextensions-apiserver
version: 7dbb63fb7dacd49193fb013af5bea8ec7a047cdd
version: 45cb0b8537cf79d23914721e0bf41a3479769b22
repo: https://github.com/openshift/kubernetes-apiextensions-apiserver.git
subpackages:
- pkg/features
- name: k8s.io/apimachinery
version: 0b908666f6c964370af24212f499d77109c65937
version: 5f1ab6ad9104144e43c7e477938fd278ade27eb2
repo: https://github.com/openshift/kubernetes-apimachinery.git
subpackages:
- pkg/api/equality
@@ -574,7 +586,7 @@ imports:
- third_party/forked/golang/netutil
- third_party/forked/golang/reflect
- name: k8s.io/apiserver
version: 10f1ab2357d4fc15527540fd7d7953ba8e6c98e3
version: 996cefeb14170756dc1626dde64fcfb63c8edb24
repo: https://github.com/openshift/kubernetes-apiserver.git
subpackages:
- pkg/apis/audit
@@ -587,7 +599,7 @@ imports:
- pkg/features
- pkg/util/feature
- name: k8s.io/client-go
version: b28cdde46f9c296b8ecde9c0bed0d215b6cda59d
version: 7eaf7133bcfa11e19ebbbc76ff53e2cb32524683
repo: https://github.com/openshift/kubernetes-client-go.git
subpackages:
- discovery
@@ -698,12 +710,12 @@ imports:
- util/jsonpath
- util/retry
- name: k8s.io/kube-openapi
version: 8a9b82f00b3a86eac24681da3f9fe6c34c01cea2
version: 91cfa479c814065e420cee7ed227db0f63a5854e
subpackages:
- pkg/util/proto
- pkg/util/proto/validation
- name: k8s.io/kubernetes
version: 22b8c511baaf712194dee3eb9c6d8e96d282d3a5
version: d8b46d72a413981e6ea99605dcef3b8ca3f2223f
repo: https://github.com/openshift/kubernetes.git
subpackages:
- pkg/api/events

View File

@@ -26,9 +26,9 @@ import:
repo: https://github.com/openshift/kubernetes-api.git
version: origin-3.11-kubernetes-1.11.1
- package: github.com/onsi/gomega
version: v1.3.0
version: v1.5.0
- package: github.com/onsi/ginkgo
version: v1.4.0
version: v1.8.0
- package: github.com/kubernetes-incubator/service-catalog
version: v0.1.31
- package: github.com/satori/go.uuid
@@ -44,7 +44,7 @@ import:
- package: github.com/Netflix/go-expect
version: master
- package: github.com/stretchr/testify
version: v1.2.2
version: v1.3.0
- package: github.com/posener/complete
version: v1.1.2
- package: github.com/gobwas/glob

View File

@@ -44,11 +44,11 @@ func TestOdo(t *testing.T) {
RunSpecs(t, "odo test suite")
}
var _ = BeforeSuite(func() {
// Save the current project
// commenting this out to resolve e2e tests failures on OC 4
// curProj = runCmdShouldPass("oc project -q")
})
//var _ = BeforeSuite(func() {
// Save the current project
// commenting this out to resolve e2e tests failures on OC 4
// curProj = runCmdShouldPass("oc project -q")
//})
func VerifyAppNameOfComponent(cmpName string, appName string) {
session := runCmdShouldPass(fmt.Sprintf("oc get dc %s-%s --template={{.metadata.labels.'app'}}", cmpName, appName))

View File

@@ -0,0 +1,103 @@
package helper
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// CreateNewContext creates a new empty temporary directory
func CreateNewContext() string {
directory, err := ioutil.TempDir("", "")
Expect(err).NotTo(HaveOccurred())
fmt.Fprintf(GinkgoWriter, "Created dir: %s\n", directory)
return directory
}
// DeleteDir deletes the given directory
func DeleteDir(dir string) {
fmt.Fprintf(GinkgoWriter, "Deleting dir: %s\n", dir)
err := os.RemoveAll(dir)
Expect(err).NotTo(HaveOccurred())
}
// Chdir changes the current working directory
func Chdir(dir string) {
fmt.Fprintf(GinkgoWriter, "Setting current dir to: %s\n", dir)
err := os.Chdir(dir)
Expect(err).ShouldNot(HaveOccurred())
}
// Getwd returns the current working directory
func Getwd() string {
dir, err := os.Getwd()
Expect(err).NotTo(HaveOccurred())
fmt.Fprintf(GinkgoWriter, "Current working dir: %s\n", dir)
return dir
}
// CopyExample copies an example from tests/e2e/examples/<exampleName> into targetDir
func CopyExample(exampleName string, targetDir string) {
// filename of this file
_, filename, _, _ := runtime.Caller(0)
// path to the examples directory
examplesDir := filepath.Join(filepath.Dir(filename), "..", "examples")
src := filepath.Join(examplesDir, exampleName)
info, err := os.Stat(src)
Expect(err).NotTo(HaveOccurred())
err = copyDir(src, targetDir, info)
Expect(err).NotTo(HaveOccurred())
}
// copyDir copies one directory to another.
// It is called recursively; info should start as os.Stat(src).
func copyDir(src string, dst string, info os.FileInfo) error {
if info.IsDir() {
files, err := ioutil.ReadDir(src)
if err != nil {
return err
}
for _, file := range files {
dsrt := filepath.Join(src, file.Name())
ddst := filepath.Join(dst, file.Name())
if err := copyDir(dsrt, ddst, file); err != nil {
return err
}
}
return nil
}
if err := os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil {
return err
}
dFile, err := os.Create(dst)
if err != nil {
return err
}
defer dFile.Close()
sFile, err := os.Open(src)
if err != nil {
return err
}
defer sFile.Close()
if err = os.Chmod(dFile.Name(), info.Mode()); err != nil {
return err
}
_, err = io.Copy(dFile, sFile)
return err
}
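
Taken together, the filesystem helpers above are meant to be composed per spec: create a throw-away context directory, switch into it, and copy an example application in. A minimal sketch of that composition, assuming the same tests/e2e/examples/source/nodejs example used later in this commit; the spec body itself is illustrative only:

package e2e

import (
	"path/filepath"

	. "github.com/onsi/ginkgo"

	"github.com/openshift/odo/tests/e2e/helper"
)

// Illustrative sketch: one temporary context directory per spec,
// cleaned up again after the spec finishes.
var _ = Describe("filesystem helper usage (sketch)", func() {
	var context string
	var originalDir string

	BeforeEach(func() {
		context = helper.CreateNewContext()
		originalDir = helper.Getwd()
		helper.Chdir(context)
	})

	AfterEach(func() {
		helper.Chdir(originalDir)
		helper.DeleteDir(context)
	})

	It("copies an example app into the per-spec context directory", func() {
		helper.CopyExample(filepath.Join("source", "nodejs"), context)
	})
})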

View File

@@ -0,0 +1,19 @@
package helper
import (
"math/rand"
"time"
)
func init() {
rand.Seed(time.Now().UTC().UnixNano())
}
func randString(n int) string {
const letterBytes = "abcdefghijklmnopqrstuvwxyz"
b := make([]byte, n)
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}
return string(b)
}

View File

@@ -0,0 +1,38 @@
package helper
import (
"fmt"
. "github.com/onsi/ginkgo"
//. "github.com/onsi/gomega"
)
// OcCreateRandProject creates a new project with a random name (10 letters)
// without writing to the config file (i.e. without switching the active project)
func OcCreateRandProject() string {
projectName := randString(10)
fmt.Fprintf(GinkgoWriter, "Creating a new project: %s\n", projectName)
CmdShouldPass(fmt.Sprintf("oc new-project %s --skip-config-write", projectName))
return projectName
}
// OcSwitchProject switches to the given project
func OcSwitchProject(project string) {
CmdShouldPass(fmt.Sprintf("oc project %s ", project))
}
// OcDeleteProject deletes the specified project
func OcDeleteProject(project string) {
fmt.Fprintf(GinkgoWriter, "Deleting project: %s\n", project)
CmdShouldPass(fmt.Sprintf("oc delete project %s --now", project))
}
// OcGetCurrentProject returns the currently active project in oc.
// It returns an empty string if there is no active project or no access to it.
func OcGetCurrentProject() string {
stdout, _, exitCode := cmdRunner("oc project -q")
if exitCode == 0 {
return stdout
}
return ""
}
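
These oc helpers give each spec its own isolated project. A minimal sketch of the intended lifecycle, assuming the helpers above; the spec body is illustrative only:

package e2e

import (
	. "github.com/onsi/ginkgo"

	"github.com/openshift/odo/tests/e2e/helper"
)

// Illustrative sketch: create a random project per spec without switching to it,
// switch only for the duration of a spec, and delete it afterwards.
var _ = Describe("project helper usage (sketch)", func() {
	var project string
	var originalProject string

	BeforeEach(func() {
		project = helper.OcCreateRandProject()
	})

	AfterEach(func() {
		helper.OcDeleteProject(project)
	})

	It("temporarily switches the active project", func() {
		originalProject = helper.OcGetCurrentProject()
		helper.OcSwitchProject(project)
		defer helper.OcSwitchProject(originalProject)
		// commands that rely on the active project would run here
	})
})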

View File

@@ -0,0 +1,40 @@
package helper
import (
"fmt"
"os/exec"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
// cmdRunner runs the given command in a shell and returns its stdout, stderr and exit code
func cmdRunner(cmdS string) (string, string, int) {
//TODO: this needs to be os independent
cmd := exec.Command("/bin/sh", "-c", cmdS)
fmt.Fprintf(GinkgoWriter, "Running command: %s\n", cmdS)
session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
// wait for the command execution to complete
<-session.Exited
Expect(err).NotTo(HaveOccurred())
stdout := string(session.Out.Contents())
stderr := string(session.Err.Contents())
exitCode := session.ExitCode()
fmt.Fprintf(GinkgoWriter, "Result: \n stdout: %s\n stderr:%s \n exitcode: %d \n", stdout, stderr, exitCode)
return stdout, stderr, exitCode
}
// CmdShouldPass runs the command and expects it to return 0 as the exit code.
// It returns the trimmed stdout.
func CmdShouldPass(cmd string) string {
stdout, _, exitcode := cmdRunner(cmd)
Expect(exitcode).To(Equal(0))
return strings.TrimSpace(stdout)
}
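
CmdShouldPass is the building block the specs below rely on: it fails the spec on a non-zero exit code and hands back the trimmed stdout for further assertions. A small usage sketch, assuming these helpers; the odo version command and the asserted substring are illustrative only:

package e2e

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/openshift/odo/tests/e2e/helper"
)

// Illustrative sketch: run a command through the helper and assert on its output.
var _ = Describe("command helper usage (sketch)", func() {
	It("runs a command and checks its output", func() {
		out := helper.CmdShouldPass("odo version")
		Expect(out).To(ContainSubstring("odo"))
	})
})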

View File

@@ -0,0 +1,121 @@
package e2e
import (
"path/filepath"
. "github.com/onsi/ginkgo"
//. "github.com/onsi/gomega"
"github.com/openshift/odo/tests/e2e/helper"
)
// The following command runs the tests in the Describe section below in parallel (on 2 nodes):
// ginkgo -nodes=2 -focus="Example of a clean test" -slowSpecThreshold=120 -randomizeAllSpecs tests/e2e/
var _ = Describe("Example of a clean test", func() {
//new clean project and context for each test
var project string
var context string
// current directory and project (before any test is run) so they can be restored after all testing is done
var originalDir string
var originalProject string
// Set up state for each test spec:
// create a new project (not set as active) and a new context directory for each test spec.
// This is run before every spec (It)
var _ = BeforeEach(func() {
project = helper.OcCreateRandProject()
context = helper.CreateNewContext()
})
// Clean up after the test
// This is run after every Spec (It)
var _ = AfterEach(func() {
helper.OcDeleteProject(project)
helper.DeleteDir(context)
})
var _ = Context("when component is in the current directory", func() {
// we will be testing components that are created from the current directory
// switch to the clean context dir before each test
var _ = JustBeforeEach(func() {
originalDir = helper.Getwd()
helper.Chdir(context)
})
// go back to original directory after each test
var _ = JustAfterEach(func() {
helper.Chdir(originalDir)
})
var _ = Context("when project from KUBECONFIG is used", func() {
// Set active project for each test spec
var _ = JustBeforeEach(func() {
originalProject = helper.OcGetCurrentProject()
helper.OcSwitchProject(project)
})
// go back to original project after each test
var _ = JustAfterEach(func() {
helper.OcSwitchProject(originalProject)
})
It("create local nodejs component and push code", func() {
helper.CopyExample(filepath.Join("source", "nodejs"), context)
helper.CmdShouldPass("odo component create nodejs")
//TODO: verify that config was properly created
helper.CmdShouldPass("odo push")
//TODO: verify resources on cluster
})
})
var _ = Context("when --project flag is used", func() {
It("create local nodejs component and push code", func() {
helper.CopyExample(filepath.Join("source", "nodejs"), context)
helper.CmdShouldPass("odo component create nodejs --project " + project + "")
//TODO: verify that config was properly created
helper.CmdShouldPass("odo push")
//TODO: verify resources on cluster
})
})
var _ = Context("when --context is used", func() {
var _ = Context("when project from KUBECONFIG is used", func() {
// Set active project for each test spec
var _ = JustBeforeEach(func() {
helper.OcSwitchProject(project)
})
// go back to original project after each test
var _ = JustAfterEach(func() {
helper.OcSwitchProject(originalProject)
})
It("create local nodejs component and push code", func() {
helper.CopyExample(filepath.Join("source", "nodejs"), context)
helper.CmdShouldPass("odo component create nodejs nodejs --context " + context)
//TODO: verify that config was properly created
helper.CmdShouldPass("odo push")
//TODO: verify resources on cluster
})
})
var _ = Context("when --project flag is used", func() {
It("create local nodejs component and push code", func() {
helper.CopyExample(filepath.Join("source", "nodejs"), context)
helper.CmdShouldPass("odo component create nodejs nodejs --project " + project + " --context " + context)
//TODO: verify that config was properly created
helper.CmdShouldPass("odo push")
//TODO: verify resources on cluster
})
})
})
})
})
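
The TODO comments above leave the verification steps open. One possible way to fill them in relies only on the fact (from the .gitignore change in this commit) that odo keeps its local configuration in a .odo directory inside the component context; the helper name and the exact check below are assumptions, not part of this commit:

package e2e

import (
	"os"
	"path/filepath"

	. "github.com/onsi/gomega"
)

// verifyLocalConfigExists is a hypothetical helper sketching how the
// "verify that config was properly created" TODOs could be implemented:
// it only checks that the local .odo config directory was created
// inside the per-spec context directory.
func verifyLocalConfigExists(context string) {
	info, err := os.Stat(filepath.Join(context, ".odo"))
	Expect(err).NotTo(HaveOccurred())
	Expect(info.IsDir()).To(BeTrue())
}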

View File

@@ -50,8 +50,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
// Currently go does not fill in the major/minors
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
hdr.Devminor = int64(minor(uint64(s.Rdev)))
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
}
}
@@ -77,14 +77,6 @@ func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
}
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
func minor(device uint64) uint64 {
return (device & 0xff) | ((device >> 12) & 0xfff00)
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {

View File

@@ -294,7 +294,7 @@ func OverlayChanges(layers []string, rw string) ([]Change, error) {
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
if fi.Mode()&os.ModeCharDevice != 0 {
s := fi.Sys().(*syscall.Stat_t)
if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert
return path, nil
}
}

View File

@@ -40,7 +40,6 @@ var SignalMap = map[string]syscall.Signal{
"TSTP": unix.SIGTSTP,
"TTIN": unix.SIGTTIN,
"TTOU": unix.SIGTTOU,
"UNUSED": unix.SIGUNUSED,
"URG": unix.SIGURG,
"USR1": unix.SIGUSR1,
"USR2": unix.SIGUSR2,

View File

@@ -18,5 +18,5 @@ func Mknod(path string, mode uint32, dev int) error {
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor.
func Mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
return uint32(unix.Mkdev(uint32(major), uint32(minor)))
}

View File

@@ -15,7 +15,7 @@ github.com/sirupsen/logrus v1.0.1
github.com/tchap/go-patricia v2.2.6
github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
golang.org/x/sys 8dbc5d05d6edcc104950cc299a1ce6641235bc86
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756

17
vendor/github.com/google/btree/go.mod generated vendored Normal file
View File

@@ -0,0 +1,17 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
module github.com/google/btree
go 1.12

3
vendor/github.com/hpcloud/tail/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,3 @@
.test
.go

19
vendor/github.com/hpcloud/tail/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,19 @@
language: go
script:
- go test -race -v ./...
go:
- 1.5
- 1.6
- 1.7
- 1.8
- tip
matrix:
allow_failures:
- go: tip
install:
- go get gopkg.in/fsnotify/fsnotify.v1
- go get gopkg.in/tomb.v1

63
vendor/github.com/hpcloud/tail/CHANGES.md generated vendored Normal file
View File

@@ -0,0 +1,63 @@
# API v1 (gopkg.in/hpcloud/tail.v1)
## April, 2016
* Migrated to godep, as depman is no longer supported
* Introduced golang vendoring feature
* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file
## July, 2015
* Fix inotify watcher leak; remove `Cleanup` (#51)
# API v0 (gopkg.in/hpcloud/tail.v0)
## June, 2015
* Don't return partial lines (PR #40)
* Use stable version of fsnotify (#46)
## July, 2014
* Fix tail for Windows (PR #36)
## May, 2014
* Improved rate limiting using leaky bucket (PR #29)
* Fix odd line splitting (PR #30)
## Apr, 2014
* LimitRate now discards read buffer (PR #28)
* allow reading of longer lines if MaxLineSize is unset (PR #24)
* updated deps.json to latest fsnotify (441bbc86b1)
## Feb, 2014
* added `Config.Logger` to suppress library logging
## Nov, 2013
* add Cleanup to remove leaky inotify watches (PR #20)
## Aug, 2013
* redesigned Location field (PR #12)
* add tail.Tell (PR #14)
## July, 2013
* Rate limiting (PR #10)
## May, 2013
* Detect file deletions/renames in polling file watcher (PR #1)
* Detect file truncation
* Fix potential race condition when reopening the file (issue 5)
* Fix potential blocking of `tail.Stop` (issue 4)
* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
* Support Follow=false
## Feb, 2013
* Initial open source release

19
vendor/github.com/hpcloud/tail/Dockerfile generated vendored Normal file
View File

@@ -0,0 +1,19 @@
FROM golang
RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
ADD . $GOPATH/src/github.com/hpcloud/tail/
# expecting to fetch dependencies successfully.
RUN go get -v github.com/hpcloud/tail
# expecting to run the test successfully.
RUN go test -v github.com/hpcloud/tail
# expecting to install successfully
RUN go install -v github.com/hpcloud/tail
RUN go install -v github.com/hpcloud/tail/cmd/gotail
RUN $GOPATH/bin/gotail -h || true
ENV PATH $GOPATH/bin:$PATH
CMD ["gotail"]

15
vendor/github.com/hpcloud/tail/Godeps/Godeps.json generated vendored Normal file
View File

@@ -0,0 +1,15 @@
{
"ImportPath": "github.com/hpcloud/tail",
"GoVersion": "go1.5.1",
"Deps": [
{
"ImportPath": "gopkg.in/fsnotify.v1",
"Comment": "v1.2.1",
"Rev": "7be54206639f256967dd82fa767397ba5f8f48f5"
},
{
"ImportPath": "gopkg.in/tomb.v1",
"Rev": "c131134a1947e9afd9cecfe11f4c6dff0732ae58"
}
]
}

5
vendor/github.com/hpcloud/tail/Godeps/Readme generated vendored Normal file
View File

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

21
vendor/github.com/hpcloud/tail/LICENSE.txt generated vendored Normal file
View File

@@ -0,0 +1,21 @@
# The MIT License (MIT)
# © Copyright 2015 Hewlett Packard Enterprise Development LP
Copyright (c) 2014 ActiveState
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

11
vendor/github.com/hpcloud/tail/Makefile generated vendored Normal file
View File

@@ -0,0 +1,11 @@
default: test
test: *.go
go test -v -race ./...
fmt:
gofmt -w .
# Run the test in an isolated environment.
fulltest:
docker build -t hpcloud/tail .

28
vendor/github.com/hpcloud/tail/README.md generated vendored Normal file
View File

@@ -0,0 +1,28 @@
[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail)
[![Build status](https://ci.appveyor.com/api/projects/status/vrl3paf9md0a7bgk/branch/master?svg=true)](https://ci.appveyor.com/project/Nino-K/tail/branch/master)
# Go package for tail-ing files
A Go package striving to emulate the features of the BSD `tail` program.
```Go
t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
for line := range t.Lines {
fmt.Println(line.Text)
}
```
See [API documentation](http://godoc.org/github.com/hpcloud/tail).
## Log rotation
Tail comes with full support for truncation/move detection as it is
designed to work with log rotation tools.
## Installing
go get github.com/hpcloud/tail/...
## Windows support
This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support.

11
vendor/github.com/hpcloud/tail/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,11 @@
version: 0.{build}
skip_tags: true
cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
build_script:
- SET GOPATH=c:\workspace
- go test -v -race ./...
test: off
clone_folder: c:\workspace\src\github.com\hpcloud\tail
branches:
only:
- master

1
vendor/github.com/hpcloud/tail/cmd/gotail/.gitignore generated vendored Normal file
View File

@@ -0,0 +1 @@
gotail

4
vendor/github.com/hpcloud/tail/cmd/gotail/Makefile generated vendored Normal file
View File

@@ -0,0 +1,4 @@
default: gotail
gotail: *.go ../../*.go
go build

66
vendor/github.com/hpcloud/tail/cmd/gotail/gotail.go generated vendored Normal file
View File

@@ -0,0 +1,66 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package main
import (
"flag"
"fmt"
"os"
"github.com/hpcloud/tail"
)
func args2config() (tail.Config, int64) {
config := tail.Config{Follow: true}
n := int64(0)
maxlinesize := int(0)
flag.Int64Var(&n, "n", 0, "tail from the last Nth location")
flag.IntVar(&maxlinesize, "max", 0, "max line size")
flag.BoolVar(&config.Follow, "f", false, "wait for additional data to be appended to the file")
flag.BoolVar(&config.ReOpen, "F", false, "follow, and track file rename/rotation")
flag.BoolVar(&config.Poll, "p", false, "use polling, instead of inotify")
flag.Parse()
if config.ReOpen {
config.Follow = true
}
config.MaxLineSize = maxlinesize
return config, n
}
func main() {
config, n := args2config()
if flag.NFlag() < 1 {
fmt.Println("need one or more files as arguments")
os.Exit(1)
}
if n != 0 {
config.Location = &tail.SeekInfo{-n, os.SEEK_END}
}
done := make(chan bool)
for _, filename := range flag.Args() {
go tailFile(filename, config, done)
}
for _, _ = range flag.Args() {
<-done
}
}
func tailFile(filename string, config tail.Config, done chan bool) {
defer func() { done <- true }()
t, err := tail.TailFile(filename, config)
if err != nil {
fmt.Println(err)
return
}
for line := range t.Lines {
fmt.Println(line.Text)
}
err = t.Wait()
if err != nil {
fmt.Println(err)
}
}

7
vendor/github.com/hpcloud/tail/ratelimiter/Licence generated vendored Normal file
View File

@@ -0,0 +1,7 @@
Copyright (C) 2013 99designs
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,97 @@
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
package ratelimiter
import (
"time"
)
type LeakyBucket struct {
Size uint16
Fill float64
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
Lastupdate time.Time
Now func() time.Time
}
func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
bucket := LeakyBucket{
Size: size,
Fill: 0,
LeakInterval: leakInterval,
Now: time.Now,
Lastupdate: time.Now(),
}
return &bucket
}
func (b *LeakyBucket) updateFill() {
now := b.Now()
if b.Fill > 0 {
elapsed := now.Sub(b.Lastupdate)
b.Fill -= float64(elapsed) / float64(b.LeakInterval)
if b.Fill < 0 {
b.Fill = 0
}
}
b.Lastupdate = now
}
func (b *LeakyBucket) Pour(amount uint16) bool {
b.updateFill()
var newfill float64 = b.Fill + float64(amount)
if newfill > float64(b.Size) {
return false
}
b.Fill = newfill
return true
}
// The time at which this bucket will be completely drained
func (b *LeakyBucket) DrainedAt() time.Time {
return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
}
// The duration until this bucket is completely drained
func (b *LeakyBucket) TimeToDrain() time.Duration {
return b.DrainedAt().Sub(b.Now())
}
func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
return b.Now().Sub(b.Lastupdate)
}
type LeakyBucketSer struct {
Size uint16
Fill float64
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
Lastupdate time.Time
}
func (b *LeakyBucket) Serialise() *LeakyBucketSer {
bucket := LeakyBucketSer{
Size: b.Size,
Fill: b.Fill,
LeakInterval: b.LeakInterval,
Lastupdate: b.Lastupdate,
}
return &bucket
}
func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
bucket := LeakyBucket{
Size: b.Size,
Fill: b.Fill,
LeakInterval: b.LeakInterval,
Lastupdate: b.Lastupdate,
Now: time.Now,
}
return &bucket
}

View File

@@ -0,0 +1,73 @@
package ratelimiter
import (
"testing"
"time"
)
func TestPour(t *testing.T) {
bucket := NewLeakyBucket(60, time.Second)
bucket.Lastupdate = time.Unix(0, 0)
bucket.Now = func() time.Time { return time.Unix(1, 0) }
if bucket.Pour(61) {
t.Error("Expected false")
}
if !bucket.Pour(10) {
t.Error("Expected true")
}
if !bucket.Pour(49) {
t.Error("Expected true")
}
if bucket.Pour(2) {
t.Error("Expected false")
}
bucket.Now = func() time.Time { return time.Unix(61, 0) }
if !bucket.Pour(60) {
t.Error("Expected true")
}
if bucket.Pour(1) {
t.Error("Expected false")
}
bucket.Now = func() time.Time { return time.Unix(70, 0) }
if !bucket.Pour(1) {
t.Error("Expected true")
}
}
func TestTimeSinceLastUpdate(t *testing.T) {
bucket := NewLeakyBucket(60, time.Second)
bucket.Now = func() time.Time { return time.Unix(1, 0) }
bucket.Pour(1)
bucket.Now = func() time.Time { return time.Unix(2, 0) }
sinceLast := bucket.TimeSinceLastUpdate()
if sinceLast != time.Second*1 {
t.Errorf("Expected time since last update to be less than 1 second, got %d", sinceLast)
}
}
func TestTimeToDrain(t *testing.T) {
bucket := NewLeakyBucket(60, time.Second)
bucket.Now = func() time.Time { return time.Unix(1, 0) }
bucket.Pour(10)
if bucket.TimeToDrain() != time.Second*10 {
t.Error("Time to drain should be 10 seconds")
}
bucket.Now = func() time.Time { return time.Unix(2, 0) }
if bucket.TimeToDrain() != time.Second*9 {
t.Error("Time to drain should be 9 seconds")
}
}

60
vendor/github.com/hpcloud/tail/ratelimiter/memory.go generated vendored Normal file
View File

@@ -0,0 +1,60 @@
package ratelimiter
import (
"errors"
"time"
)
const (
GC_SIZE int = 100
GC_PERIOD time.Duration = 60 * time.Second
)
type Memory struct {
store map[string]LeakyBucket
lastGCCollected time.Time
}
func NewMemory() *Memory {
m := new(Memory)
m.store = make(map[string]LeakyBucket)
m.lastGCCollected = time.Now()
return m
}
func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
bucket, ok := m.store[key]
if !ok {
return nil, errors.New("miss")
}
return &bucket, nil
}
func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
if len(m.store) > GC_SIZE {
m.GarbageCollect()
}
m.store[key] = bucket
return nil
}
func (m *Memory) GarbageCollect() {
now := time.Now()
// rate limit GC to once per minute
if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
for key, bucket := range m.store {
// if the bucket is drained, then GC
if bucket.DrainedAt().Unix() < now.Unix() {
delete(m.store, key)
}
}
m.lastGCCollected = now
}
}

View File

@@ -0,0 +1,6 @@
package ratelimiter
type Storage interface {
GetBucketFor(string) (*LeakyBucket, error)
SetBucketFor(string, LeakyBucket) error
}

437
vendor/github.com/hpcloud/tail/tail.go generated vendored Normal file
View File

@@ -0,0 +1,437 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package tail
import (
"bufio"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"sync"
"time"
"github.com/hpcloud/tail/ratelimiter"
"github.com/hpcloud/tail/util"
"github.com/hpcloud/tail/watch"
"gopkg.in/tomb.v1"
)
var (
ErrStop = errors.New("tail should now stop")
)
type Line struct {
Text string
Time time.Time
Err error // Error from tail
}
// NewLine returns a Line with present time.
func NewLine(text string) *Line {
return &Line{text, time.Now(), nil}
}
// SeekInfo represents arguments to `os.Seek`
type SeekInfo struct {
Offset int64
Whence int // os.SEEK_*
}
type logger interface {
Fatal(v ...interface{})
Fatalf(format string, v ...interface{})
Fatalln(v ...interface{})
Panic(v ...interface{})
Panicf(format string, v ...interface{})
Panicln(v ...interface{})
Print(v ...interface{})
Printf(format string, v ...interface{})
Println(v ...interface{})
}
// Config is used to specify how a file must be tailed.
type Config struct {
// File-specifc
Location *SeekInfo // Seek to this location before tailing
ReOpen bool // Reopen recreated files (tail -F)
MustExist bool // Fail early if the file does not exist
Poll bool // Poll for file changes instead of using inotify
Pipe bool // Is a named pipe (mkfifo)
RateLimiter *ratelimiter.LeakyBucket
// Generic IO
Follow bool // Continue looking for new lines (tail -f)
MaxLineSize int // If non-zero, split longer lines into multiple lines
// Logger, when nil, is set to tail.DefaultLogger
// To disable logging: set field to tail.DiscardingLogger
Logger logger
}
type Tail struct {
Filename string
Lines chan *Line
Config
file *os.File
reader *bufio.Reader
watcher watch.FileWatcher
changes *watch.FileChanges
tomb.Tomb // provides: Done, Kill, Dying
lk sync.Mutex
}
var (
// DefaultLogger is used when Config.Logger == nil
DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
// DiscardingLogger can be used to disable logging output
DiscardingLogger = log.New(ioutil.Discard, "", 0)
)
// TailFile begins tailing the file. Output stream is made available
// via the `Tail.Lines` channel. To handle errors during tailing,
// invoke the `Wait` or `Err` method after finishing reading from the
// `Lines` channel.
func TailFile(filename string, config Config) (*Tail, error) {
if config.ReOpen && !config.Follow {
util.Fatal("cannot set ReOpen without Follow.")
}
t := &Tail{
Filename: filename,
Lines: make(chan *Line),
Config: config,
}
// when Logger was not specified in config, use default logger
if t.Logger == nil {
t.Logger = log.New(os.Stderr, "", log.LstdFlags)
}
if t.Poll {
t.watcher = watch.NewPollingFileWatcher(filename)
} else {
t.watcher = watch.NewInotifyFileWatcher(filename)
}
if t.MustExist {
var err error
t.file, err = OpenFile(t.Filename)
if err != nil {
return nil, err
}
}
go t.tailFileSync()
return t, nil
}
// Return the file's current position, like stdio's ftell().
// But this value is not very accurate.
// one line may already have been read into the channel (tail.Lines),
// so that line may be lost.
func (tail *Tail) Tell() (offset int64, err error) {
if tail.file == nil {
return
}
offset, err = tail.file.Seek(0, os.SEEK_CUR)
if err != nil {
return
}
tail.lk.Lock()
defer tail.lk.Unlock()
if tail.reader == nil {
return
}
offset -= int64(tail.reader.Buffered())
return
}
// Stop stops the tailing activity.
func (tail *Tail) Stop() error {
tail.Kill(nil)
return tail.Wait()
}
// StopAtEOF stops tailing as soon as the end of the file is reached.
func (tail *Tail) StopAtEOF() error {
tail.Kill(errStopAtEOF)
return tail.Wait()
}
var errStopAtEOF = errors.New("tail: stop at eof")
func (tail *Tail) close() {
close(tail.Lines)
tail.closeFile()
}
func (tail *Tail) closeFile() {
if tail.file != nil {
tail.file.Close()
tail.file = nil
}
}
func (tail *Tail) reopen() error {
tail.closeFile()
for {
var err error
tail.file, err = OpenFile(tail.Filename)
if err != nil {
if os.IsNotExist(err) {
tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
if err == tomb.ErrDying {
return err
}
return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
}
continue
}
return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
}
break
}
return nil
}
func (tail *Tail) readLine() (string, error) {
tail.lk.Lock()
line, err := tail.reader.ReadString('\n')
tail.lk.Unlock()
if err != nil {
// Note ReadString "returns the data read before the error" in
// case of an error, including EOF, so we return it as is. The
// caller is expected to process it if err is EOF.
return line, err
}
line = strings.TrimRight(line, "\n")
return line, err
}
func (tail *Tail) tailFileSync() {
defer tail.Done()
defer tail.close()
if !tail.MustExist {
// deferred first open.
err := tail.reopen()
if err != nil {
if err != tomb.ErrDying {
tail.Kill(err)
}
return
}
}
// Seek to requested location on first open of the file.
if tail.Location != nil {
_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
if err != nil {
tail.Killf("Seek error on %s: %s", tail.Filename, err)
return
}
}
tail.openReader()
var offset int64
var err error
// Read line by line.
for {
// do not seek in named pipes
if !tail.Pipe {
// grab the position in case we need to back up in the event of a half-line
offset, err = tail.Tell()
if err != nil {
tail.Kill(err)
return
}
}
line, err := tail.readLine()
// Process `line` even if err is EOF.
if err == nil {
cooloff := !tail.sendLine(line)
if cooloff {
// Wait a second before seeking till the end of
// file when rate limit is reached.
msg := ("Too much log activity; waiting a second " +
"before resuming tailing")
tail.Lines <- &Line{msg, time.Now(), errors.New(msg)}
select {
case <-time.After(time.Second):
case <-tail.Dying():
return
}
if err := tail.seekEnd(); err != nil {
tail.Kill(err)
return
}
}
} else if err == io.EOF {
if !tail.Follow {
if line != "" {
tail.sendLine(line)
}
return
}
if tail.Follow && line != "" {
// this has the potential to never return the last line if
// it's not followed by a newline; seems a fair trade here
err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})
if err != nil {
tail.Kill(err)
return
}
}
// When EOF is reached, wait for more data to become
// available. Wait strategy is based on the `tail.watcher`
// implementation (inotify or polling).
err := tail.waitForChanges()
if err != nil {
if err != ErrStop {
tail.Kill(err)
}
return
}
} else {
// non-EOF error
tail.Killf("Error reading %s: %s", tail.Filename, err)
return
}
select {
case <-tail.Dying():
if tail.Err() == errStopAtEOF {
continue
}
return
default:
}
}
}
// waitForChanges waits until the file has been appended, deleted,
// moved or truncated. When moved or deleted - the file will be
// reopened if ReOpen is true. Truncated files are always reopened.
func (tail *Tail) waitForChanges() error {
if tail.changes == nil {
pos, err := tail.file.Seek(0, os.SEEK_CUR)
if err != nil {
return err
}
tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
if err != nil {
return err
}
}
select {
case <-tail.changes.Modified:
return nil
case <-tail.changes.Deleted:
tail.changes = nil
if tail.ReOpen {
// XXX: we must not log from a library.
tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
if err := tail.reopen(); err != nil {
return err
}
tail.Logger.Printf("Successfully reopened %s", tail.Filename)
tail.openReader()
return nil
} else {
tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
return ErrStop
}
case <-tail.changes.Truncated:
// Always reopen truncated files (Follow is true)
tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
if err := tail.reopen(); err != nil {
return err
}
tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
tail.openReader()
return nil
case <-tail.Dying():
return ErrStop
}
panic("unreachable")
}
func (tail *Tail) openReader() {
if tail.MaxLineSize > 0 {
// add 2 to account for newline characters
tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
} else {
tail.reader = bufio.NewReader(tail.file)
}
}
func (tail *Tail) seekEnd() error {
return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END})
}
func (tail *Tail) seekTo(pos SeekInfo) error {
_, err := tail.file.Seek(pos.Offset, pos.Whence)
if err != nil {
return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
}
// Reset the read buffer whenever the file is re-seek'ed
tail.reader.Reset(tail.file)
return nil
}
// sendLine sends the line(s) to Lines channel, splitting longer lines
// if necessary. Return false if rate limit is reached.
func (tail *Tail) sendLine(line string) bool {
now := time.Now()
lines := []string{line}
// Split longer lines
if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
lines = util.PartitionString(line, tail.MaxLineSize)
}
for _, line := range lines {
tail.Lines <- &Line{line, now, nil}
}
if tail.Config.RateLimiter != nil {
ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
if !ok {
tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
tail.Filename)
return false
}
}
return true
}
// Cleanup removes inotify watches added by the tail package. This function is
// meant to be invoked from a process's exit handler. Linux kernel may not
// automatically remove inotify watches after the process exits.
func (tail *Tail) Cleanup() {
watch.Cleanup(tail.Filename)
}
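
For reference, the consumption pattern the TailFile documentation above describes (drain the Lines channel, surface tailing errors via Wait, then Cleanup) looks roughly like the following sketch; the log file path is illustrative only:

package main

import (
	"fmt"
	"log"

	"github.com/hpcloud/tail"
)

// Illustrative sketch of the documented usage: read lines as they appear,
// then check for tailing errors and remove any inotify watches.
func main() {
	t, err := tail.TailFile("/var/log/example.log", tail.Config{Follow: true})
	if err != nil {
		log.Fatal(err)
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
	if err := t.Wait(); err != nil {
		log.Println("tail ended with error:", err)
	}
	t.Cleanup()
}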

11
vendor/github.com/hpcloud/tail/tail_posix.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
// +build linux darwin freebsd netbsd openbsd
package tail
import (
"os"
)
func OpenFile(name string) (file *os.File, err error) {
return os.Open(name)
}

563
vendor/github.com/hpcloud/tail/tail_test.go generated vendored Normal file
View File

@@ -0,0 +1,563 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
// TODO:
// * repeat all the tests with Poll:true
package tail
import (
_ "fmt"
"io/ioutil"
"os"
"strings"
"testing"
"time"
"github.com/hpcloud/tail/ratelimiter"
"github.com/hpcloud/tail/watch"
)
func init() {
// Clear the temporary test directory
err := os.RemoveAll(".test")
if err != nil {
panic(err)
}
}
func TestMain(m *testing.M) {
// Use a smaller poll duration for faster test runs. Keep it below
// 100ms (which value is used as common delays for tests)
watch.POLL_DURATION = 5 * time.Millisecond
os.Exit(m.Run())
}
func TestMustExist(t *testing.T) {
tail, err := TailFile("/no/such/file", Config{Follow: true, MustExist: true})
if err == nil {
t.Error("MustExist:true is violated")
tail.Stop()
}
tail, err = TailFile("/no/such/file", Config{Follow: true, MustExist: false})
if err != nil {
t.Error("MustExist:false is violated")
}
tail.Stop()
_, err = TailFile("README.md", Config{Follow: true, MustExist: true})
if err != nil {
t.Error("MustExist:true on an existing file is violated")
}
tail.Cleanup()
}
func TestWaitsForFileToExist(t *testing.T) {
tailTest := NewTailTest("waits-for-file-to-exist", t)
tail := tailTest.StartTail("test.txt", Config{})
go tailTest.VerifyTailOutput(tail, []string{"hello", "world"}, false)
<-time.After(100 * time.Millisecond)
tailTest.CreateFile("test.txt", "hello\nworld\n")
tailTest.Cleanup(tail, true)
}
func TestWaitsForFileToExistRelativePath(t *testing.T) {
tailTest := NewTailTest("waits-for-file-to-exist-relative", t)
oldWD, err := os.Getwd()
if err != nil {
tailTest.Fatal(err)
}
os.Chdir(tailTest.path)
defer os.Chdir(oldWD)
tail, err := TailFile("test.txt", Config{})
if err != nil {
tailTest.Fatal(err)
}
go tailTest.VerifyTailOutput(tail, []string{"hello", "world"}, false)
<-time.After(100 * time.Millisecond)
if err := ioutil.WriteFile("test.txt", []byte("hello\nworld\n"), 0600); err != nil {
tailTest.Fatal(err)
}
tailTest.Cleanup(tail, true)
}
func TestStop(t *testing.T) {
tail, err := TailFile("_no_such_file", Config{Follow: true, MustExist: false})
if err != nil {
t.Error("MustExist:false is violated")
}
if tail.Stop() != nil {
t.Error("Should be stoped successfully")
}
tail.Cleanup()
}
func TestStopAtEOF(t *testing.T) {
tailTest := NewTailTest("maxlinesize", t)
tailTest.CreateFile("test.txt", "hello\nthere\nworld\n")
tail := tailTest.StartTail("test.txt", Config{Follow: true, Location: nil})
// read "hello"
line := <-tail.Lines
if line.Text != "hello" {
t.Errorf("Expected to get 'hello', got '%s' instead", line.Text)
}
tailTest.VerifyTailOutput(tail, []string{"there", "world"}, false)
tail.StopAtEOF()
tailTest.Cleanup(tail, true)
}
func TestMaxLineSizeFollow(t *testing.T) {
// As last file line does not end with newline, it will not be present in tail's output
maxLineSize(t, true, "hello\nworld\nfin\nhe", []string{"hel", "lo", "wor", "ld", "fin"})
}
func TestMaxLineSizeNoFollow(t *testing.T) {
maxLineSize(t, false, "hello\nworld\nfin\nhe", []string{"hel", "lo", "wor", "ld", "fin", "he"})
}
func TestOver4096ByteLine(t *testing.T) {
tailTest := NewTailTest("Over4096ByteLine", t)
testString := strings.Repeat("a", 4097)
tailTest.CreateFile("test.txt", "test\n"+testString+"\nhello\nworld\n")
tail := tailTest.StartTail("test.txt", Config{Follow: true, Location: nil})
go tailTest.VerifyTailOutput(tail, []string{"test", testString, "hello", "world"}, false)
// Delete after a reasonable delay, to give tail sufficient time
// to read all lines.
<-time.After(100 * time.Millisecond)
tailTest.RemoveFile("test.txt")
tailTest.Cleanup(tail, true)
}
func TestOver4096ByteLineWithSetMaxLineSize(t *testing.T) {
tailTest := NewTailTest("Over4096ByteLineMaxLineSize", t)
testString := strings.Repeat("a", 4097)
tailTest.CreateFile("test.txt", "test\n"+testString+"\nhello\nworld\n")
tail := tailTest.StartTail("test.txt", Config{Follow: true, Location: nil, MaxLineSize: 4097})
go tailTest.VerifyTailOutput(tail, []string{"test", testString, "hello", "world"}, false)
// Delete after a reasonable delay, to give tail sufficient time
// to read all lines.
<-time.After(100 * time.Millisecond)
tailTest.RemoveFile("test.txt")
tailTest.Cleanup(tail, true)
}
func TestLocationFull(t *testing.T) {
tailTest := NewTailTest("location-full", t)
tailTest.CreateFile("test.txt", "hello\nworld\n")
tail := tailTest.StartTail("test.txt", Config{Follow: true, Location: nil})
go tailTest.VerifyTailOutput(tail, []string{"hello", "world"}, false)
// Delete after a reasonable delay, to give tail sufficient time
// to read all lines.
<-time.After(100 * time.Millisecond)
tailTest.RemoveFile("test.txt")
tailTest.Cleanup(tail, true)
}
func TestLocationFullDontFollow(t *testing.T) {
tailTest := NewTailTest("location-full-dontfollow", t)
tailTest.CreateFile("test.txt", "hello\nworld\n")
tail := tailTest.StartTail("test.txt", Config{Follow: false, Location: nil})
go tailTest.VerifyTailOutput(tail, []string{"hello", "world"}, false)
// Add more data only after reasonable delay.
<-time.After(100 * time.Millisecond)
tailTest.AppendFile("test.txt", "more\ndata\n")
<-time.After(100 * time.Millisecond)
tailTest.Cleanup(tail, true)
}
func TestLocationEnd(t *testing.T) {
tailTest := NewTailTest("location-end", t)
tailTest.CreateFile("test.txt", "hello\nworld\n")
tail := tailTest.StartTail("test.txt", Config{Follow: true, Location: &SeekInfo{0, os.SEEK_END}})
go tailTest.VerifyTailOutput(tail, []string{"more", "data"}, false)
<-time.After(100 * time.Millisecond)
tailTest.AppendFile("test.txt", "more\ndata\n")
// Delete after a reasonable delay, to give tail sufficient time
// to read all lines.
<-time.After(100 * time.Millisecond)
tailTest.RemoveFile("test.txt")
tailTest.Cleanup(tail, true)
}
func TestLocationMiddle(t *testing.T) {
// Test reading from middle.
tailTest := NewTailTest("location-middle", t)
tailTest.CreateFile("test.txt", "hello\nworld\n")
tail := tailTest.StartTail("test.txt", Config{Follow: true, Location: &SeekInfo{-6, os.SEEK_END}})
go tailTest.VerifyTailOutput(tail, []string{"world", "more", "data"}, false)
<-time.After(100 * time.Millisecond)
tailTest.AppendFile("test.txt", "more\ndata\n")
// Delete after a reasonable delay, to give tail sufficient time
// to read all lines.
<-time.After(100 * time.Millisecond)
tailTest.RemoveFile("test.txt")
tailTest.Cleanup(tail, true)
}
// The use of polling file watcher could affect file rotation
// (detected via renames), so test these explicitly.
func TestReOpenInotify(t *testing.T) {
reOpen(t, false)
}
func TestReOpenPolling(t *testing.T) {
reOpen(t, true)
}
// The use of polling file watcher could affect file rotation
// (detected via renames), so test these explicitly.
func TestReSeekInotify(t *testing.T) {
reSeek(t, false)
}
func TestReSeekPolling(t *testing.T) {
reSeek(t, true)
}
func TestRateLimiting(t *testing.T) {
tailTest := NewTailTest("rate-limiting", t)
tailTest.CreateFile("test.txt", "hello\nworld\nagain\nextra\n")
config := Config{
Follow: true,
RateLimiter: ratelimiter.NewLeakyBucket(2, time.Second)}
leakybucketFull := "Too much log activity; waiting a second before resuming tailing"
tail := tailTest.StartTail("test.txt", config)
// TODO: also verify that tail resumes after the cooloff period.
go tailTest.VerifyTailOutput(tail, []string{
"hello", "world", "again",
leakybucketFull,
"more", "data",
leakybucketFull}, false)
// Add more data only after reasonable delay.
<-time.After(1200 * time.Millisecond)
tailTest.AppendFile("test.txt", "more\ndata\n")
// Delete after a reasonable delay, to give tail sufficient time
// to read all lines.
<-time.After(100 * time.Millisecond)
tailTest.RemoveFile("test.txt")
tailTest.Cleanup(tail, true)
}
func TestTell(t *testing.T) {
tailTest := NewTailTest("tell-position", t)
tailTest.CreateFile("test.txt", "hello\nworld\nagain\nmore\n")
config := Config{
Follow: false,
Location: &SeekInfo{0, os.SEEK_SET}}
tail := tailTest.StartTail("test.txt", config)
// read one line
<-tail.Lines
offset, err := tail.Tell()
if err != nil {
tailTest.Errorf("Tell return error: %s", err.Error())
}
tail.Done()
// tail.close()
config = Config{
Follow: false,
Location: &SeekInfo{offset, os.SEEK_SET}}
tail = tailTest.StartTail("test.txt", config)
for l := range tail.Lines {
// one line may already have been read into the channel (tail.Lines),
// so that line may be lost.
if l.Text != "world" && l.Text != "again" {
tailTest.Fatalf("mismatch; expected world or again, but got %s",
l.Text)
}
break
}
tailTest.RemoveFile("test.txt")
tail.Done()
tail.Cleanup()
}
func TestBlockUntilExists(t *testing.T) {
tailTest := NewTailTest("block-until-file-exists", t)
config := Config{
Follow: true,
}
tail := tailTest.StartTail("test.txt", config)
go func() {
time.Sleep(100 * time.Millisecond)
tailTest.CreateFile("test.txt", "hello world\n")
}()
for l := range tail.Lines {
if l.Text != "hello world" {
tailTest.Fatalf("mismatch; expected hello world, but got %s",
l.Text)
}
break
}
tailTest.RemoveFile("test.txt")
tail.Stop()
tail.Cleanup()
}
func maxLineSize(t *testing.T, follow bool, fileContent string, expected []string) {
tailTest := NewTailTest("maxlinesize", t)
tailTest.CreateFile("test.txt", fileContent)
tail := tailTest.StartTail("test.txt", Config{Follow: follow, Location: nil, MaxLineSize: 3})
go tailTest.VerifyTailOutput(tail, expected, false)
// Delete after a reasonable delay, to give tail sufficient time
// to read all lines.
<-time.After(100 * time.Millisecond)
tailTest.RemoveFile("test.txt")
tailTest.Cleanup(tail, true)
}
func reOpen(t *testing.T, poll bool) {
var name string
var delay time.Duration
if poll {
name = "reopen-polling"
delay = 300 * time.Millisecond // account for POLL_DURATION
} else {
name = "reopen-inotify"
delay = 100 * time.Millisecond
}
tailTest := NewTailTest(name, t)
tailTest.CreateFile("test.txt", "hello\nworld\n")
tail := tailTest.StartTail(
"test.txt",
Config{Follow: true, ReOpen: true, Poll: poll})
content := []string{"hello", "world", "more", "data", "endofworld"}
go tailTest.VerifyTailOutput(tail, content, false)
if poll {
// deletion must trigger reopen
<-time.After(delay)
tailTest.RemoveFile("test.txt")
<-time.After(delay)
tailTest.CreateFile("test.txt", "more\ndata\n")
} else {
// In inotify mode, fsnotify is currently unable to deliver notifications
// about deletion of open files, so we are not testing file deletion.
// (see https://github.com/fsnotify/fsnotify/issues/194 for details).
<-time.After(delay)
tailTest.AppendToFile("test.txt", "more\ndata\n")
}
// rename must trigger reopen
<-time.After(delay)
tailTest.RenameFile("test.txt", "test.txt.rotated")
<-time.After(delay)
tailTest.CreateFile("test.txt", "endofworld\n")
// Delete after a reasonable delay, to give tail sufficient time
// to read all lines.
<-time.After(delay)
tailTest.RemoveFile("test.txt")
<-time.After(delay)
// Do not bother with stopping as it could kill the tomb during
// the reading of data written above. Timings can vary based on
// test environment.
tailTest.Cleanup(tail, false)
}
func TestInotify_WaitForCreateThenMove(t *testing.T) {
tailTest := NewTailTest("wait-for-create-then-reopen", t)
os.Remove(tailTest.path + "/test.txt") // Make sure the file does NOT exist.
tail := tailTest.StartTail(
"test.txt",
Config{Follow: true, ReOpen: true, Poll: false})
content := []string{"hello", "world", "endofworld"}
go tailTest.VerifyTailOutput(tail, content, false)
time.Sleep(50 * time.Millisecond)
tailTest.CreateFile("test.txt", "hello\nworld\n")
time.Sleep(50 * time.Millisecond)
tailTest.RenameFile("test.txt", "test.txt.rotated")
time.Sleep(50 * time.Millisecond)
tailTest.CreateFile("test.txt", "endofworld\n")
time.Sleep(50 * time.Millisecond)
tailTest.RemoveFile("test.txt.rotated")
tailTest.RemoveFile("test.txt")
// Do not bother with stopping as it could kill the tomb during
// the reading of data written above. Timings can vary based on
// test environment.
tailTest.Cleanup(tail, false)
}
func reSeek(t *testing.T, poll bool) {
var name string
if poll {
name = "reseek-polling"
} else {
name = "reseek-inotify"
}
tailTest := NewTailTest(name, t)
tailTest.CreateFile("test.txt", "a really long string goes here\nhello\nworld\n")
tail := tailTest.StartTail(
"test.txt",
Config{Follow: true, ReOpen: false, Poll: poll})
go tailTest.VerifyTailOutput(tail, []string{
"a really long string goes here", "hello", "world", "h311o", "w0r1d", "endofworld"}, false)
// truncate now
<-time.After(100 * time.Millisecond)
tailTest.TruncateFile("test.txt", "h311o\nw0r1d\nendofworld\n")
// Delete after a reasonable delay, to give tail sufficient time
// to read all lines.
<-time.After(100 * time.Millisecond)
tailTest.RemoveFile("test.txt")
// Do not bother with stopping as it could kill the tomb during
// the reading of data written above. Timings can vary based on
// test environment.
tailTest.Cleanup(tail, false)
}
// Test library
type TailTest struct {
Name string
path string
done chan struct{}
*testing.T
}
func NewTailTest(name string, t *testing.T) TailTest {
tt := TailTest{name, ".test/" + name, make(chan struct{}), t}
err := os.MkdirAll(tt.path, os.ModeTemporary|0700)
if err != nil {
tt.Fatal(err)
}
return tt
}
func (t TailTest) CreateFile(name string, contents string) {
err := ioutil.WriteFile(t.path+"/"+name, []byte(contents), 0600)
if err != nil {
t.Fatal(err)
}
}
func (t TailTest) AppendToFile(name string, contents string) {
err := ioutil.WriteFile(t.path+"/"+name, []byte(contents), 0600|os.ModeAppend)
if err != nil {
t.Fatal(err)
}
}
func (t TailTest) RemoveFile(name string) {
err := os.Remove(t.path + "/" + name)
if err != nil {
t.Fatal(err)
}
}
func (t TailTest) RenameFile(oldname string, newname string) {
oldname = t.path + "/" + oldname
newname = t.path + "/" + newname
err := os.Rename(oldname, newname)
if err != nil {
t.Fatal(err)
}
}
func (t TailTest) AppendFile(name string, contents string) {
f, err := os.OpenFile(t.path+"/"+name, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
t.Fatal(err)
}
defer f.Close()
_, err = f.WriteString(contents)
if err != nil {
t.Fatal(err)
}
}
func (t TailTest) TruncateFile(name string, contents string) {
f, err := os.OpenFile(t.path+"/"+name, os.O_TRUNC|os.O_WRONLY, 0600)
if err != nil {
t.Fatal(err)
}
defer f.Close()
_, err = f.WriteString(contents)
if err != nil {
t.Fatal(err)
}
}
func (t TailTest) StartTail(name string, config Config) *Tail {
tail, err := TailFile(t.path+"/"+name, config)
if err != nil {
t.Fatal(err)
}
return tail
}
func (t TailTest) VerifyTailOutput(tail *Tail, lines []string, expectEOF bool) {
defer close(t.done)
t.ReadLines(tail, lines)
// It is important to do this if only EOF is expected
// otherwise we could block on <-tail.Lines
if expectEOF {
line, ok := <-tail.Lines
if ok {
t.Fatalf("more content from tail: %+v", line)
}
}
}
func (t TailTest) ReadLines(tail *Tail, lines []string) {
for idx, line := range lines {
tailedLine, ok := <-tail.Lines
if !ok {
// tail.Lines is closed and empty.
err := tail.Err()
if err != nil {
t.Fatalf("tail ended with error: %v", err)
}
t.Fatalf("tail ended early; expecting more: %v", lines[idx:])
}
if tailedLine == nil {
t.Fatalf("tail.Lines returned nil; not possible")
}
// Note: not checking .Err as the `lines` argument is designed
// to match error strings as well.
if tailedLine.Text != line {
t.Fatalf(
"unexpected line/err from tail: "+
"expecting <<%s>>>, but got <<<%s>>>",
line, tailedLine.Text)
}
}
}
func (t TailTest) Cleanup(tail *Tail, stop bool) {
<-t.done
if stop {
tail.Stop()
}
tail.Cleanup()
}

12
vendor/github.com/hpcloud/tail/tail_windows.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
// +build windows
package tail
import (
"github.com/hpcloud/tail/winfile"
"os"
)
func OpenFile(name string) (file *os.File, err error) {
return winfile.OpenFile(name, os.O_RDONLY, 0)
}

48
vendor/github.com/hpcloud/tail/util/util.go generated vendored Normal file
View File

@@ -0,0 +1,48 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package util
import (
"fmt"
"log"
"os"
"runtime/debug"
)
type Logger struct {
*log.Logger
}
var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
// Fatal is like panic, except it displays only the current goroutine's stack.
func Fatal(format string, v ...interface{}) {
// https://github.com/hpcloud/log/blob/master/log.go#L45
LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
os.Exit(1)
}
// PartitionString partitions the string into chunks of the given size,
// with the last chunk of variable size.
func PartitionString(s string, chunkSize int) []string {
if chunkSize <= 0 {
panic("invalid chunkSize")
}
length := len(s)
chunks := 1 + length/chunkSize
start := 0
end := chunkSize
parts := make([]string, 0, chunks)
for {
if end > length {
end = length
}
parts = append(parts, s[start:end])
if end == length {
break
}
start, end = end, end+chunkSize
}
return parts
}
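
For reference, a minimal sketch of how PartitionString behaves when called from outside this package (the program below is illustrative, not part of the vendored code):

package main

import (
	"fmt"

	"github.com/hpcloud/tail/util"
)

func main() {
	// Chunks of 4 bytes; the final chunk carries the remainder.
	fmt.Println(util.PartitionString("hello world", 4)) // [hell o wo rld]
}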

36
vendor/github.com/hpcloud/tail/watch/filechanges.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
package watch
type FileChanges struct {
Modified chan bool // Channel to get notified of modifications
Truncated chan bool // Channel to get notified of truncations
Deleted chan bool // Channel to get notified of deletions/renames
}
func NewFileChanges() *FileChanges {
return &FileChanges{
make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)}
}
func (fc *FileChanges) NotifyModified() {
sendOnlyIfEmpty(fc.Modified)
}
func (fc *FileChanges) NotifyTruncated() {
sendOnlyIfEmpty(fc.Truncated)
}
func (fc *FileChanges) NotifyDeleted() {
sendOnlyIfEmpty(fc.Deleted)
}
// sendOnlyIfEmpty sends on a bool channel only if the channel has no
// backlog to be read by other goroutines. This concurrency pattern
// can be used to notify other goroutines if and only if they are
// looking for it (i.e., subsequent notifications can be compressed
// into one).
func sendOnlyIfEmpty(ch chan bool) {
select {
case ch <- true:
default:
}
}
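
A quick sketch of the coalescing behaviour described above, written as a hypothetical example test inside this package (a standard "fmt" import is assumed):

func ExampleSendOnlyIfEmpty() {
	ch := make(chan bool, 1)
	sendOnlyIfEmpty(ch) // the first notification is buffered
	sendOnlyIfEmpty(ch) // dropped: a signal is already pending
	fmt.Println(<-ch)   // a single receive observes both notifications
	// Output: true
}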

135
vendor/github.com/hpcloud/tail/watch/inotify.go generated vendored Normal file
View File

@@ -0,0 +1,135 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package watch
import (
"fmt"
"os"
"path/filepath"
"github.com/hpcloud/tail/util"
"gopkg.in/fsnotify/fsnotify.v1"
"gopkg.in/tomb.v1"
)
// InotifyFileWatcher uses inotify to monitor file changes.
type InotifyFileWatcher struct {
Filename string
Size int64
}
func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
return fw
}
func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
err := WatchCreate(fw.Filename)
if err != nil {
return err
}
defer RemoveWatchCreate(fw.Filename)
// Do a real check now as the file might have been created before
// calling `WatchCreate` above.
if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
// file exists, or stat returned an error.
return err
}
events := Events(fw.Filename)
for {
select {
case evt, ok := <-events:
if !ok {
return fmt.Errorf("inotify watcher has been closed")
}
evtName, err := filepath.Abs(evt.Name)
if err != nil {
return err
}
fwFilename, err := filepath.Abs(fw.Filename)
if err != nil {
return err
}
if evtName == fwFilename {
return nil
}
case <-t.Dying():
return tomb.ErrDying
}
}
panic("unreachable")
}
func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
err := Watch(fw.Filename)
if err != nil {
return nil, err
}
changes := NewFileChanges()
fw.Size = pos
go func() {
events := Events(fw.Filename)
for {
prevSize := fw.Size
var evt fsnotify.Event
var ok bool
select {
case evt, ok = <-events:
if !ok {
RemoveWatch(fw.Filename)
return
}
case <-t.Dying():
RemoveWatch(fw.Filename)
return
}
switch {
case evt.Op&fsnotify.Remove == fsnotify.Remove:
fallthrough
case evt.Op&fsnotify.Rename == fsnotify.Rename:
RemoveWatch(fw.Filename)
changes.NotifyDeleted()
return
// With an open fd, unlink(fd) - inotify returns IN_ATTRIB (== fsnotify.Chmod)
case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
fallthrough
case evt.Op&fsnotify.Write == fsnotify.Write:
fi, err := os.Stat(fw.Filename)
if err != nil {
if os.IsNotExist(err) {
RemoveWatch(fw.Filename)
changes.NotifyDeleted()
return
}
// XXX: report this error back to the user
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
}
fw.Size = fi.Size()
if prevSize > 0 && prevSize > fw.Size {
changes.NotifyTruncated()
} else {
changes.NotifyModified()
}
prevSize = fw.Size
}
}
}()
return changes, nil
}

248
vendor/github.com/hpcloud/tail/watch/inotify_tracker.go generated vendored Normal file
View File

@@ -0,0 +1,248 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package watch
import (
"log"
"os"
"path/filepath"
"sync"
"syscall"
"github.com/hpcloud/tail/util"
"gopkg.in/fsnotify/fsnotify.v1"
)
type InotifyTracker struct {
mux sync.Mutex
watcher *fsnotify.Watcher
chans map[string]chan fsnotify.Event
done map[string]chan bool
watchNums map[string]int
watch chan *watchInfo
remove chan *watchInfo
error chan error
}
type watchInfo struct {
op fsnotify.Op
fname string
}
func (this *watchInfo) isCreate() bool {
return this.op == fsnotify.Create
}
var (
// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
shared *InotifyTracker
// these are used to ensure the shared InotifyTracker is run exactly once
once = sync.Once{}
goRun = func() {
shared = &InotifyTracker{
mux: sync.Mutex{},
chans: make(map[string]chan fsnotify.Event),
done: make(map[string]chan bool),
watchNums: make(map[string]int),
watch: make(chan *watchInfo),
remove: make(chan *watchInfo),
error: make(chan error),
}
go shared.run()
}
logger = log.New(os.Stderr, "", log.LstdFlags)
)
// Watch signals the run goroutine to begin watching the input filename
func Watch(fname string) error {
return watch(&watchInfo{
fname: fname,
})
}
// WatchCreate signals the run goroutine to begin watching for creation of the input filename.
// If you use WatchCreate, do not call Cleanup; call RemoveWatchCreate instead.
func WatchCreate(fname string) error {
return watch(&watchInfo{
op: fsnotify.Create,
fname: fname,
})
}
func watch(winfo *watchInfo) error {
// start running the shared InotifyTracker if not already running
once.Do(goRun)
winfo.fname = filepath.Clean(winfo.fname)
shared.watch <- winfo
return <-shared.error
}
// RemoveWatch signals the run goroutine to remove the watch for the input filename
func RemoveWatch(fname string) error {
return remove(&watchInfo{
fname: fname,
})
}
// RemoveWatchCreate signals the run goroutine to remove the creation watch for the input filename
func RemoveWatchCreate(fname string) error {
return remove(&watchInfo{
op: fsnotify.Create,
fname: fname,
})
}
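
A hedged usage sketch of the pairing described in the comments above (the log path is hypothetical): creation watches are released with RemoveWatchCreate, ordinary content watches with Cleanup.

package main

import (
	"log"

	"github.com/hpcloud/tail/watch"
)

func main() {
	const name = "/var/log/app.log" // hypothetical path

	// Wait-for-creation watch: release with RemoveWatchCreate, not Cleanup.
	if err := watch.WatchCreate(name); err != nil {
		log.Fatal(err)
	}
	defer watch.RemoveWatchCreate(name)

	// Content watch on an existing file: release with Cleanup.
	if err := watch.Watch(name); err != nil {
		log.Fatal(err)
	}
	defer watch.Cleanup(name)
}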
func remove(winfo *watchInfo) error {
// start running the shared InotifyTracker if not already running
once.Do(goRun)
winfo.fname = filepath.Clean(winfo.fname)
shared.mux.Lock()
done := shared.done[winfo.fname]
if done != nil {
delete(shared.done, winfo.fname)
close(done)
}
shared.mux.Unlock()
shared.remove <- winfo
return <-shared.error
}
// Events returns a channel to which FileEvents corresponding to the input filename
// will be sent. This channel will be closed when removeWatch is called on this
// filename.
func Events(fname string) <-chan fsnotify.Event {
shared.mux.Lock()
defer shared.mux.Unlock()
return shared.chans[fname]
}
// Cleanup removes the watch for the input filename if necessary.
func Cleanup(fname string) error {
return RemoveWatch(fname)
}
// addWatch adds an fsnotify watch for the input filename (or its parent
// directory for create watches), reference-counting repeated requests.
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
shared.mux.Lock()
defer shared.mux.Unlock()
if shared.chans[winfo.fname] == nil {
shared.chans[winfo.fname] = make(chan fsnotify.Event)
}
if shared.done[winfo.fname] == nil {
shared.done[winfo.fname] = make(chan bool)
}
fname := winfo.fname
if winfo.isCreate() {
// Watch for new files to be created in the parent directory.
fname = filepath.Dir(fname)
}
var err error
// add the inotify watch only if this path is not already being watched
if shared.watchNums[fname] == 0 {
err = shared.watcher.Add(fname)
}
if err == nil {
shared.watchNums[fname]++
}
return err
}
// removeWatch closes the corresponding events channel and, once no watchers
// remain, removes the fsnotify watch for the input filename.
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error {
shared.mux.Lock()
ch := shared.chans[winfo.fname]
if ch != nil {
delete(shared.chans, winfo.fname)
close(ch)
}
fname := winfo.fname
if winfo.isCreate() {
// Watch for new files to be created in the parent directory.
fname = filepath.Dir(fname)
}
shared.watchNums[fname]--
watchNum := shared.watchNums[fname]
if watchNum == 0 {
delete(shared.watchNums, fname)
}
shared.mux.Unlock()
var err error
// If we were the last ones to watch this file, unsubscribe from inotify.
// This needs to happen after releasing the lock because fsnotify waits
// synchronously for the kernel to acknowledge the removal of the watch
// for this file, which causes us to deadlock if we still held the lock.
if watchNum == 0 {
err = shared.watcher.Remove(fname)
}
return err
}
// sendEvent sends the input event to the appropriate Tail.
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
name := filepath.Clean(event.Name)
shared.mux.Lock()
ch := shared.chans[name]
done := shared.done[name]
shared.mux.Unlock()
if ch != nil && done != nil {
select {
case ch <- event:
case <-done:
}
}
}
// run starts the goroutine in which the shared struct reads events from its
// Watcher's Event channel and sends the events to the appropriate Tail.
func (shared *InotifyTracker) run() {
watcher, err := fsnotify.NewWatcher()
if err != nil {
util.Fatal("failed to create Watcher")
}
shared.watcher = watcher
for {
select {
case winfo := <-shared.watch:
shared.error <- shared.addWatch(winfo)
case winfo := <-shared.remove:
shared.error <- shared.removeWatch(winfo)
case event, open := <-shared.watcher.Events:
if !open {
return
}
shared.sendEvent(event)
case err, open := <-shared.watcher.Errors:
if !open {
return
} else if err != nil {
sysErr, ok := err.(*os.SyscallError)
if !ok || sysErr.Err != syscall.EINTR {
logger.Printf("Error in Watcher Error channel: %s", err)
}
}
}
}
}

118
vendor/github.com/hpcloud/tail/watch/polling.go generated vendored Normal file
View File

@@ -0,0 +1,118 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package watch
import (
"os"
"runtime"
"time"
"github.com/hpcloud/tail/util"
"gopkg.in/tomb.v1"
)
// PollingFileWatcher polls the file for changes.
type PollingFileWatcher struct {
Filename string
Size int64
}
func NewPollingFileWatcher(filename string) *PollingFileWatcher {
fw := &PollingFileWatcher{filename, 0}
return fw
}
var POLL_DURATION time.Duration
func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
for {
if _, err := os.Stat(fw.Filename); err == nil {
return nil
} else if !os.IsNotExist(err) {
return err
}
select {
case <-time.After(POLL_DURATION):
continue
case <-t.Dying():
return tomb.ErrDying
}
}
panic("unreachable")
}
func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
origFi, err := os.Stat(fw.Filename)
if err != nil {
return nil, err
}
changes := NewFileChanges()
var prevModTime time.Time
// XXX: use tomb.Tomb to cleanly manage these goroutines. replace
// the fatal (below) with tomb's Kill.
fw.Size = pos
go func() {
prevSize := fw.Size
for {
select {
case <-t.Dying():
return
default:
}
time.Sleep(POLL_DURATION)
fi, err := os.Stat(fw.Filename)
if err != nil {
// Windows cannot delete a file if a handle is still open (tail keeps one open)
// so it gives access denied to anything trying to read it until all handles are released.
if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
// File does not exist (has been deleted).
changes.NotifyDeleted()
return
}
// XXX: report this error back to the user
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
}
// File got moved/renamed?
if !os.SameFile(origFi, fi) {
changes.NotifyDeleted()
return
}
// File got truncated?
fw.Size = fi.Size()
if prevSize > 0 && prevSize > fw.Size {
changes.NotifyTruncated()
prevSize = fw.Size
continue
}
// File got bigger?
if prevSize > 0 && prevSize < fw.Size {
changes.NotifyModified()
prevSize = fw.Size
continue
}
prevSize = fw.Size
// File was appended to (changed)?
modTime := fi.ModTime()
if modTime != prevModTime {
prevModTime = modTime
changes.NotifyModified()
}
}
}()
return changes, nil
}
func init() {
POLL_DURATION = 250 * time.Millisecond
}

20
vendor/github.com/hpcloud/tail/watch/watch.go generated vendored Normal file
View File

@@ -0,0 +1,20 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package watch
import "gopkg.in/tomb.v1"
// FileWatcher monitors file-level events.
type FileWatcher interface {
// BlockUntilExists blocks until the file comes into existence.
BlockUntilExists(*tomb.Tomb) error
// ChangeEvents reports on changes to a file, be it modification,
// deletion, renames or truncations. Returned FileChanges group of
// channels will be closed, thus become unusable, after a deletion
// or truncation event.
// In order to properly report truncations, ChangeEvents requires
// the caller to pass their current offset in the file.
ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
}
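
A rough sketch of how a caller might drive a FileWatcher with a tomb, using the inotify implementation shown earlier (the path is hypothetical; error handling is abbreviated):

package main

import (
	"log"

	"github.com/hpcloud/tail/watch"
	"gopkg.in/tomb.v1"
)

func main() {
	var t tomb.Tomb
	fw := watch.NewInotifyFileWatcher("/tmp/example.log")

	if err := fw.BlockUntilExists(&t); err != nil {
		log.Fatal(err)
	}
	changes, err := fw.ChangeEvents(&t, 0) // 0 = caller's current offset in the file
	if err != nil {
		log.Fatal(err)
	}
	select {
	case <-changes.Modified:
		log.Println("file grew; read the new data")
	case <-changes.Truncated:
		log.Println("file shrank; reopen from the start")
	case <-changes.Deleted:
		log.Println("file was deleted or renamed")
	}
}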

92
vendor/github.com/hpcloud/tail/winfile/winfile.go generated vendored Normal file
View File

@@ -0,0 +1,92 @@
// +build windows
package winfile
import (
"os"
"syscall"
"unsafe"
)
// issue also described here:
// https://codereview.appspot.com/8203043/
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
if len(path) == 0 {
return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
}
pathp, err := syscall.UTF16PtrFromString(path)
if err != nil {
return syscall.InvalidHandle, err
}
var access uint32
switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
case syscall.O_RDONLY:
access = syscall.GENERIC_READ
case syscall.O_WRONLY:
access = syscall.GENERIC_WRITE
case syscall.O_RDWR:
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
}
if mode&syscall.O_CREAT != 0 {
access |= syscall.GENERIC_WRITE
}
if mode&syscall.O_APPEND != 0 {
access &^= syscall.GENERIC_WRITE
access |= syscall.FILE_APPEND_DATA
}
sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
var sa *syscall.SecurityAttributes
if mode&syscall.O_CLOEXEC == 0 {
sa = makeInheritSa()
}
var createmode uint32
switch {
case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
createmode = syscall.CREATE_NEW
case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
createmode = syscall.CREATE_ALWAYS
case mode&syscall.O_CREAT == syscall.O_CREAT:
createmode = syscall.OPEN_ALWAYS
case mode&syscall.O_TRUNC == syscall.O_TRUNC:
createmode = syscall.TRUNCATE_EXISTING
default:
createmode = syscall.OPEN_EXISTING
}
h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
return h, e
}
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
func makeInheritSa() *syscall.SecurityAttributes {
var sa syscall.SecurityAttributes
sa.Length = uint32(unsafe.Sizeof(sa))
sa.InheritHandle = 1
return &sa
}
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
if e != nil {
return nil, e
}
return os.NewFile(uintptr(r), name), nil
}
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
func syscallMode(i os.FileMode) (o uint32) {
o |= uint32(i.Perm())
if i&os.ModeSetuid != 0 {
o |= syscall.S_ISUID
}
if i&os.ModeSetgid != 0 {
o |= syscall.S_ISGID
}
if i&os.ModeSticky != 0 {
o |= syscall.S_ISVTX
}
// No mapping for Go's ModeTemporary (plan9 only).
return
}

1
vendor/github.com/kr/pty/go.mod generated vendored Normal file
View File

@@ -0,0 +1 @@
module github.com/kr/pty

View File

@@ -46,6 +46,6 @@ func ptsname(f *os.File) (string, error) {
func unlockpt(f *os.File) error {
var u _C_int
// use TIOCSPTLCK with a zero valued arg to clear the slave pty lock
// use TIOCSPTLCK with a pointer to zero to clear the lock
return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
}

View File

@@ -11,7 +11,7 @@ func open() (pty, tty *os.File, err error) {
* from ptm(4):
* The PTMGET command allocates a free pseudo terminal, changes its
* ownership to the caller, revokes the access privileges for all previous
* users, opens the file descriptors for the master and slave devices and
* users, opens the file descriptors for the pty and tty devices and
* returns them to the caller in struct ptmget.
*/

12
vendor/github.com/kr/pty/run.go generated vendored
View File

@@ -33,9 +33,15 @@ func StartWithSize(c *exec.Cmd, sz *Winsize) (pty *os.File, err error) {
return nil, err
}
}
c.Stdout = tty
c.Stdin = tty
c.Stderr = tty
if c.Stdout == nil {
c.Stdout = tty
}
if c.Stderr == nil {
c.Stderr = tty
}
if c.Stdin == nil {
c.Stdin = tty
}
if c.SysProcAttr == nil {
c.SysProcAttr = &syscall.SysProcAttr{}
}
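
With this change a caller-supplied stream is kept instead of being overwritten by the tty. A hedged example (Start wraps StartWithSize with a nil size; the command is arbitrary):

package main

import (
	"log"
	"os"
	"os/exec"

	"github.com/kr/pty"
)

func main() {
	c := exec.Command("ls", "-l")
	c.Stdout = os.Stdout // kept, now that the field is only defaulted when nil

	ptmx, err := pty.Start(c) // stdin and stderr still default to the tty
	if err != nil {
		log.Fatal(err)
	}
	defer ptmx.Close()

	if err := c.Wait(); err != nil {
		log.Fatal(err)
	}
}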

12
vendor/github.com/kr/pty/util.go generated vendored
View File

@@ -8,15 +8,15 @@ import (
"unsafe"
)
// InheritSize applies the terminal size of master to slave. This should be run
// in a signal handler for syscall.SIGWINCH to automatically resize the slave when
// the master receives a window size change notification.
func InheritSize(master, slave *os.File) error {
size, err := GetsizeFull(master)
// InheritSize applies the terminal size of pty to tty. This should be run
// in a signal handler for syscall.SIGWINCH to automatically resize the tty when
// the pty receives a window size change notification.
func InheritSize(pty, tty *os.File) error {
size, err := GetsizeFull(pty)
if err != nil {
return err
}
err = Setsize(slave, size)
err = Setsize(tty, size)
if err != nil {
return err
}
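
A sketch of the SIGWINCH wiring the doc comment above suggests, assuming ptmx came from pty.Start and os.Stdin is the caller's terminal:

package main

import (
	"io"
	"log"
	"os"
	"os/exec"
	"os/signal"
	"syscall"

	"github.com/kr/pty"
)

func main() {
	c := exec.Command("bash")
	ptmx, err := pty.Start(c)
	if err != nil {
		log.Fatal(err)
	}
	defer ptmx.Close()

	// Propagate window-size changes from the caller's terminal to the child's tty.
	winch := make(chan os.Signal, 1)
	signal.Notify(winch, syscall.SIGWINCH)
	go func() {
		for range winch {
			if err := pty.InheritSize(os.Stdin, ptmx); err != nil {
				log.Printf("resize: %v", err)
			}
		}
	}()
	winch <- syscall.SIGWINCH // apply the initial size once

	go io.Copy(ptmx, os.Stdin) // forward keystrokes to the child
	io.Copy(os.Stdout, ptmx)   // echo the child's output
}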

3
vendor/github.com/mattn/go-isatty/go.mod generated vendored Normal file
View File

@@ -0,0 +1,3 @@
module github.com/mattn/go-isatty
require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223

2
vendor/github.com/mattn/go-isatty/go.sum generated vendored Normal file
View File

@@ -0,0 +1,2 @@
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

View File

@@ -1,12 +1,10 @@
// +build linux
// +build ppc64 ppc64le
// +build android
package isatty
import (
"syscall"
"unsafe"
syscall "golang.org/x/sys/unix"
)
const ioctlReadTermios = syscall.TCGETS

View File

@@ -1,20 +1,15 @@
// +build linux
// +build !appengine,!ppc64,!ppc64le
// +build !appengine
// +build !android
package isatty
import (
"syscall"
"unsafe"
)
const ioctlReadTermios = syscall.TCGETS
import "golang.org/x/sys/unix"
// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
var termios syscall.Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
_, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
return err == nil
}
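
For reference, a typical call site looks like this (a sketch; the messages are arbitrary):

package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	if isatty.IsTerminal(os.Stdout.Fd()) {
		fmt.Println("stdout is a terminal")
	} else {
		fmt.Println("stdout is a pipe or a file")
	}
}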
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2

View File

@@ -2,4 +2,6 @@
TODO
tmp/**/*
*.coverprofile
.vscode
.vscode
.idea/
*.log

View File

@@ -1,9 +1,9 @@
language: go
go:
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.10.x
- 1.11.x
- 1.12.x
- tip
install:
- go get -v -t ./...
@@ -12,4 +12,4 @@ install:
- go install github.com/onsi/ginkgo/ginkgo
- export PATH=$PATH:$HOME/gopath/bin
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet

View File

@@ -1,3 +1,69 @@
## 1.8.0
### New Features
- allow config of the vet flag for `go test` (#562) [3cd45fa]
- Support projects using go modules [d56ee76]
### Fixes and Minor Improvements
- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
- Optimize focus to avoid allocations [f493786]
- Ensure generated test file names are underscored [505cc35]
## 1.7.0
### New Features
- Add JustAfterEach (#484) [0d4f080]
### Fixes
- Correctly round suite time in junit reporter [2445fc1]
- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
## 1.6.0
### New Features
- add --debug flag to emit node output to files (#499) [39febac]
### Fixes
- fix: for `go vet` to pass [69338ec]
- docs: fix for contributing instructions [7004cb1]
- consolidate and streamline contribution docs (#494) [d848015]
- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
- all: gofmt [000d317]
- Increase eventually timeout to 30s [c73579c]
- Clarify asynchronous test behaviour [294d8f4]
- Travis badge should only show master [26d2143]
## 1.5.0 5/10/2018
### New Features
- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
- Re-add noisySkippings flag [652e15c]
- Allow coverage to be displayed for focused specs (#367) [11459a8]
- Handle -outputdir flag (#364) [228e3a8]
- Handle -coverprofile flag (#355) [43392d5]
### Fixes
- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their custom reporters. (#365) [8382b23]
- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
- Add an extra new line after reporting spec run completion for test2json [874520d]
- added name field to junit reported testsuite [ae61c63]
- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
- Replace GOPATH in Environment [4b883f0]
## 1.4.0 7/16/2017
- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)

View File

@@ -1,12 +1,33 @@
# Contributing to Ginkgo
Your contributions to Ginkgo are essential for its long-term maintenance and improvement. To make a contribution:
Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
- Ensure adequate test coverage:
- If you're adding functionality to the Ginkgo library, make sure to add appropriate unit and/or integration tests (under the `integration` folder).
- If you're adding functionality to the Ginkgo CLI note that there are very few unit tests. Please add an integration test.
- Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR
- Update the documentation. In addition to standard `godoc` comments Ginkgo has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR.
- When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
- When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
- Update the documentation. Ginkgo uses `godoc` comments and documentation on the `gh-pages` branch.
If relevant, please submit a docs PR to that branch alongside your code PR.
Thanks for supporting Ginkgo!
Thanks for supporting Ginkgo!
## Setup
Fork the repo, then:
```
go get github.com/onsi/ginkgo
go get github.com/onsi/gomega/...
cd $GOPATH/src/github.com/onsi/ginkgo
git remote add fork git@github.com:<NAME>/ginkgo.git
ginkgo -r -p # ensure tests are green
go vet ./... # ensure linter is happy
```
## Making the PR
- go to a new branch `git checkout -b my-feature`
- make your changes
- run tests and linter again (see above)
- `git push fork`
- open PR 🎉

View File

@@ -1,6 +1,6 @@
![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.svg)](https://travis-ci.org/onsi/ginkgo)
[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
@@ -11,9 +11,9 @@ If you have a question, comment, bug report, feature request, etc. please open a
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
- Structure your BDD-style tests expressively:
- Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
- [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
- [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
@@ -25,7 +25,7 @@ If you have a question, comment, bug report, feature request, etc. please open a
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in realtime
- `ginkgo -cover` runs your tests using Golang's code coverage tool
- `ginkgo -cover` runs your tests using Go's code coverage tool
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
- `ginkgo -r` runs all tests suites under the current directory
@@ -55,18 +55,18 @@ If you have a question, comment, bug report, feature request, etc. please open a
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework
Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
## Set Me Up!
You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
```bash
go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
go get github.com/onsi/gomega # fetches the matcher library
go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
go get -u github.com/onsi/gomega/... # fetches the matcher library
cd path/to/package/you/want/to/test
@@ -85,11 +85,11 @@ Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Go
With that said, it's great to know what your options are :)
### What Golang gives you out of the box
### What Go gives you out of the box
Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
### Matcher libraries for Golang's XUnit style tests
### Matcher libraries for Go's XUnit style tests
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
@@ -100,7 +100,7 @@ You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomeg
### BDD style testing frameworks
There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
There are a handful of BDD-style testing frameworks written for Go. Here are a few:
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
- [GoConvey](https://github.com/smartystreets/goconvey)
@@ -108,10 +108,14 @@ There are a handful of BDD-style testing frameworks written for Golang. Here ar
- [Mao](https://github.com/azer/mao)
- [Zen](https://github.com/pranavraja/zen)
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.
Go explore!
## License
Ginkgo is MIT-Licensed
## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md)

14
vendor/github.com/onsi/ginkgo/RELEASING.md generated vendored Normal file
View File

@@ -0,0 +1,14 @@
A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
1. Ensure CHANGELOG.md is up to date.
- Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
- Categorize the changes into
- Breaking Changes (requires a major version)
- New Features (minor version)
- Fixes (fix version)
- Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
1. Update `VERSION` in `config/config.go`
1. Create a commit with the version number as the commit message (e.g. `v1.3.0`)
1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`)
1. Push the commit and tag to GitHub
1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.

View File

@@ -20,7 +20,7 @@ import (
"fmt"
)
const VERSION = "1.4.0"
const VERSION = "1.8.0"
type GinkgoConfigType struct {
RandomSeed int64
@@ -34,6 +34,7 @@ type GinkgoConfigType struct {
FlakeAttempts int
EmitSpecProgress bool
DryRun bool
DebugParallel bool
ParallelNode int
ParallelTotal int
@@ -47,6 +48,7 @@ type DefaultReporterConfigType struct {
NoColor bool
SlowSpecThreshold float64
NoisyPendings bool
NoisySkippings bool
Succinct bool
Verbose bool
FullTrace bool
@@ -64,7 +66,7 @@ func processPrefix(prefix string) string {
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
prefix = processPrefix(prefix)
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.")
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
@@ -80,6 +82,8 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")
if includeParallelFlags {
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
@@ -90,6 +94,7 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
@@ -139,6 +144,10 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
result = append(result, fmt.Sprintf("--%sprogress", prefix))
}
if ginkgo.DebugParallel {
result = append(result, fmt.Sprintf("--%sdebug", prefix))
}
if ginkgo.ParallelNode != 0 {
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
}
@@ -171,6 +180,10 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
}
if !reporter.NoisySkippings {
result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
}
if reporter.Verbose {
result = append(result, fmt.Sprintf("--%sv", prefix))
}

View File

@@ -43,10 +43,10 @@ func BuildBootstrapCommand() *Command {
var bootstrapText = `package {{.Package}}
import (
"testing"
{{.GinkgoImport}}
{{.GomegaImport}}
"testing"
)
func Test{{.FormattedName}}(t *testing.T) {
@@ -58,11 +58,11 @@ func Test{{.FormattedName}}(t *testing.T) {
var agoutiBootstrapText = `package {{.Package}}
import (
"testing"
{{.GinkgoImport}}
{{.GomegaImport}}
"github.com/sclevine/agouti"
"testing"
)
func Test{{.FormattedName}}(t *testing.T) {

View File

@@ -3,8 +3,9 @@ package main
import (
"flag"
"fmt"
"github.com/onsi/ginkgo/ginkgo/convert"
"os"
"github.com/onsi/ginkgo/ginkgo/convert"
)
func BuildConvertCommand() *Command {

View File

@@ -34,10 +34,10 @@ func BuildGenerateCommand() *Command {
var specText = `package {{.Package}}
import (
{{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
{{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
)
var _ = Describe("{{.Subject}}", func() {
@@ -45,15 +45,15 @@ var _ = Describe("{{.Subject}}", func() {
})
`
var agoutiSpecText = `package {{.Package}}_test
var agoutiSpecText = `package {{.Package}}
import (
{{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
. "github.com/sclevine/agouti/matchers"
"github.com/sclevine/agouti"
. "github.com/sclevine/agouti/matchers"
{{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
)
var _ = Describe("{{.Subject}}", func() {
@@ -108,10 +108,8 @@ func generateSpec(args []string, agouti, noDot, internal bool) {
func generateSpecForSubject(subject string, agouti, noDot, internal bool) error {
packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
if subject != "" {
subject = strings.Split(subject, ".go")[0]
subject = strings.Split(subject, "_test")[0]
specFilePrefix = subject
formattedName = prettifyPackageName(subject)
specFilePrefix = formatSubject(subject)
formattedName = prettifyPackageName(specFilePrefix)
}
data := specData{
@@ -152,6 +150,14 @@ func generateSpecForSubject(subject string, agouti, noDot, internal bool) error
return nil
}
func formatSubject(name string) string {
name = strings.Replace(name, "-", "_", -1)
name = strings.Replace(name, " ", "_", -1)
name = strings.Split(name, ".go")[0]
name = strings.Split(name, "_test")[0]
return name
}
func getPackageImportPath() string {
workingDir, err := os.Getwd()
if err != nil {

View File

@@ -47,6 +47,7 @@ var SynchronizedBeforeSuite = ginkgo.SynchronizedBeforeSuite
var SynchronizedAfterSuite = ginkgo.SynchronizedAfterSuite
var BeforeEach = ginkgo.BeforeEach
var JustBeforeEach = ginkgo.JustBeforeEach
var JustAfterEach = ginkgo.JustAfterEach
var AfterEach = ginkgo.AfterEach
// Declarations for Gomega DSL

View File

@@ -1,8 +1,9 @@
package nodot_test
import (
. "github.com/onsi/ginkgo/ginkgo/nodot"
"strings"
. "github.com/onsi/ginkgo/ginkgo/nodot"
)
var _ = Describe("ApplyNoDot", func() {

View File

@@ -3,11 +3,12 @@ package main
import (
"bufio"
"flag"
"github.com/onsi/ginkgo/ginkgo/nodot"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"github.com/onsi/ginkgo/ginkgo/nodot"
)
func BuildNodotCommand() *Command {

View File

@@ -5,8 +5,12 @@ import (
"fmt"
"math/rand"
"os"
"strings"
"time"
"io/ioutil"
"path/filepath"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
"github.com/onsi/ginkgo/ginkgo/testrunner"
@@ -104,10 +108,25 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
runner.CleanUp()
}
if r.isInCoverageMode() {
if r.getOutputDir() != "" {
// If coverprofile is set, combine coverages
if r.getCoverprofile() != "" {
if err := r.combineCoverprofiles(runners); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
} else {
// Just move them
r.moveCoverprofiles(runners)
}
}
}
fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))
if runResult.Passed {
if runResult.HasProgrammaticFocus {
if runResult.HasProgrammaticFocus && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
fmt.Printf("Test Suite Passed\n")
fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
@@ -121,6 +140,70 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
}
}
// Moves all generated profiles to specified directory
func (r *SpecRunner) moveCoverprofiles(runners []*testrunner.TestRunner) {
for _, runner := range runners {
_, filename := filepath.Split(runner.CoverageFile)
err := os.Rename(runner.CoverageFile, filepath.Join(r.getOutputDir(), filename))
if err != nil {
fmt.Printf("Unable to move coverprofile %s, %v\n", runner.CoverageFile, err)
return
}
}
}
// Combines all generated profiles in the specified directory
func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) error {
path, _ := filepath.Abs(r.getOutputDir())
if !fileExists(path) {
return fmt.Errorf("Unable to create combined profile, outputdir does not exist: %s", r.getOutputDir())
}
fmt.Println("path is " + path)
combined, err := os.OpenFile(filepath.Join(path, r.getCoverprofile()),
os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
fmt.Printf("Unable to create combined profile, %v\n", err)
return nil // non-fatal error
}
for _, runner := range runners {
contents, err := ioutil.ReadFile(runner.CoverageFile)
if err != nil {
fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
return nil // non-fatal error
}
_, err = combined.Write(contents)
if err != nil {
fmt.Printf("Unable to append to coverprofile, %v\n", err)
return nil // non-fatal error
}
}
fmt.Println("All profiles combined")
return nil
}
func (r *SpecRunner) isInCoverageMode() bool {
opts := r.commandFlags.GoOpts
return *opts["cover"].(*bool) || *opts["coverpkg"].(*string) != "" || *opts["covermode"].(*string) != ""
}
func (r *SpecRunner) getCoverprofile() string {
return *r.commandFlags.GoOpts["coverprofile"].(*string)
}
func (r *SpecRunner) getOutputDir() string {
return *r.commandFlags.GoOpts["outputdir"].(*string)
}
func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
if config.DefaultReporterConfig.Verbose {
config.DefaultReporterConfig.Succinct = false

View File

@@ -126,6 +126,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
c.FlagSet.BoolVar(c.boolSlot("work"), "work", false, "Print the name of the temporary work directory and do not delete it when exiting.")
c.FlagSet.StringVar(c.stringSlot("asmflags"), "asmflags", "", "Arguments to pass on each go tool asm invocation.")
c.FlagSet.StringVar(c.stringSlot("buildmode"), "buildmode", "", "Build mode to use. See 'go help buildmode' for more.")
c.FlagSet.StringVar(c.stringSlot("mod"), "mod", "", "Go module control. See 'go help modules' for more.")
c.FlagSet.StringVar(c.stringSlot("compiler"), "compiler", "", "Name of compiler to use, as in runtime.Compiler (gccgo or gc).")
c.FlagSet.StringVar(c.stringSlot("gccgoflags"), "gccgoflags", "", "Arguments to pass on each gccgo compiler/linker invocation.")
c.FlagSet.StringVar(c.stringSlot("installsuffix"), "installsuffix", "", "A suffix to use in the name of the package installation directory.")
@@ -140,6 +141,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
c.FlagSet.IntVar(c.intSlot("memprofilerate"), "memprofilerate", 0, "Enable more precise (and expensive) memory profiles by setting runtime.MemProfileRate.")
c.FlagSet.StringVar(c.stringSlot("outputdir"), "outputdir", "", "Place output files from profiling in the specified directory.")
c.FlagSet.BoolVar(c.boolSlot("requireSuite"), "requireSuite", false, "Fail if there are ginkgo tests in a directory but no test suite (missing RunSpecs)")
c.FlagSet.StringVar(c.stringSlot("vet"), "vet", "", "Configure the invocation of 'go vet' to use the comma-separated list of vet checks. If list is 'off', 'go test' does not run 'go vet' at all.")
if mode == runMode || mode == watchMode {
config.Flags(c.FlagSet, "", false)

View File

@@ -0,0 +1,7 @@
// +build go1.10
package testrunner
var (
buildArgs = []string{"test", "-c"}
)

View File

@@ -0,0 +1,7 @@
// +build !go1.10
package testrunner
var (
buildArgs = []string{"test", "-c", "-i"}
)

View File

@@ -17,6 +17,7 @@ import (
"github.com/onsi/ginkgo/ginkgo/testsuite"
"github.com/onsi/ginkgo/internal/remote"
"github.com/onsi/ginkgo/reporters/stenographer"
colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
"github.com/onsi/ginkgo/types"
)
@@ -32,6 +33,8 @@ type TestRunner struct {
goOpts map[string]interface{}
additionalArgs []string
stderr *bytes.Buffer
CoverageFile string
}
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner {
@@ -61,12 +64,14 @@ func (t *TestRunner) Compile() error {
}
func (t *TestRunner) BuildArgs(path string) []string {
args := []string{"test", "-c", "-i", "-o", path, t.Suite.Path}
args := make([]string, len(buildArgs), len(buildArgs)+3)
copy(args, buildArgs)
args = append(args, "-o", path, t.Suite.Path)
if *t.goOpts["covermode"].(*string) != "" {
args = append(args, "-cover", fmt.Sprintf("-covermode=%s", *t.goOpts["covermode"].(*string)))
if t.getCoverMode() != "" {
args = append(args, "-cover", fmt.Sprintf("-covermode=%s", t.getCoverMode()))
} else {
if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" {
if t.shouldCover() || t.getCoverPackage() != "" {
args = append(args, "-cover", "-covermode=atomic")
}
}
@@ -114,6 +119,8 @@ func (t *TestRunner) BuildArgs(path string) []string {
"coverpkg",
"tags",
"gcflags",
"vet",
"mod",
}
for _, opt := range stringOpts {
@@ -298,7 +305,7 @@ func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
os.Stdout.Sync()
if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" || *t.goOpts["covermode"].(*string) != "" {
if t.shouldCombineCoverprofiles() {
t.combineCoverprofiles()
}
@@ -311,7 +318,7 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
writers := make([]*logWriter, t.numCPU)
reports := make([]*bytes.Buffer, t.numCPU)
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1)
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)
server, err := remote.NewServer(t.numCPU)
@@ -364,9 +371,8 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
| |
| Ginkgo timed out waiting for all parallel nodes to report back! |
| |
-------------------------------------------------------------------
`)
fmt.Println(t.Suite.PackageName, "timed out. path:", t.Suite.Path)
-------------------------------------------------------------------`)
fmt.Println("\n", t.Suite.PackageName, "timed out. path:", t.Suite.Path)
os.Stdout.Sync()
for _, writer := range writers {
@@ -380,21 +386,40 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
os.Stdout.Sync()
}
if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" || *t.goOpts["covermode"].(*string) != "" {
if t.shouldCombineCoverprofiles() {
t.combineCoverprofiles()
}
return res
}
const CoverProfileSuffix = ".coverprofile"
func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
args := []string{"--test.timeout=" + t.timeout.String()}
if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" || *t.goOpts["covermode"].(*string) != "" {
coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
if t.numCPU > 1 {
coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
coverProfile := t.getCoverProfile()
if t.shouldCombineCoverprofiles() {
testCoverProfile := "--test.coverprofile="
coverageFile := ""
// Set default name for coverage results
if coverProfile == "" {
coverageFile = t.Suite.PackageName + CoverProfileSuffix
} else {
coverageFile = coverProfile
}
args = append(args, coverprofile)
testCoverProfile += coverageFile
t.CoverageFile = filepath.Join(t.Suite.Path, coverageFile)
if t.numCPU > 1 {
testCoverProfile = fmt.Sprintf("%s.%d", testCoverProfile, node)
}
args = append(args, testCoverProfile)
}
args = append(args, ginkgoArgs...)
@@ -414,6 +439,30 @@ func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.
return cmd
}
func (t *TestRunner) shouldCover() bool {
return *t.goOpts["cover"].(*bool)
}
func (t *TestRunner) shouldRequireSuite() bool {
return *t.goOpts["requireSuite"].(*bool)
}
func (t *TestRunner) getCoverProfile() string {
return *t.goOpts["coverprofile"].(*string)
}
func (t *TestRunner) getCoverPackage() string {
return *t.goOpts["coverpkg"].(*string)
}
func (t *TestRunner) getCoverMode() string {
return *t.goOpts["covermode"].(*string)
}
func (t *TestRunner) shouldCombineCoverprofiles() bool {
return t.shouldCover() || t.getCoverPackage() != "" || t.getCoverMode() != ""
}
func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
var res RunResult
@@ -436,7 +485,7 @@ func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
if strings.Contains(t.stderr.String(), "warning: no tests to run") {
if *t.goOpts["requireSuite"].(*bool) {
if t.shouldRequireSuite() {
res.Passed = false
}
fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
@@ -447,8 +496,17 @@ func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
func (t *TestRunner) combineCoverprofiles() {
profiles := []string{}
coverProfile := t.getCoverProfile()
for cpu := 1; cpu <= t.numCPU; cpu++ {
coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu)
var coverFile string
if coverProfile == "" {
coverFile = fmt.Sprintf("%s%s.%d", t.Suite.PackageName, CoverProfileSuffix, cpu)
} else {
coverFile = fmt.Sprintf("%s.%d", coverProfile, cpu)
}
coverFile = filepath.Join(t.Suite.Path, coverFile)
coverProfile, err := ioutil.ReadFile(coverFile)
os.Remove(coverFile)
@@ -484,5 +542,17 @@ func (t *TestRunner) combineCoverprofiles() {
output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
}
finalOutput := strings.Join(output, "\n")
ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666)
finalFilename := ""
if coverProfile != "" {
finalFilename = coverProfile
} else {
finalFilename = fmt.Sprintf("%s%s", t.Suite.PackageName, CoverProfileSuffix)
}
coverageFilepath := filepath.Join(t.Suite.Path, finalFilename)
ioutil.WriteFile(coverageFilepath, []byte(finalOutput), 0666)
t.CoverageFile = coverageFilepath
}

View File

@@ -32,14 +32,19 @@ var _ = Describe("TestRunner", func() {
"coverpkg": strAddr(""),
"cover": boolAddr(false),
"blockprofilerate": intAddr(100),
"vet": strAddr("off"),
"mod": strAddr("vendor"),
}
tr := testrunner.New(testsuite.TestSuite{}, 1, false, 0, opts, []string{})
args := tr.BuildArgs(".")
// Remove the "-i" argument; This is discarded in Golang 1.10+.
if args[2] == "-i" {
args = append(args[0:2], args[3:]...)
}
Ω(args).Should(Equal([]string{
"test",
"-c",
"-i",
"-o",
".",
"",
@@ -47,6 +52,8 @@ var _ = Describe("TestRunner", func() {
"-asmflags=a",
"-pkgdir=b",
"-gcflags=c",
"-vet=off",
"-mod=vendor",
}))
})
})

View File

@@ -53,7 +53,7 @@ func SuitesInDir(dir string, recurse bool) []TestSuite {
}
files, _ := ioutil.ReadDir(dir)
re := regexp.MustCompile(`_test\.go$`)
re := regexp.MustCompile(`^[^._].*_test\.go$`)
for _, file := range files {
if !file.IsDir() && re.Match([]byte(file.Name())) {
suites = append(suites, New(dir, files))

View File

@@ -51,6 +51,10 @@ var _ = Describe("TestSuite", func() {
//non-go files in a nested directory
writeFile("/redherring", "big_test.jpg", "package ginkgo", 0666)
//ginkgo tests in ignored go files
writeFile("/ignored", ".ignore_dot_test.go", `import "github.com/onsi/ginkgo"`, 0666)
writeFile("/ignored", "_ignore_underscore_test.go", `import "github.com/onsi/ginkgo"`, 0666)
//non-ginkgo tests in a nested directory
writeFile("/professorplum", "professorplum_test.go", `import "testing"`, 0666)
@@ -140,6 +144,13 @@ var _ = Describe("TestSuite", func() {
})
})
Context("when there are ginkgo tests that are ignored by go in the specified directory ", func() {
It("should come up empty", func() {
suites := SuitesInDir(filepath.Join(tmpDir, "ignored"), false)
Ω(suites).Should(BeEmpty())
})
})
Context("when there are non-ginkgo tests in the specified directory", func() {
It("should return an appropriately configured suite", func() {
suites := SuitesInDir(filepath.Join(tmpDir, "professorplum"), false)

View File

@@ -3,7 +3,9 @@ package main
import (
"flag"
"fmt"
"io/ioutil"
"os/exec"
"strings"
)
func BuildUnfocusCommand() *Command {
@@ -26,13 +28,34 @@ func unfocusSpecs([]string, []string) {
unfocus("Measure")
unfocus("DescribeTable")
unfocus("Entry")
unfocus("Specify")
unfocus("When")
}
func unfocus(component string) {
fmt.Printf("Removing F%s...\n", component)
cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".")
out, _ := cmd.CombinedOutput()
if string(out) != "" {
println(string(out))
files, err := ioutil.ReadDir(".")
if err != nil {
fmt.Println(err.Error())
return
}
for _, f := range files {
// Exclude "vendor" directory
if f.IsDir() && f.Name() == "vendor" {
continue
}
// Exclude non-go files in the current directory
if !f.IsDir() && !strings.HasSuffix(f.Name(), ".go") {
continue
}
// Recursively run `gofmt` otherwise
cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", f.Name())
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(err.Error())
}
if string(out) != "" {
fmt.Println(string(out))
}
}
}

View File

@@ -3,6 +3,7 @@ package main
import (
"flag"
"fmt"
"github.com/onsi/ginkgo/config"
)

View File

@@ -29,6 +29,7 @@ import (
"github.com/onsi/ginkgo/internal/writer"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/reporters/stenographer"
colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
"github.com/onsi/ginkgo/types"
)
@@ -149,7 +150,8 @@ type GinkgoTestDescription struct {
FileName string
LineNumber int
Failed bool
Failed bool
Duration time.Duration
}
//CurrentGinkgoTestDescripton returns information about the current running test.
@@ -169,6 +171,7 @@ func CurrentGinkgoTestDescription() GinkgoTestDescription {
FileName: subjectCodeLocation.FileName,
LineNumber: subjectCodeLocation.LineNumber,
Failed: summary.HasFailureState(),
Duration: summary.RunTime,
}
}
@@ -202,7 +205,7 @@ func RunSpecs(t GinkgoTestingT, description string) bool {
//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
//RunSpecs() with this method.
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
specReporters = append(specReporters, buildDefaultReporter())
return RunSpecsWithCustomReporters(t, description, specReporters)
}
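The change above appends the default reporter after any custom reporters instead of prepending it, which changes the order in which reporters receive events. A hedged usage sketch; the JUnit output path junit.xml, package name, and suite description are arbitrary examples:
package mypackage_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	. "github.com/onsi/gomega"
)

func TestMyPackage(t *testing.T) {
	RegisterFailHandler(Fail)
	junit := reporters.NewJUnitReporter("junit.xml")
	// The default reporter is now appended after the custom one.
	RunSpecsWithDefaultAndCustomReporters(t, "MyPackage Suite", []Reporter{junit})
}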
@@ -216,7 +219,7 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor
reporters[i] = reporter
}
passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
if passed && hasFocusedTests {
if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
fmt.Println("PASS | FOCUSED")
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
}
@@ -226,14 +229,18 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor
func buildDefaultReporter() Reporter {
remoteReportingServer := config.GinkgoConfig.StreamHost
if remoteReportingServer == "" {
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1)
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
} else {
return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
debugFile := ""
if config.GinkgoConfig.DebugParallel {
debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
}
return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
}
}
//Skip notifies Ginkgo that the current spec should be skipped.
//Skip notifies Ginkgo that the current spec was skipped.
func Skip(message string, callerSkip ...int) {
skip := 0
if len(callerSkip) > 0 {
@@ -275,9 +282,9 @@ func GinkgoRecover() {
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
//or method and, within that Describe, outline a number of Contexts and Whens.
func Describe(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
return true
@@ -304,9 +311,9 @@ func XDescribe(text string, body func()) bool {
//Context blocks allow you to organize your specs. A Context block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
//or method and, within that Describe, outline a number of Contexts and Whens.
func Context(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
return true
@@ -330,6 +337,35 @@ func XContext(text string, body func()) bool {
return true
}
//When blocks allow you to organize your specs. A When block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts and Whens.
func When(text string, body func()) bool {
globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
return true
}
//You can focus the tests within a describe block using FWhen
func FWhen(text string, body func()) bool {
globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
return true
}
//You can mark the tests within a describe block as pending using PWhen
func PWhen(text string, body func()) bool {
globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
return true
}
//You can mark the tests within a describe block as pending using XWhen
func XWhen(text string, body func()) bool {
globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
return true
}
//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
//within an It block.
//
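The When, FWhen, PWhen and XWhen containers added above behave like Context, with the spec text automatically prefixed with "when ". A minimal usage sketch; the package and spec names are made-up examples:
package mypackage_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("cache lookups", func() {
	When("the key is missing", func() {
		It("reports a miss", func() {
			// Reported as: cache lookups when the key is missing reports a miss
			Expect(true).To(BeTrue())
		})
	})
})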
@@ -362,22 +398,26 @@ func XIt(text string, _ ...interface{}) bool {
//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks
//which apply to It blocks.
func Specify(text string, body interface{}, timeout ...float64) bool {
return It(text, body, timeout...)
globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
return true
}
//You can focus individual Specifys using FSpecify
func FSpecify(text string, body interface{}, timeout ...float64) bool {
return FIt(text, body, timeout...)
globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
return true
}
//You can mark Specifys as pending using PSpecify
func PSpecify(text string, is ...interface{}) bool {
return PIt(text, is...)
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//You can mark Specifys as pending using XSpecify
func XSpecify(text string, is ...interface{}) bool {
return XIt(text, is...)
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//By allows you to better document large Its.
@@ -417,13 +457,13 @@ func FMeasure(text string, body interface{}, samples int) bool {
return true
}
//You can mark Maeasurements as pending using PMeasure
//You can mark Measurements as pending using PMeasure
func PMeasure(text string, _ ...interface{}) bool {
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//You can mark Maeasurements as pending using XMeasure
//You can mark Measurements as pending using XMeasure
func XMeasure(text string, _ ...interface{}) bool {
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
@@ -550,6 +590,16 @@ func JustBeforeEach(body interface{}, timeout ...float64) bool {
return true
}
//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details,
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
//
//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts
//a Done channel
func JustAfterEach(body interface{}, timeout ...float64) bool {
globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
return true
}
//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
//Describe and Context blocks the innermost AfterEach blocks are run first.
//
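JustAfterEach, introduced in the hunk above, runs after the spec body but before every AfterEach, so it can observe state before teardown begins. A minimal ordering sketch; the package name and messages are arbitrary:
package mypackage_test

import (
	"fmt"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("teardown ordering", func() {
	It("runs first", func() {
		fmt.Fprintln(GinkgoWriter, "spec body")
	})
	JustAfterEach(func() {
		fmt.Fprintln(GinkgoWriter, "runs after the It, before any AfterEach")
	})
	AfterEach(func() {
		fmt.Fprintln(GinkgoWriter, "teardown runs last")
	})
})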

View File

@@ -0,0 +1,21 @@
package first_package
func A() string {
return "A"
}
func B() string {
return "B"
}
func C() string {
return "C"
}
func D() string {
return "D"
}
func E() string {
return "untested"
}

View File

@@ -0,0 +1,13 @@
package first_package_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestCoverageFixture(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "CombinedFixture First Suite")
}

View File

@@ -0,0 +1,31 @@
package first_package_test
import (
. "github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package"
. "github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/first_package/external_coverage_fixture"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("CoverageFixture", func() {
It("should test A", func() {
Ω(A()).Should(Equal("A"))
})
It("should test B", func() {
Ω(B()).Should(Equal("B"))
})
It("should test C", func() {
Ω(C()).Should(Equal("C"))
})
It("should test D", func() {
Ω(D()).Should(Equal("D"))
})
It("should test external package", func() {
Ω(Tested()).Should(Equal("tested"))
})
})

View File

@@ -0,0 +1,9 @@
package external_coverage
func Tested() string {
return "tested"
}
func Untested() string {
return "untested"
}

View File

@@ -0,0 +1,21 @@
package second_package
func A() string {
return "A"
}
func B() string {
return "B"
}
func C() string {
return "C"
}
func D() string {
return "D"
}
func E() string {
return "E"
}

View File

@@ -0,0 +1,13 @@
package second_package_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestCoverageFixture(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "CombinedFixture Second Suite")
}

View File

@@ -0,0 +1,29 @@
package second_package_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/integration/_fixtures/combined_coverage_fixture/second_package"
. "github.com/onsi/gomega"
)
var _ = Describe("CoverageFixture", func() {
It("should test A", func() {
Ω(A()).Should(Equal("A"))
})
It("should test B", func() {
Ω(B()).Should(Equal("B"))
})
It("should test C", func() {
Ω(C()).Should(Equal("C"))
})
It("should test D", func() {
Ω(D()).Should(Equal("D"))
})
It("should test E", func() {
Ω(E()).Should(Equal("E"))
})
})

View File

@@ -1,10 +1,10 @@
package nested_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestNested(t *testing.T) {

View File

@@ -1,10 +1,10 @@
package tmp_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestConvertFixtures(t *testing.T) {

View File

@@ -1,5 +1,9 @@
package coverage_fixture
import (
_ "github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture"
)
func A() string {
return "A"
}

View File

@@ -0,0 +1,13 @@
package debug_parallel_fixture_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestDebugParallelFixture(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "DebugParallelFixture Suite")
}

View File

@@ -0,0 +1,18 @@
package debug_parallel_fixture_test
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
)
var _ = Describe("DebugParallelFixture", func() {
It("emits output to a file", func() {
for i := 0; i < 10; i += 1 {
fmt.Printf("StdOut %d\n", i)
GinkgoWriter.Write([]byte(fmt.Sprintf("GinkgoWriter %d\n", i)))
}
time.Sleep(time.Second)
})
})

View File

@@ -97,3 +97,7 @@ var _ = Describe("Exercising different failure modes", func() {
println("NEVER SEE THIS")
}, 1)
})
var _ = Specify("a top level specify", func() {
Fail("fail the test")
})

View File

@@ -3,11 +3,13 @@ package flags_test
import (
"flag"
"fmt"
remapped "math"
_ "math/cmplx"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/integration/_fixtures/flags_tests"
. "github.com/onsi/gomega"
"time"
remapped "math"
)
var customFlag string

View File

@@ -0,0 +1 @@
This file should remain the same, regardless of the fact that it contains FIt, FDescribe, or FWhen.

View File

@@ -18,10 +18,20 @@ var _ = Describe("FocusedFixture", func() {
})
})
FWhen("focused", func() {
It("focused", func() {
})
})
FIt("focused", func() {
})
FSpecify("focused", func() {
})
FMeasure("focused", func(b Benchmarker) {
}, 2)

View File

@@ -0,0 +1,13 @@
package focused_fixture_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestFocused_fixture(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Focused_fixture Suite")
}

View File

@@ -0,0 +1,73 @@
package focused_fixture_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
)
var _ = Describe("FocusedFixture", func() {
FDescribe("focused", func() {
It("focused", func() {
})
})
FContext("focused", func() {
It("focused", func() {
})
})
FWhen("focused", func() {
It("focused", func() {
})
})
FIt("focused", func() {
})
FSpecify("focused", func() {
})
FMeasure("focused", func(b Benchmarker) {
}, 2)
FDescribeTable("focused",
func() {},
Entry("focused"),
)
DescribeTable("focused",
func() {},
FEntry("focused"),
)
Describe("not focused", func() {
It("not focused", func() {
})
})
Context("not focused", func() {
It("not focused", func() {
})
})
It("not focused", func() {
})
Measure("not focused", func(b Benchmarker) {
}, 2)
DescribeTable("not focused",
func() {},
Entry("not focused"),
)
})

View File

@@ -3,6 +3,7 @@ package hanging_suite_test
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
)

View File

@@ -32,8 +32,18 @@ var _ = Describe("ProgressFixture", func() {
fmt.Fprintln(GinkgoWriter, ">inner after<")
})
It("should emit progress as it goes", func() {
fmt.Fprintln(GinkgoWriter, ">it<")
When("Inner When", func() {
BeforeEach(func() {
fmt.Fprintln(GinkgoWriter, ">inner before<")
})
It("should emit progress as it goes", func() {
fmt.Fprintln(GinkgoWriter, ">it<")
})
})
})
Specify("should emit progress as it goes", func() {
fmt.Fprintln(GinkgoWriter, ">specify<")
})
})

View File

@@ -48,7 +48,6 @@ var _ = Describe("Exercising different skip modes", func() {
}, 1)
})
var _ = Describe("SKIP in a BeforeEach", func() {
BeforeEach(func() {
Skip("a BeforeEach SKIP")
@@ -69,4 +68,4 @@ var _ = Describe("SKIP in an AfterEach", func() {
It("a SKIP AfterEach", func() {
Expect(true).To(BeTrue())
})
})
})

View File

@@ -1,53 +1,147 @@
package integration_test
import (
"os"
"os/exec"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
)
var _ = Describe("Coverage Specs", func() {
AfterEach(func() {
os.RemoveAll("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
Context("when it runs coverage analysis in series and in parallel", func() {
AfterEach(func() {
removeSuccessfully("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
})
It("works", func() {
session := startGinkgo("./_fixtures/coverage_fixture", "-cover")
Eventually(session).Should(gexec.Exit(0))
Ω(session.Out).Should(gbytes.Say(("coverage: 80.0% of statements")))
coverFile := "./_fixtures/coverage_fixture/coverage_fixture.coverprofile"
serialCoverProfileOutput, err := exec.Command("go", "tool", "cover", fmt.Sprintf("-func=%s", coverFile)).CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
removeSuccessfully(coverFile)
Eventually(startGinkgo("./_fixtures/coverage_fixture", "-cover", "-nodes=4")).Should(gexec.Exit(0))
parallelCoverProfileOutput, err := exec.Command("go", "tool", "cover", fmt.Sprintf("-func=%s", coverFile)).CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
By("handling external packages", func() {
session = startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture")
Eventually(session).Should(gexec.Exit(0))
Ω(session.Out).Should(gbytes.Say("coverage: 71.4% of statements in github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture, github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture"))
serialCoverProfileOutput, err = exec.Command("go", "tool", "cover", fmt.Sprintf("-func=%s", coverFile)).CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
removeSuccessfully("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
Eventually(startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture", "-nodes=4")).Should(gexec.Exit(0))
parallelCoverProfileOutput, err = exec.Command("go", "tool", "cover", fmt.Sprintf("-func=%s", coverFile)).CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
})
})
})
It("runs coverage analysis in series and in parallel", func() {
session := startGinkgo("./_fixtures/coverage_fixture", "-cover")
Eventually(session).Should(gexec.Exit(0))
Context("when a custom profile name is specified", func() {
AfterEach(func() {
removeSuccessfully("./_fixtures/coverage_fixture/coverage.txt")
})
It("generates cover profiles with the specified name", func() {
session := startGinkgo("./_fixtures/coverage_fixture", "-cover", "-coverprofile=coverage.txt")
Eventually(session).Should(gexec.Exit(0))
Ω("./_fixtures/coverage_fixture/coverage.txt").Should(BeARegularFile())
})
})
Context("when run in recursive mode", func() {
AfterEach(func() {
removeSuccessfully("./_fixtures/combined_coverage_fixture/coverage-recursive.txt")
removeSuccessfully("./_fixtures/combined_coverage_fixture/first_package/coverage-recursive.txt")
removeSuccessfully("./_fixtures/combined_coverage_fixture/second_package/coverage-recursive.txt")
})
It("generates a coverage file per package", func() {
session := startGinkgo("./_fixtures/combined_coverage_fixture", "-r", "-cover", "-coverprofile=coverage-recursive.txt")
Eventually(session).Should(gexec.Exit(0))
Ω("./_fixtures/combined_coverage_fixture/first_package/coverage-recursive.txt").Should(BeARegularFile())
Ω("./_fixtures/combined_coverage_fixture/second_package/coverage-recursive.txt").Should(BeARegularFile())
})
})
Context("when run in parallel mode", func() {
AfterEach(func() {
removeSuccessfully("./_fixtures/coverage_fixture/coverage-parallel.txt")
})
It("works", func() {
session := startGinkgo("./_fixtures/coverage_fixture", "-p", "-cover", "-coverprofile=coverage-parallel.txt")
Eventually(session).Should(gexec.Exit(0))
Ω("./_fixtures/coverage_fixture/coverage-parallel.txt").Should(BeARegularFile())
})
})
Context("when run in recursive mode specifying a coverprofile", func() {
AfterEach(func() {
removeSuccessfully("./_fixtures/combined_coverage_fixture/coverprofile-recursive.txt")
removeSuccessfully("./_fixtures/combined_coverage_fixture/first_package/coverprofile-recursive.txt")
removeSuccessfully("./_fixtures/combined_coverage_fixture/second_package/coverprofile-recursive.txt")
})
It("combines the coverages", func() {
session := startGinkgo("./_fixtures/combined_coverage_fixture", "-outputdir=./", "-r", "-cover", "-coverprofile=coverprofile-recursive.txt")
Eventually(session).Should(gexec.Exit(0))
By("generating a combined coverage file", func() {
Ω("./_fixtures/combined_coverage_fixture/coverprofile-recursive.txt").Should(BeARegularFile())
})
By("also generating the single package coverage files", func() {
Ω("./_fixtures/combined_coverage_fixture/first_package/coverprofile-recursive.txt").Should(BeARegularFile())
Ω("./_fixtures/combined_coverage_fixture/second_package/coverprofile-recursive.txt").Should(BeARegularFile())
})
})
})
It("Fails with an error if output dir and coverprofile were set, but the output dir did not exist", func() {
session := startGinkgo("./_fixtures/combined_coverage_fixture", "-outputdir=./all/profiles/here", "-r", "-cover", "-coverprofile=coverage.txt")
Eventually(session).Should(gexec.Exit(1))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("coverage: 80.0% of statements"))
Ω(string(output)).Should(ContainSubstring("Unable to create combined profile, outputdir does not exist: ./all/profiles/here"))
})
serialCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
Context("when only output dir was set", func() {
AfterEach(func() {
removeSuccessfully("./_fixtures/combined_coverage_fixture/first_package.coverprofile")
removeSuccessfully("./_fixtures/combined_coverage_fixture/first_package/coverage.txt")
removeSuccessfully("./_fixtures/combined_coverage_fixture/second_package.coverprofile")
removeSuccessfully("./_fixtures/combined_coverage_fixture/second_package/coverage.txt")
})
It("moves coverages", func() {
session := startGinkgo("./_fixtures/combined_coverage_fixture", "-outputdir=./", "-r", "-cover")
Eventually(session).Should(gexec.Exit(0))
os.RemoveAll("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
Eventually(startGinkgo("./_fixtures/coverage_fixture", "-cover", "-nodes=4")).Should(gexec.Exit(0))
parallelCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
By("handling external packages")
session = startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture")
Eventually(session).Should(gexec.Exit(0))
output = session.Out.Contents()
Ω(output).Should(ContainSubstring("coverage: 71.4% of statements in github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture, github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture"))
serialCoverProfileOutput, err = exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
os.RemoveAll("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
Eventually(startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture", "-nodes=4")).Should(gexec.Exit(0))
parallelCoverProfileOutput, err = exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
Ω("./_fixtures/combined_coverage_fixture/first_package.coverprofile").Should(BeARegularFile())
Ω("./_fixtures/combined_coverage_fixture/second_package.coverprofile").Should(BeARegularFile())
})
})
})
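The rewritten coverage specs above exercise -coverprofile, -outputdir, recursive and parallel runs, and compare the resulting profiles with go tool cover. A hedged sketch of the same flow driven by hand; it assumes the ginkgo binary is on PATH, and the profile name is an arbitrary example:
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Run suites recursively with coverage and collect one combined profile
	// in the current directory.
	run := exec.Command("ginkgo", "-r", "-cover", "-outputdir=./", "-coverprofile=coverprofile-recursive.txt")
	if out, err := run.CombinedOutput(); err != nil {
		fmt.Println(err, string(out))
		return
	}
	// Summarize the combined profile with the standard cover tool.
	out, err := exec.Command("go", "tool", "cover", "-func=coverprofile-recursive.txt").CombinedOutput()
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(string(out))
}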

View File

@@ -11,7 +11,7 @@ var _ = Describe("Failing Specs", func() {
BeforeEach(func() {
pathToTest = tmpPath("failing")
copyIn("fail_fixture", pathToTest)
copyIn(fixturePath("fail_fixture"), pathToTest, false)
})
It("should fail in all the possible ways", func() {
@@ -43,6 +43,13 @@ var _ = Describe("Failing Specs", func() {
Ω(output).Should(ContainSubstring("a measure FAIL failure"))
Ω(output).Should(MatchRegexp(`Test Panicked\n\s+a measure panic`))
Ω(output).Should(ContainSubstring("0 Passed | 16 Failed"))
Ω(output).Should(ContainSubstring("a top level specify"))
Ω(output).ShouldNot(ContainSubstring("ginkgo_dsl.go"))
// depending on the go version this could be the first line of the Specify
// block (>= go1.9) or the last line of the Specify block (< go1.9)
Ω(output).Should(Or(ContainSubstring("fail_fixture_test.go:101"), ContainSubstring("fail_fixture_test.go:103")))
Ω(output).Should(ContainSubstring("fail_fixture_test.go:102"))
Ω(output).Should(ContainSubstring("0 Passed | 17 Failed"))
})
})

Some files were not shown because too many files have changed in this diff.