* update circleci to go1.11

* update opencensus dep to build with go1.11

* fix up for new gofmt rules
This commit is contained in:
Reed Allman
2018-08-27 10:55:52 -07:00
committed by GitHub
parent 0cdcd8419c
commit 292f673747
83 changed files with 1723 additions and 2958 deletions

View File

@@ -6,7 +6,7 @@ jobs:
working_directory: ~/go/src/github.com/fnproject/fn
environment: # apparently expansion doesn't work here yet: https://discuss.circleci.com/t/environment-variable-expansion-in-working-directory/11322
- GOPATH=/home/circleci/go
- GOVERSION=1.10
- GOVERSION=1.11
- OS=linux
- ARCH=amd64
- FN_LOG_LEVEL=debug

7
Gopkg.lock generated
View File

@@ -361,6 +361,7 @@
[[projects]]
name = "go.opencensus.io"
packages = [
".",
"exporter/jaeger",
"exporter/jaeger/internal/gen-go/jaeger",
"exporter/prometheus",
@@ -377,8 +378,8 @@
"trace/internal",
"trace/propagation"
]
revision = "10cec2c05ea2cfb8b0d856711daedc49d8a45c56"
version = "v0.9.0"
revision = "7b558058b7cc960667590e5413ef55157b06652e"
version = "v0.15.0"
[[projects]]
branch = "master"
@@ -501,6 +502,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "dbce15832ac7b58692cd257f32b9d152d8f6ca092ac9d5723c2a313a0ce6e13d"
inputs-digest = "3a999b52438a7f308dfa208212e3c3154f7a310f024d3f288b54334868ab8ef9"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -64,7 +64,7 @@ ignored = ["github.com/fnproject/fn/cli",
[[constraint]]
name = "go.opencensus.io"
version = "0.9.0"
version = "~0.15.0"
[[override]]
name = "git.apache.org/thrift.git"

View File

@@ -95,15 +95,15 @@ func TestDecimate(t *testing.T) {
func TestParseImage(t *testing.T) {
cases := map[string][]string{
"fnproject/fn-test-utils": {"", "fnproject/fn-test-utils", "latest"},
"fnproject/fn-test-utils:v1": {"", "fnproject/fn-test-utils", "v1"},
"my.registry/fn-test-utils": {"my.registry", "fn-test-utils", "latest"},
"my.registry/fn-test-utils:v1": {"my.registry", "fn-test-utils", "v1"},
"mongo": {"", "library/mongo", "latest"},
"mongo:v1": {"", "library/mongo", "v1"},
"quay.com/fnproject/fn-test-utils": {"quay.com", "fnproject/fn-test-utils", "latest"},
"quay.com:8080/fnproject/fn-test-utils:v2": {"quay.com:8080", "fnproject/fn-test-utils", "v2"},
"localhost.localdomain:5000/samalba/hipache:latest": {"localhost.localdomain:5000", "samalba/hipache", "latest"},
"fnproject/fn-test-utils": {"", "fnproject/fn-test-utils", "latest"},
"fnproject/fn-test-utils:v1": {"", "fnproject/fn-test-utils", "v1"},
"my.registry/fn-test-utils": {"my.registry", "fn-test-utils", "latest"},
"my.registry/fn-test-utils:v1": {"my.registry", "fn-test-utils", "v1"},
"mongo": {"", "library/mongo", "latest"},
"mongo:v1": {"", "library/mongo", "v1"},
"quay.com/fnproject/fn-test-utils": {"quay.com", "fnproject/fn-test-utils", "latest"},
"quay.com:8080/fnproject/fn-test-utils:v2": {"quay.com:8080", "fnproject/fn-test-utils", "v2"},
"localhost.localdomain:5000/samalba/hipache:latest": {"localhost.localdomain:5000", "samalba/hipache", "latest"},
"localhost.localdomain:5000/samalba/hipache/isthisallowedeven:latest": {"localhost.localdomain:5000", "samalba/hipache/isthisallowedeven", "latest"},
}

View File

@@ -0,0 +1,21 @@
---
name: Bug report
about: Create a report to help us improve
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here.

View File

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
---
**NB:** Before opening a feature request against this repo, consider whether the feature should/could be implemented in the other OpenCensus libraries in other languages. If so, please [open an issue on opencensus-specs](https://github.com/census-instrumentation/opencensus-specs/issues/new) first.
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

3
vendor/go.opencensus.io/.gitignore generated vendored
View File

@@ -2,3 +2,6 @@
# go.opencensus.io/exporter/aws
/exporter/aws/
# Exclude vendor, use dep ensure after checkout:
/vendor/

View File

@@ -24,3 +24,4 @@ script:
- go vet ./...
- go test -v -race $PKGS # Run all the tests with the race detector enabled
- 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
- go run internal/check/version.go

View File

@@ -21,4 +21,36 @@ All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult [GitHub Help] for more
information on using pull requests.
[GitHub Help]: https://help.github.com/articles/about-pull-requests/
[GitHub Help]: https://help.github.com/articles/about-pull-requests/
## Instructions
Fork the repo, checkout the upstream repo to your GOPATH by:
```
$ go get -d go.opencensus.io
```
Add your fork as an origin:
```
cd $(go env GOPATH)/src/go.opencensus.io
git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git
```
Run tests:
```
$ go test ./...
```
Checkout a new branch, make modifications and push the branch to your fork:
```
$ git checkout -b feature
# edit files
$ git commit
$ git push fork feature
```
Open a pull request against the main opencensus-go repo.

31
vendor/go.opencensus.io/Gopkg.lock generated vendored
View File

@@ -9,14 +9,14 @@
"monitoring/apiv3",
"trace/apiv2"
]
revision = "29f476ffa9c4cd4fd14336b6043090ac1ad76733"
version = "v0.21.0"
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
version = "v0.23.0"
[[projects]]
branch = "master"
name = "git.apache.org/thrift.git"
packages = ["lib/go/thrift"]
revision = "606f1ef31447526b908244933d5b716397a6bad8"
revision = "88591e32e710a0524327153c8b629d5b461e35e0"
source = "github.com/apache/thrift"
[[projects]]
@@ -37,8 +37,8 @@
"ptypes/timestamp",
"ptypes/wrappers"
]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
name = "github.com/googleapis/gax-go"
@@ -88,7 +88,7 @@
"internal/bitbucket.org/ww/goautoneg",
"model"
]
revision = "d0f7cd64bda49e08b22ae8a730aa57aa0db125d6"
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
[[projects]]
branch = "master"
@@ -107,14 +107,14 @@
packages = [
"context",
"context/ctxhttp",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"trace"
]
revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"
revision = "9ef9f5bb98a1fdc41f8cf6c250a4404b4085e389"
[[projects]]
branch = "master"
@@ -126,7 +126,7 @@
"jws",
"jwt"
]
revision = "921ae394b9430ed4fb549668d7b087601bd60a81"
revision = "dd5f5d8e78ce062a4aa881dff95a94f2a0fd405a"
[[projects]]
branch = "master"
@@ -168,7 +168,7 @@
"transport/grpc",
"transport/http"
]
revision = "fca24fcb41126b846105a93fb9e30f416bdd55ce"
revision = "4f7dd2b006a4ffd9fd683c1c734d2fe91ca0ea1c"
[[projects]]
name = "google.golang.org/appengine"
@@ -204,7 +204,7 @@
"googleapis/rpc/status",
"protobuf/field_mask"
]
revision = "51d0944304c3cbce4afe9e5247e21100037bff78"
revision = "11a468237815f3a3ddf9f7c6e8b6b3b382a24d15"
[[projects]]
name = "google.golang.org/grpc"
@@ -213,6 +213,7 @@
"balancer",
"balancer/base",
"balancer/roundrobin",
"channelz",
"codes",
"connectivity",
"credentials",
@@ -226,8 +227,6 @@
"metadata",
"naming",
"peer",
"reflection",
"reflection/grpc_reflection_v1alpha",
"resolver",
"resolver/dns",
"resolver/passthrough",
@@ -236,12 +235,12 @@
"tap",
"transport"
]
revision = "d11072e7ca9811b1100b80ca0269ac831f06d024"
version = "v1.11.3"
revision = "41344da2231b913fa3d983840a57a6b1b7b631a1"
version = "v1.12.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "1be7e5255452682d433fe616bb0987e00cb73c1172fe797b9b7a6fd2c1f53d37"
inputs-digest = "3fd3b357ae771c152cbc6b6d7b731c00c91c871cf2dbccb2f155ecc84ec80c4f"
solver-name = "gps-cdcl"
solver-version = 1

10
vendor/go.opencensus.io/Gopkg.toml generated vendored
View File

@@ -1,6 +1,10 @@
# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y"
# to avoid locking to a particular minor version which can cause dep to not be
# able to find a satisfying dependency graph.
[[constraint]]
name = "cloud.google.com/go"
version = "0.21.0"
version = ">=0.21.0"
[[constraint]]
branch = "master"
@@ -13,11 +17,11 @@
[[constraint]]
name = "github.com/openzipkin/zipkin-go"
version = "0.1.0"
version = ">=0.1.0"
[[constraint]]
name = "github.com/prometheus/client_golang"
version = "0.8.0"
version = ">=0.8.0"
[[constraint]]
branch = "master"

101
vendor/go.opencensus.io/README.md generated vendored
View File

@@ -22,17 +22,39 @@ The use of vendoring or a dependency management tool is recommended.
OpenCensus Go libraries require Go 1.8 or later.
## Getting Started
The easiest way to get started using OpenCensus in your application is to use an existing
integration with your RPC framework:
* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
* [database/sql](https://godoc.org/github.com/basvanbeek/ocsql)
* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
If you're using a framework not listed here, you could either implement your own middleware for your
framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
## Exporters
OpenCensus can export instrumentation data to various backends.
Currently, OpenCensus supports:
OpenCensus can export instrumentation data to various backends.
OpenCensus has exporter implementations for the following, users
can implement their own exporters by implementing the exporter interfaces
([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
[trace](https://godoc.org/go.opencensus.io/trace#Exporter)):
* [Prometheus][exporter-prom] for stats
* [OpenZipkin][exporter-zipkin] for traces
* Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver]
* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
* [Jaeger][exporter-jaeger] for traces
* [AWS X-Ray][exporter-xray] for traces
* [Datadog][exporter-datadog] for stats and traces
## Overview
@@ -43,13 +65,6 @@ multiple services until there is a response. OpenCensus allows
you to instrument your services and collect diagnostics data all
through your services end-to-end.
Start with instrumenting HTTP and gRPC clients and servers,
then add additional custom instrumentation if needed.
* [HTTP guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/http)
* [gRPC guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/grpc)
## Tags
Tags represent propagated key-value pairs. They are propagated using `context.Context`
@@ -116,26 +131,79 @@ Here we create a view with the DistributionAggregation over our measure.
[embedmd]:# (internal/readme/stats.go view)
```go
if err := view.Register(&view.View{
Name: "my.org/video_size_distribution",
Name: "example.com/video_size_distribution",
Description: "distribution of processed video size over time",
Measure: videoSize,
Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32),
}); err != nil {
log.Fatalf("Failed to subscribe to view: %v", err)
log.Fatalf("Failed to register view: %v", err)
}
```
Subscribe begins collecting data for the view. Subscribed views' data will be
Register begins collecting data for the view. Registered views' data will be
exported via the registered exporters.
## Traces
A distributed trace tracks the progression of a single user request as
it is handled by the services and processes that make up an application.
Each step is called a span in the trace. Spans include metadata about the step,
including especially the time spent in the step, called the span's latency.
Below you see a trace and several spans underneath it.
![Traces and spans](https://i.imgur.com/7hZwRVj.png)
### Spans
Span is the unit step in a trace. Each span has a name, latency, status and
additional metadata.
Below we are starting a span for a cache read and ending it
when we are done:
[embedmd]:# (internal/readme/trace.go startend)
```go
ctx, span := trace.StartSpan(ctx, "your choice of name")
ctx, span := trace.StartSpan(ctx, "cache.Get")
defer span.End()
// Do work to get from cache.
```
### Propagation
Spans can have parents or can be root spans if they don't have any parents.
The current span is propagated in-process and across the network to allow associating
new child spans with the parent.
In the same process, context.Context is used to propagate spans.
trace.StartSpan creates a new span as a root if the current context
doesn't contain a span. Or, it creates a child of the span that is
already in current context. The returned context can be used to keep
propagating the newly created span in the current context.
[embedmd]:# (internal/readme/trace.go startend)
```go
ctx, span := trace.StartSpan(ctx, "cache.Get")
defer span.End()
// Do work to get from cache.
```
Across the network, OpenCensus provides different propagation
methods for different protocols.
* gRPC integrations uses the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
* HTTP integrations uses Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
by default but can be configured to use a custom propagation method by setting another
[propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
## Execution Tracer
With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
for an example of their mutual use.
## Profiles
OpenCensus tags can be applied as profiler labels
@@ -167,7 +235,7 @@ Before version 1.0.0, the following deprecation policy will be observed:
No backwards-incompatible changes will be made except for the removal of symbols that have
been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
removing the *Deprecated* functionality will be made no sooner than 28 days after the first
removing the *Deprecated* functionality will be made no sooner than 28 days after the first
release in which the functionality was marked *Deprecated*.
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
@@ -188,3 +256,4 @@ release in which the functionality was marked *Deprecated*.
[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws
[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog

View File

@@ -46,7 +46,7 @@ func main() {
// stats handler to enable stats and tracing.
conn, err := grpc.Dial(address, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}), grpc.WithInsecure())
if err != nil {
log.Fatalf("did not connect: %v", err)
log.Fatalf("Cannot connect: %v", err)
}
defer conn.Close()
c := pb.NewGreeterClient(conn)
@@ -60,10 +60,10 @@ func main() {
for {
r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name})
if err != nil {
log.Fatalf("could not greet: %v", err)
log.Printf("Could not greet: %v", err)
} else {
log.Printf("Greeting: %s", r.Message)
}
log.Printf("Greeting: %s", r.Message)
time.Sleep(2 * time.Second) // Wait for the data collection.
time.Sleep(2 * time.Second)
}
}

View File

@@ -47,10 +47,13 @@ func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloRe
}
func main() {
// Start z-Pages server.
go func() {
http.Handle("/debug/", http.StripPrefix("/debug", zpages.Handler))
log.Fatal(http.ListenAndServe(":8081", nil))
mux := http.NewServeMux()
zpages.Handle(mux, "/debug")
log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux))
}()
// Register stats and trace exporters to export
// the collected data.
view.RegisterExporter(&exporter.PrintExporter{})

View File

@@ -49,23 +49,24 @@ func main() {
trace.RegisterExporter(e)
var err error
frontendKey, err = tag.NewKey("my.org/keys/frontend")
frontendKey, err = tag.NewKey("example.com/keys/frontend")
if err != nil {
log.Fatal(err)
}
videoSize = stats.Int64("my.org/measure/video_size", "size of processed videos", stats.UnitBytes)
videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes)
view.SetReportingPeriod(2 * time.Second)
// Create view to see the processed video size
// distribution broken down by frontend.
// Register will allow view data to be exported.
if err := view.Register(&view.View{
Name: "my.org/views/video_size",
Name: "example.com/views/video_size",
Description: "processed video size over time",
TagKeys: []tag.Key{frontendKey},
Measure: videoSize,
Aggregation: view.Distribution(0, 1<<16, 1<<32),
}); err != nil {
log.Fatalf("Cannot subscribe to the view: %v", err)
log.Fatalf("Cannot register view: %v", err)
}
// Process the video.
@@ -86,7 +87,7 @@ func process(ctx context.Context) {
if err != nil {
log.Fatal(err)
}
ctx, span := trace.StartSpan(ctx, "my.org/ProcessVideo")
ctx, span := trace.StartSpan(ctx, "example.com/ProcessVideo")
defer span.End()
// Process video.
// Record the processed video size.

View File

@@ -27,5 +27,5 @@ You will see traces and stats exported on the stdout. You can use one of the
to upload collected data to the backend of your choice.
You can also see the z-pages provided from the server:
* Traces: http://localhost:8081/tracez
* RPCs: http://localhost:8081/rpcz
* Traces: http://localhost:8081/debug/tracez
* RPCs: http://localhost:8081/debug/rpcz

View File

@@ -29,7 +29,12 @@ import (
)
func main() {
go func() { log.Fatal(http.ListenAndServe(":8081", zpages.Handler)) }()
// Start z-Pages server.
go func() {
mux := http.NewServeMux()
zpages.Handle(mux, "/debug")
log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux))
}()
// Register stats and trace exporters to export the collected data.
exporter := &exporter.PrintExporter{}

View File

@@ -31,8 +31,8 @@ import (
// Create measures. The program will record measures for the size of
// processed videos and the number of videos marked as spam.
var (
videoCount = stats.Int64("my.org/measures/video_count", "number of processed videos", stats.UnitDimensionless)
videoSize = stats.Int64("my.org/measures/video_size", "size of processed video", stats.UnitBytes)
videoCount = stats.Int64("example.com/measures/video_count", "number of processed videos", stats.UnitDimensionless)
videoSize = stats.Int64("example.com/measures/video_size", "size of processed video", stats.UnitBytes)
)
func main() {
@@ -62,7 +62,7 @@ func main() {
Aggregation: view.Distribution(0, 1<<16, 1<<32),
},
); err != nil {
log.Fatalf("Cannot subscribe to the view: %v", err)
log.Fatalf("Cannot register the view: %v", err)
}
// Set reporting period to report data at every second.

View File

@@ -29,7 +29,7 @@ func Example() {
}
view.RegisterExporter(exporter)
// Serve the scrap endpoint at localhost:9999.
// Serve the scrape endpoint on port 9999.
http.Handle("/metrics", exporter)
log.Fatal(http.ListenAndServe(":9999", nil))
}

View File

@@ -12,14 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus contains a Prometheus exporter.
//
// Please note that this exporter is currently work in progress and not complete.
// Package prometheus contains a Prometheus exporter that supports exporting
// OpenCensus views as Prometheus metrics.
package prometheus // import "go.opencensus.io/exporter/prometheus"
import (
"bytes"
"errors"
"fmt"
"log"
"net/http"
@@ -51,23 +49,8 @@ type Options struct {
OnError func(err error)
}
var (
newExporterOnce sync.Once
errSingletonExporter = errors.New("expecting only one exporter per instance")
)
// NewExporter returns an exporter that exports stats to Prometheus.
// Only one exporter should exist per instance
func NewExporter(o Options) (*Exporter, error) {
var err = errSingletonExporter
var exporter *Exporter
newExporterOnce.Do(func() {
exporter, err = newExporter(o)
})
return exporter, err
}
func newExporter(o Options) (*Exporter, error) {
if o.Registry == nil {
o.Registry = prometheus.NewRegistry()
}
@@ -257,7 +240,7 @@ func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row)
return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...)
case *view.LastValueData:
return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...)
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags)...)
default:
return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation)

View File

@@ -45,6 +45,7 @@ func newView(measureName string, agg *view.Aggregation) *view.View {
func TestOnlyCumulativeWindowSupported(t *testing.T) {
// See Issue https://github.com/census-instrumentation/opencensus-go/issues/214.
count1 := &view.CountData{Value: 1}
lastValue1 := &view.LastValueData{Value: 56.7}
tests := []struct {
vds *view.Data
want int
@@ -64,6 +65,15 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) {
},
want: 1,
},
2: {
vds: &view.Data{
View: newView("TestOnlyCumulativeWindowSupported/m3", view.LastValue()),
Rows: []*view.Row{
{Data: lastValue1},
},
},
want: 1,
},
}
for i, tt := range tests {
@@ -81,29 +91,10 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) {
}
}
func TestSingletonExporter(t *testing.T) {
exp, err := NewExporter(Options{})
if err != nil {
t.Fatalf("NewExporter() = %v", err)
}
if exp == nil {
t.Fatal("Nil exporter")
}
// Should all now fail
exp, err = NewExporter(Options{})
if err == nil {
t.Fatal("NewExporter() = nil")
}
if exp != nil {
t.Fatal("Non-nil exporter")
}
}
func TestCollectNonRacy(t *testing.T) {
// Despite enforcing the singleton, for this case we
// need an exporter hence won't be using NewExporter.
exp, err := newExporter(Options{})
exp, err := NewExporter(Options{})
if err != nil {
t.Fatalf("NewExporter: %v", err)
}
@@ -192,7 +183,7 @@ func (vc *vCreator) createAndAppend(name, description string, keys []tag.Key, me
}
func TestMetricsEndpointOutput(t *testing.T) {
exporter, err := newExporter(Options{})
exporter, err := NewExporter(Options{})
if err != nil {
t.Fatalf("failed to create prometheus exporter: %v", err)
}
@@ -266,7 +257,7 @@ func TestMetricsEndpointOutput(t *testing.T) {
}
func TestCumulativenessFromHistograms(t *testing.T) {
exporter, err := newExporter(Options{})
exporter, err := NewExporter(Options{})
if err != nil {
t.Fatalf("failed to create prometheus exporter: %v", err)
}

View File

@@ -1,82 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Command stackdriver is an example program that collects data for
// video size. Collected data is exported to
// Stackdriver Monitoring.
package main
import (
"context"
"fmt"
"log"
"time"
"go.opencensus.io/exporter/stackdriver"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
)
// Create measures. The program will record measures for the size of
// processed videos and the nubmer of videos marked as spam.
var videoSize = stats.Int64("my.org/measure/video_size", "size of processed videos", stats.UnitBytes)
func main() {
ctx := context.Background()
// Collected view data will be reported to Stackdriver Monitoring API
// via the Stackdriver exporter.
//
// In order to use the Stackdriver exporter, enable Stackdriver Monitoring API
// at https://console.cloud.google.com/apis/dashboard.
//
// Once API is enabled, you can use Google Application Default Credentials
// to setup the authorization.
// See https://developers.google.com/identity/protocols/application-default-credentials
// for more details.
exporter, err := stackdriver.NewExporter(stackdriver.Options{
ProjectID: "project-id", // Google Cloud Console project ID.
})
if err != nil {
log.Fatal(err)
}
view.RegisterExporter(exporter)
// Set reporting period to report data at every second.
view.SetReportingPeriod(1 * time.Second)
// Create view to see the processed video size cumulatively.
// Subscribe will allow view data to be exported.
// Once no longer need, you can unsubscribe from the view.
if err := view.Register(&view.View{
Name: "my.org/views/video_size_cum",
Description: "processed video size over time",
Measure: videoSize,
Aggregation: view.Distribution(0, 1<<16, 1<<32),
}); err != nil {
log.Fatalf("Cannot subscribe to the view: %v", err)
}
processVideo(ctx)
// Wait for a duration longer than reporting duration to ensure the stats
// library reports the collected data.
fmt.Println("Wait longer than the reporting duration...")
time.Sleep(1 * time.Minute)
}
func processVideo(ctx context.Context) {
// Do some processing and record stats.
stats.Record(ctx, videoSize.M(25648))
}

View File

@@ -1,142 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package stackdriver has moved.
//
// Deprecated: Use contrib.go.opencensus.io/exporter/stackdriver instead.
package stackdriver // import "go.opencensus.io/exporter/stackdriver"
import (
"context"
"errors"
"fmt"
"log"
"time"
traceapi "cloud.google.com/go/trace/apiv2"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
)
// Options contains options for configuring the exporter.
type Options struct {
// ProjectID is the identifier of the Stackdriver
// project the user is uploading the stats data to.
// If not set, this will default to your "Application Default Credentials".
// For details see: https://developers.google.com/accounts/docs/application-default-credentials
ProjectID string
// OnError is the hook to be called when there is
// an error uploading the stats or tracing data.
// If no custom hook is set, errors are logged.
// Optional.
OnError func(err error)
// MonitoringClientOptions are additional options to be passed
// to the underlying Stackdriver Monitoring API client.
// Optional.
MonitoringClientOptions []option.ClientOption
// TraceClientOptions are additional options to be passed
// to the underlying Stackdriver Trace API client.
// Optional.
TraceClientOptions []option.ClientOption
// BundleDelayThreshold determines the max amount of time
// the exporter can wait before uploading view data to
// the backend.
// Optional.
BundleDelayThreshold time.Duration
// BundleCountThreshold determines how many view data events
// can be buffered before batch uploading them to the backend.
// Optional.
BundleCountThreshold int
// Resource is an optional field that represents the Stackdriver
// MonitoredResource, a resource that can be used for monitoring.
// If no custom ResourceDescriptor is set, a default MonitoredResource
// with type global and no resource labels will be used.
// Optional.
Resource *monitoredrespb.MonitoredResource
// MetricPrefix overrides the OpenCensus prefix of a stackdriver metric.
// Optional.
MetricPrefix string
}
// Exporter is a stats.Exporter and trace.Exporter
// implementation that uploads data to Stackdriver.
type Exporter struct {
traceExporter *traceExporter
statsExporter *statsExporter
}
// NewExporter creates a new Exporter that implements both stats.Exporter and
// trace.Exporter.
func NewExporter(o Options) (*Exporter, error) {
if o.ProjectID == "" {
creds, err := google.FindDefaultCredentials(context.Background(), traceapi.DefaultAuthScopes()...)
if err != nil {
return nil, fmt.Errorf("stackdriver: %v", err)
}
if creds.ProjectID == "" {
return nil, errors.New("stackdriver: no project found with application default credentials")
}
o.ProjectID = creds.ProjectID
}
se, err := newStatsExporter(o)
if err != nil {
return nil, err
}
te, err := newTraceExporter(o)
if err != nil {
return nil, err
}
return &Exporter{
statsExporter: se,
traceExporter: te,
}, nil
}
// ExportView exports to the Stackdriver Monitoring if view data
// has one or more rows.
func (e *Exporter) ExportView(vd *view.Data) {
e.statsExporter.ExportView(vd)
}
// ExportSpan exports a SpanData to Stackdriver Trace.
func (e *Exporter) ExportSpan(sd *trace.SpanData) {
e.traceExporter.ExportSpan(sd)
}
// Flush waits for exported data to be uploaded.
//
// This is useful if your program is ending and you do not
// want to lose recent stats or spans.
func (e *Exporter) Flush() {
e.statsExporter.Flush()
e.traceExporter.Flush()
}
func (o Options) handleError(err error) {
if o.OnError != nil {
o.OnError(err)
return
}
log.Printf("Error exporting to Stackdriver: %v", err)
}

View File

@@ -1,125 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
import (
"context"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"go.opencensus.io/internal/testpb"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
"golang.org/x/net/context/ctxhttp"
)
// TestExport is an integration test gated on STACKDRIVER_TEST_PROJECT_ID.
// It records a custom span plus ochttp client/server spans, flushes the
// exporter, and fails if any export error was reported via OnError.
func TestExport(t *testing.T) {
	projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID")
	if !ok {
		t.Skip("STACKDRIVER_TEST_PROJECT_ID not set")
	}
	var exportErrors []error
	exporter, err := NewExporter(Options{ProjectID: projectID, OnError: func(err error) {
		exportErrors = append(exportErrors, err)
	}})
	if err != nil {
		t.Fatal(err)
	}
	defer exporter.Flush()
	trace.RegisterExporter(exporter)
	defer trace.UnregisterExporter(exporter)
	view.RegisterExporter(exporter)
	defer view.UnregisterExporter(exporter)
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
	_, span := trace.StartSpan(context.Background(), "custom-span")
	time.Sleep(10 * time.Millisecond)
	span.End()
	// Test HTTP spans
	handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		_, backgroundSpan := trace.StartSpan(context.Background(), "BackgroundWork")
		spanContext := backgroundSpan.SpanContext()
		time.Sleep(10 * time.Millisecond)
		backgroundSpan.End()
		_, span := trace.StartSpan(req.Context(), "Sleep")
		// Link the background span as a child so both show up connected
		// in the trace UI.
		span.AddLink(trace.Link{Type: trace.LinkTypeChild, TraceID: spanContext.TraceID, SpanID: spanContext.SpanID})
		time.Sleep(150 * time.Millisecond) // do work
		span.End()
		rw.Write([]byte("Hello, world!"))
	})
	server := httptest.NewServer(&ochttp.Handler{Handler: handler})
	defer server.Close()
	ctx := context.Background()
	client := &http.Client{
		Transport: &ochttp.Transport{},
	}
	resp, err := ctxhttp.Get(ctx, client, server.URL+"/test/123?abc=xyz")
	if err != nil {
		t.Fatal(err)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := "Hello, world!", string(body); want != got {
		t.Fatalf("resp.Body = %q; want %q", want, got)
	}
	// Flush twice to expose issue of exporter creating traces internally (#557)
	exporter.Flush()
	exporter.Flush()
	for _, err := range exportErrors {
		t.Error(err)
	}
}
// TestGRPC is an integration test gated on STACKDRIVER_TEST_PROJECT_ID.
// It exercises the exporter against a gRPC round trip made through the
// shared testpb client helper.
func TestGRPC(t *testing.T) {
	projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID")
	if !ok {
		t.Skip("STACKDRIVER_TEST_PROJECT_ID not set")
	}
	exporter, err := NewExporter(Options{ProjectID: projectID})
	if err != nil {
		t.Fatal(err)
	}
	defer exporter.Flush()
	trace.RegisterExporter(exporter)
	defer trace.UnregisterExporter(exporter)
	view.RegisterExporter(exporter)
	defer view.UnregisterExporter(exporter)
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
	client, done := testpb.NewTestClient(t)
	defer done()
	client.Single(context.Background(), &testpb.FooRequest{SleepNanos: int64(42 * time.Millisecond)})
}

View File

@@ -1,439 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
import (
"context"
"errors"
"fmt"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"go.opencensus.io/internal"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.opencensus.io/trace"
"cloud.google.com/go/monitoring/apiv3"
"github.com/golang/protobuf/ptypes/timestamp"
"google.golang.org/api/option"
"google.golang.org/api/support/bundler"
distributionpb "google.golang.org/genproto/googleapis/api/distribution"
labelpb "google.golang.org/genproto/googleapis/api/label"
"google.golang.org/genproto/googleapis/api/metric"
metricpb "google.golang.org/genproto/googleapis/api/metric"
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)
// Constants used when exporting stats to Stackdriver Monitoring.
const (
	// maxTimeSeriesPerUpload caps how many time series go into one
	// CreateTimeSeries request.
	maxTimeSeriesPerUpload = 200

	// opencensusTaskKey labels every exported series with a
	// per-process task identifier.
	opencensusTaskKey         = "opencensus_task"
	opencensusTaskDescription = "Opencensus task identifier"

	// defaultDisplayNamePrefix prefixes metric display names unless
	// Options.MetricPrefix overrides it.
	defaultDisplayNamePrefix = "OpenCensus"
)
// statsExporter exports stats to the Stackdriver Monitoring.
type statsExporter struct {
	bundler *bundler.Bundler // batches view data before upload
	o       Options

	// createdViewsMu guards createdViews.
	createdViewsMu sync.Mutex
	createdViews   map[string]*metricpb.MetricDescriptor // Views already created remotely

	c         *monitoring.MetricClient
	taskValue string // "go-<pid>@<hostname>", computed once at construction
}
// Enforces the singleton on NewExporter per projectID per process
// lest there will be races with Stackdriver.
var (
	seenProjectsMu sync.Mutex
	seenProjects   = make(map[string]bool) // project IDs that already have an exporter
)

// Sentinel errors returned by newStatsExporter.
var (
	errBlankProjectID    = errors.New("expecting a non-blank ProjectID")
	errSingletonExporter = errors.New("only one exporter can be created per unique ProjectID per process")
)
// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring.
// Only one Stackdriver exporter should be created per ProjectID per process, any subsequent
// invocations of NewExporter with the same ProjectID will return an error.
func newStatsExporter(o Options) (*statsExporter, error) {
	if strings.TrimSpace(o.ProjectID) == "" {
		return nil, errBlankProjectID
	}
	// Reserve the project ID under the lock before dialing, so a
	// concurrent caller cannot create a second exporter for it.
	seenProjectsMu.Lock()
	defer seenProjectsMu.Unlock()
	_, seen := seenProjects[o.ProjectID]
	if seen {
		return nil, errSingletonExporter
	}
	seenProjects[o.ProjectID] = true
	opts := append(o.MonitoringClientOptions, option.WithUserAgent(internal.UserAgent))
	client, err := monitoring.NewMetricClient(context.Background(), opts...)
	if err != nil {
		return nil, err
	}
	e := &statsExporter{
		c:            client,
		o:            o,
		createdViews: make(map[string]*metricpb.MetricDescriptor),
		taskValue:    getTaskValue(),
	}
	// Bundle uploads so many small view.Data values are coalesced into
	// fewer requests; thresholds come from the caller's Options.
	e.bundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
		vds := bundle.([]*view.Data)
		e.handleUpload(vds...)
	})
	e.bundler.DelayThreshold = e.o.BundleDelayThreshold
	e.bundler.BundleCountThreshold = e.o.BundleCountThreshold
	return e, nil
}
// ExportView exports to the Stackdriver Monitoring if view data
// has one or more rows.
func (e *statsExporter) ExportView(vd *view.Data) {
	if len(vd.Rows) == 0 {
		return
	}
	switch err := e.bundler.Add(vd, 1); err {
	case nil:
		return
	case bundler.ErrOversizedItem:
		// Too big for the bundler: upload it on its own.
		go e.handleUpload(vd)
	case bundler.ErrOverflow:
		e.o.handleError(errors.New("failed to upload: buffer full"))
	default:
		e.o.handleError(err)
	}
}
// getTaskValue returns a task label value in the format of
// "go-<pid>@<hostname>".
func getTaskValue() string {
	host, err := os.Hostname()
	if err != nil {
		// Hostname lookup failed: use a stable placeholder instead.
		host = "localhost"
	}
	return "go-" + strconv.Itoa(os.Getpid()) + "@" + host
}
// handleUpload uploads a slice of view data and reports any failure
// through the configured error handler.
func (e *statsExporter) handleUpload(vds ...*view.Data) {
	err := e.uploadStats(vds)
	if err == nil {
		return
	}
	e.o.handleError(err)
}
// Flush waits for exported view data to be uploaded.
//
// This is useful if your program is ending and you do not
// want to lose recent stats.
func (e *statsExporter) Flush() {
	e.bundler.Flush()
}
// uploadStats ensures a metric descriptor exists for every view in vds
// and then uploads the corresponding time series, batched into requests
// of at most maxTimeSeriesPerUpload series each. It returns on the first
// error, recording it on the local span's status.
func (e *statsExporter) uploadStats(vds []*view.Data) error {
	// Never sample this span: it would otherwise generate traces about
	// the act of exporting.
	ctx, span := trace.StartSpan(
		context.Background(),
		"go.opencensus.io/exporter/stackdriver.uploadStats",
		trace.WithSampler(trace.NeverSample()),
	)
	defer span.End()
	for _, vd := range vds {
		if err := e.createMeasure(ctx, vd); err != nil {
			span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
			return err
		}
	}
	for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) {
		if err := e.c.CreateTimeSeries(ctx, req); err != nil {
			span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
			// TODO(jbd): Don't fail fast here, batch errors?
			return err
		}
	}
	return nil
}
// makeReq converts view data into CreateTimeSeriesRequests, starting a
// new request each time `limit` time series have accumulated. Every
// series carries the caller's Resource, or a "global" default when none
// is configured.
func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest {
	var reqs []*monitoringpb.CreateTimeSeriesRequest
	var timeSeries []*monitoringpb.TimeSeries
	resource := e.o.Resource
	if resource == nil {
		resource = &monitoredrespb.MonitoredResource{
			Type: "global",
		}
	}
	for _, vd := range vds {
		for _, row := range vd.Rows {
			// One time series per row: metric type is derived from the
			// view name, labels from the row's tags plus the task label.
			ts := &monitoringpb.TimeSeries{
				Metric: &metricpb.Metric{
					Type:   namespacedViewName(vd.View.Name),
					Labels: newLabels(row.Tags, e.taskValue),
				},
				Resource: resource,
				Points:   []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)},
			}
			timeSeries = append(timeSeries, ts)
			// Full batch: emit a request and start a fresh slice.
			if len(timeSeries) == limit {
				reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{
					Name:       monitoring.MetricProjectPath(e.o.ProjectID),
					TimeSeries: timeSeries,
				})
				timeSeries = []*monitoringpb.TimeSeries{}
			}
		}
	}
	// Flush the final, possibly partial, batch.
	if len(timeSeries) > 0 {
		reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{
			Name:       monitoring.MetricProjectPath(e.o.ProjectID),
			TimeSeries: timeSeries,
		})
	}
	return reqs
}
// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring.
// An error will be returned if there is already a metric descriptor created with the same name
// but it has a different aggregation or keys.
func (e *statsExporter) createMeasure(ctx context.Context, vd *view.Data) error {
	// Serialize descriptor creation so the createdViews cache stays
	// consistent under concurrent uploads.
	e.createdViewsMu.Lock()
	defer e.createdViewsMu.Unlock()
	m := vd.View.Measure
	agg := vd.View.Aggregation
	tagKeys := vd.View.TagKeys
	viewName := vd.View.Name
	// Already created remotely: only verify compatibility, no RPC.
	if md, ok := e.createdViews[viewName]; ok {
		return equalMeasureAggTagKeys(md, m, agg, tagKeys)
	}
	metricType := namespacedViewName(viewName)
	var valueType metricpb.MetricDescriptor_ValueType
	unit := m.Unit()
	switch agg.Type {
	case view.AggTypeCount:
		valueType = metricpb.MetricDescriptor_INT64
		// If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1",
		// because this view does not apply to the recorded values.
		unit = stats.UnitDimensionless
	case view.AggTypeSum:
		switch m.(type) {
		case *stats.Int64Measure:
			valueType = metricpb.MetricDescriptor_INT64
		case *stats.Float64Measure:
			valueType = metricpb.MetricDescriptor_DOUBLE
		}
	case view.AggTypeDistribution:
		valueType = metricpb.MetricDescriptor_DISTRIBUTION
	case view.AggTypeLastValue:
		switch m.(type) {
		case *stats.Int64Measure:
			valueType = metricpb.MetricDescriptor_INT64
		case *stats.Float64Measure:
			valueType = metricpb.MetricDescriptor_DOUBLE
		}
	default:
		return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String())
	}
	metricKind := metricpb.MetricDescriptor_CUMULATIVE
	displayNamePrefix := defaultDisplayNamePrefix
	if e.o.MetricPrefix != "" {
		displayNamePrefix = e.o.MetricPrefix
	}
	md, err := createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{
		Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
		MetricDescriptor: &metricpb.MetricDescriptor{
			Name:        fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType),
			DisplayName: path.Join(displayNamePrefix, viewName),
			Description: vd.View.Description,
			Unit:        unit,
			Type:        metricType,
			MetricKind:  metricKind,
			ValueType:   valueType,
			Labels:      newLabelDescriptors(vd.View.TagKeys),
		},
	})
	if err != nil {
		return err
	}
	// Cache the descriptor so subsequent uploads of this view skip the RPC.
	e.createdViews[viewName] = md
	return nil
}
// newPoint builds the monitoring point for one row: a time interval
// from start to end carrying the row's typed value.
func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {
	interval := &monitoringpb.TimeInterval{
		StartTime: &timestamp.Timestamp{
			Seconds: start.Unix(),
			Nanos:   int32(start.Nanosecond()),
		},
		EndTime: &timestamp.Timestamp{
			Seconds: end.Unix(),
			Nanos:   int32(end.Nanosecond()),
		},
	}
	return &monitoringpb.Point{
		Interval: interval,
		Value:    newTypedValue(v, row),
	}
}
// newTypedValue converts a row's aggregated data into the protobuf
// TypedValue matching the descriptor created for the view. It returns
// nil for unrecognized data types.
func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
	switch v := r.Data.(type) {
	case *view.CountData:
		return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
			Int64Value: v.Value,
		}}
	case *view.SumData:
		// Sum data is float64-valued; the measure type decides whether
		// the series was declared INT64 or DOUBLE.
		switch vd.Measure.(type) {
		case *stats.Int64Measure:
			return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
				Int64Value: int64(v.Value),
			}}
		case *stats.Float64Measure:
			return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
				DoubleValue: v.Value,
			}}
		}
	case *view.DistributionData:
		return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
			DistributionValue: &distributionpb.Distribution{
				Count:                 v.Count,
				Mean:                  v.Mean,
				SumOfSquaredDeviation: v.SumOfSquaredDev,
				// TODO(songya): uncomment this once Stackdriver supports min/max.
				// Range: &distributionpb.Distribution_Range{
				// 	Min: v.Min,
				// 	Max: v.Max,
				// },
				BucketOptions: &distributionpb.Distribution_BucketOptions{
					Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
						ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
							Bounds: vd.Aggregation.Buckets,
						},
					},
				},
				BucketCounts: v.CountPerBucket,
			},
		}}
	case *view.LastValueData:
		switch vd.Measure.(type) {
		case *stats.Int64Measure:
			return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
				Int64Value: int64(v.Value),
			}}
		case *stats.Float64Measure:
			return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
				DoubleValue: v.Value,
			}}
		}
	}
	return nil
}
// namespacedViewName maps a view name into Stackdriver's custom-metric
// namespace ("custom.googleapis.com/opencensus/<name>").
func namespacedViewName(v string) string {
	const namespace = "custom.googleapis.com/opencensus"
	return path.Join(namespace, v)
}
// newLabels converts a row's tags into Stackdriver metric labels and
// stamps the series with the per-process opencensus task value.
func newLabels(tags []tag.Tag, taskValue string) map[string]string {
	labels := make(map[string]string, len(tags)+1)
	for _, t := range tags {
		labels[internal.Sanitize(t.Key.Name())] = t.Value
	}
	// Set last so the task label wins over any identically-named tag.
	labels[opencensusTaskKey] = taskValue
	return labels
}
// newLabelDescriptors derives the label descriptors for a view's tag
// keys plus the implicit opencensus task label.
func newLabelDescriptors(keys []tag.Key) []*labelpb.LabelDescriptor {
	descriptors := make([]*labelpb.LabelDescriptor, 0, len(keys)+1)
	for _, key := range keys {
		descriptors = append(descriptors, &labelpb.LabelDescriptor{
			Key:       internal.Sanitize(key.Name()),
			ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags
		})
	}
	// Add a specific open census task id label.
	return append(descriptors, &labelpb.LabelDescriptor{
		Key:         opencensusTaskKey,
		ValueType:   labelpb.LabelDescriptor_STRING,
		Description: opencensusTaskDescription,
	})
}
// equalMeasureAggTagKeys reports whether the previously created metric
// descriptor md is compatible with a view that uses measure m,
// aggregation agg and tag keys keys. It returns a descriptive error on
// the first mismatch found, or nil when they agree.
func equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error {
	var aggTypeMatch bool
	switch md.ValueType {
	case metricpb.MetricDescriptor_INT64:
		// Count aggregations are always INT64 regardless of measure type.
		if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) {
			return fmt.Errorf("stackdriver metric descriptor was not created as int64")
		}
		aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
	case metricpb.MetricDescriptor_DOUBLE:
		if _, ok := m.(*stats.Float64Measure); !ok {
			return fmt.Errorf("stackdriver metric descriptor was not created as double")
		}
		aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
	case metricpb.MetricDescriptor_DISTRIBUTION:
		aggTypeMatch = agg.Type == view.AggTypeDistribution
	}
	if !aggTypeMatch {
		// %v formats the aggregation via its String method; the previous
		// %T always printed the static type name "view.AggType".
		return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %v", agg.Type)
	}
	// The descriptor carries the view's keys plus the implicit task label.
	if len(md.Labels) != len(keys)+1 {
		return errors.New("stackdriver metric descriptor was not created with the view labels")
	}
	labels := make(map[string]struct{}, len(keys)+1)
	for _, k := range keys {
		labels[internal.Sanitize(k.Name())] = struct{}{}
	}
	labels[opencensusTaskKey] = struct{}{}
	for _, k := range md.Labels {
		if _, ok := labels[k.Key]; !ok {
			// Report the offending label key; %q on the descriptor struct
			// pointer itself would render as %!q(...) noise.
			return fmt.Errorf("stackdriver metric descriptor was not created with label %q", k.Key)
		}
	}
	return nil
}
// createMetricDescriptor and getMetricDescriptor are package variables
// rather than plain functions so tests can stub out the Stackdriver
// client calls.
var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
	return c.CreateMetricDescriptor(ctx, mdr)
}

var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
	return c.GetMetricDescriptor(ctx, mdr)
}

View File

@@ -1,866 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
import (
"context"
"reflect"
"testing"
"time"
"cloud.google.com/go/monitoring/apiv3"
"github.com/golang/protobuf/ptypes/timestamp"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"google.golang.org/api/option"
"google.golang.org/genproto/googleapis/api/label"
"google.golang.org/genproto/googleapis/api/metric"
metricpb "google.golang.org/genproto/googleapis/api/metric"
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
"google.golang.org/grpc"
)
// authOptions injects a fake gRPC connection so the tests below never
// actually dial Stackdriver.
var authOptions = []option.ClientOption{option.WithGRPCConn(&grpc.ClientConn{})}
// TestRejectBlankProjectID verifies that newStatsExporter rejects empty
// and whitespace-only project IDs with a non-nil error.
func TestRejectBlankProjectID(t *testing.T) {
	ids := []string{"", " ", " "}
	for _, projectID := range ids {
		opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions}
		exp, err := newStatsExporter(opts)
		if err == nil || exp != nil {
			t.Errorf("%q ProjectID must be rejected: NewExporter() = %v err = %q", projectID, exp, err)
		}
	}
}
// Ensure only one exporter per projectID per process, any
// subsequent invocations of NewExporter should fail.
func TestNewExporterSingletonPerProcess(t *testing.T) {
	ids := []string{"open-census.io", "x", "fakeProjectID"}
	for _, projectID := range ids {
		opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions}
		// First creation for a fresh project ID must succeed.
		exp, err := newStatsExporter(opts)
		if err != nil {
			t.Errorf("NewExporter() projectID = %q err = %q", projectID, err)
			continue
		}
		if exp == nil {
			t.Errorf("NewExporter returned a nil Exporter")
			continue
		}
		// Second creation for the same project ID must fail.
		exp, err = newStatsExporter(opts)
		if err == nil || exp != nil {
			t.Errorf("NewExporter more than once should fail; exp (%v) err %v", exp, err)
		}
	}
}
// TestExporter_makeReq is a golden-output test for statsExporter.makeReq:
// each case builds view data for one aggregation kind (count, sum, last
// value, distribution) and compares the generated CreateTimeSeriesRequests
// against hand-written expectations.
func TestExporter_makeReq(t *testing.T) {
	m := stats.Float64("test-measure", "measure desc", "unit")
	key, err := tag.NewKey("test_key")
	if err != nil {
		t.Fatal(err)
	}
	v := &view.View{
		Name:        "testview",
		Description: "desc",
		TagKeys:     []tag.Key{key},
		Measure:     m,
		Aggregation: view.Count(),
	}
	distView := &view.View{
		Name:        "distview",
		Description: "desc",
		Measure:     m,
		Aggregation: view.Distribution(2, 4, 7),
	}
	start := time.Now()
	end := start.Add(time.Minute)
	count1 := &view.CountData{Value: 10}
	count2 := &view.CountData{Value: 16}
	sum1 := &view.SumData{Value: 5.5}
	sum2 := &view.SumData{Value: -11.1}
	last1 := view.LastValueData{Value: 100}
	last2 := view.LastValueData{Value: 200}
	taskValue := getTaskValue()
	tests := []struct {
		name   string
		projID string
		vd     *view.Data
		want   []*monitoringpb.CreateTimeSeriesRequest
	}{
		{
			name:   "count agg + timeline",
			projID: "proj-id",
			vd:     newTestViewData(v, start, end, count1, count2),
			want: []*monitoringpb.CreateTimeSeriesRequest{{
				Name: monitoring.MetricProjectPath("proj-id"),
				TimeSeries: []*monitoringpb.TimeSeries{
					{
						Metric: &metricpb.Metric{
							Type: "custom.googleapis.com/opencensus/testview",
							Labels: map[string]string{
								"test_key":        "test-value-1",
								opencensusTaskKey: taskValue,
							},
						},
						Resource: &monitoredrespb.MonitoredResource{
							Type: "global",
						},
						Points: []*monitoringpb.Point{
							{
								Interval: &monitoringpb.TimeInterval{
									StartTime: &timestamp.Timestamp{
										Seconds: start.Unix(),
										Nanos:   int32(start.Nanosecond()),
									},
									EndTime: &timestamp.Timestamp{
										Seconds: end.Unix(),
										Nanos:   int32(end.Nanosecond()),
									},
								},
								Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
									Int64Value: 10,
								}},
							},
						},
					},
					{
						Metric: &metricpb.Metric{
							Type: "custom.googleapis.com/opencensus/testview",
							Labels: map[string]string{
								"test_key":        "test-value-2",
								opencensusTaskKey: taskValue,
							},
						},
						Resource: &monitoredrespb.MonitoredResource{
							Type: "global",
						},
						Points: []*monitoringpb.Point{
							{
								Interval: &monitoringpb.TimeInterval{
									StartTime: &timestamp.Timestamp{
										Seconds: start.Unix(),
										Nanos:   int32(start.Nanosecond()),
									},
									EndTime: &timestamp.Timestamp{
										Seconds: end.Unix(),
										Nanos:   int32(end.Nanosecond()),
									},
								},
								Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
									Int64Value: 16,
								}},
							},
						},
					},
				},
			}},
		},
		{
			name:   "sum agg + timeline",
			projID: "proj-id",
			vd:     newTestViewData(v, start, end, sum1, sum2),
			want: []*monitoringpb.CreateTimeSeriesRequest{{
				Name: monitoring.MetricProjectPath("proj-id"),
				TimeSeries: []*monitoringpb.TimeSeries{
					{
						Metric: &metricpb.Metric{
							Type: "custom.googleapis.com/opencensus/testview",
							Labels: map[string]string{
								"test_key":        "test-value-1",
								opencensusTaskKey: taskValue,
							},
						},
						Resource: &monitoredrespb.MonitoredResource{
							Type: "global",
						},
						Points: []*monitoringpb.Point{
							{
								Interval: &monitoringpb.TimeInterval{
									StartTime: &timestamp.Timestamp{
										Seconds: start.Unix(),
										Nanos:   int32(start.Nanosecond()),
									},
									EndTime: &timestamp.Timestamp{
										Seconds: end.Unix(),
										Nanos:   int32(end.Nanosecond()),
									},
								},
								Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
									DoubleValue: 5.5,
								}},
							},
						},
					},
					{
						Metric: &metricpb.Metric{
							Type: "custom.googleapis.com/opencensus/testview",
							Labels: map[string]string{
								"test_key":        "test-value-2",
								opencensusTaskKey: taskValue,
							},
						},
						Resource: &monitoredrespb.MonitoredResource{
							Type: "global",
						},
						Points: []*monitoringpb.Point{
							{
								Interval: &monitoringpb.TimeInterval{
									StartTime: &timestamp.Timestamp{
										Seconds: start.Unix(),
										Nanos:   int32(start.Nanosecond()),
									},
									EndTime: &timestamp.Timestamp{
										Seconds: end.Unix(),
										Nanos:   int32(end.Nanosecond()),
									},
								},
								Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
									DoubleValue: -11.1,
								}},
							},
						},
					},
				},
			}},
		},
		{
			name:   "last value agg",
			projID: "proj-id",
			vd:     newTestViewData(v, start, end, &last1, &last2),
			want: []*monitoringpb.CreateTimeSeriesRequest{{
				Name: monitoring.MetricProjectPath("proj-id"),
				TimeSeries: []*monitoringpb.TimeSeries{
					{
						Metric: &metricpb.Metric{
							Type: "custom.googleapis.com/opencensus/testview",
							Labels: map[string]string{
								"test_key":        "test-value-1",
								opencensusTaskKey: taskValue,
							},
						},
						Resource: &monitoredrespb.MonitoredResource{
							Type: "global",
						},
						Points: []*monitoringpb.Point{
							{
								Interval: &monitoringpb.TimeInterval{
									StartTime: &timestamp.Timestamp{
										Seconds: start.Unix(),
										Nanos:   int32(start.Nanosecond()),
									},
									EndTime: &timestamp.Timestamp{
										Seconds: end.Unix(),
										Nanos:   int32(end.Nanosecond()),
									},
								},
								Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
									DoubleValue: 100,
								}},
							},
						},
					},
					{
						Metric: &metricpb.Metric{
							Type: "custom.googleapis.com/opencensus/testview",
							Labels: map[string]string{
								"test_key":        "test-value-2",
								opencensusTaskKey: taskValue,
							},
						},
						Resource: &monitoredrespb.MonitoredResource{
							Type: "global",
						},
						Points: []*monitoringpb.Point{
							{
								Interval: &monitoringpb.TimeInterval{
									StartTime: &timestamp.Timestamp{
										Seconds: start.Unix(),
										Nanos:   int32(start.Nanosecond()),
									},
									EndTime: &timestamp.Timestamp{
										Seconds: end.Unix(),
										Nanos:   int32(end.Nanosecond()),
									},
								},
								Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
									DoubleValue: 200,
								}},
							},
						},
					},
				},
			}},
		},
		{
			name:   "dist agg + time window",
			projID: "proj-id",
			vd:     newTestDistViewData(distView, start, end),
			want:   nil, //TODO: add expectation for distribution
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			e := &statsExporter{
				o:         Options{ProjectID: tt.projID},
				taskValue: taskValue,
			}
			resps := e.makeReq([]*view.Data{tt.vd}, maxTimeSeriesPerUpload)
			if tt.want == nil {
				t.Skip("Missing expectation")
			}
			if got, want := len(resps), len(tt.want); got != want {
				t.Fatalf("%v: Exporter.makeReq() returned %d responses; want %d", tt.name, got, want)
			}
			if len(tt.want) == 0 {
				return
			}
			if !reflect.DeepEqual(resps, tt.want) {
				t.Errorf("%v: Exporter.makeReq() = %v, want %v", tt.name, resps, tt.want)
			}
		})
	}
}
// TestExporter_makeReq_batching checks the batching arithmetic of
// makeReq: with 4 total time series and varying per-request limits it
// asserts both the number of requests emitted and the total series
// carried across them.
func TestExporter_makeReq_batching(t *testing.T) {
	m := stats.Float64("test-measure/makeReq_batching", "measure desc", "unit")
	key, err := tag.NewKey("test_key")
	if err != nil {
		t.Fatal(err)
	}
	v := &view.View{
		Name:        "view",
		Description: "desc",
		TagKeys:     []tag.Key{key},
		Measure:     m,
		Aggregation: view.Count(),
	}
	tests := []struct {
		name      string
		iter      int
		limit     int
		wantReqs  int
		wantTotal int
	}{
		{
			name:      "4 vds; 3 limit",
			iter:      2,
			limit:     3,
			wantReqs:  2,
			wantTotal: 4,
		},
		{
			name:      "4 vds; 4 limit",
			iter:      2,
			limit:     4,
			wantReqs:  1,
			wantTotal: 4,
		},
		{
			name:      "4 vds; 5 limit",
			iter:      2,
			limit:     5,
			wantReqs:  1,
			wantTotal: 4,
		},
	}
	count1 := &view.CountData{Value: 10}
	count2 := &view.CountData{Value: 16}
	for _, tt := range tests {
		// Each iteration contributes two rows, so tt.iter==2 yields 4 series.
		var vds []*view.Data
		for i := 0; i < tt.iter; i++ {
			vds = append(vds, newTestViewData(v, time.Now(), time.Now(), count1, count2))
		}
		e := &statsExporter{}
		resps := e.makeReq(vds, tt.limit)
		if len(resps) != tt.wantReqs {
			t.Errorf("%v: got %v; want %d requests", tt.name, resps, tt.wantReqs)
		}
		var total int
		for _, resp := range resps {
			total += len(resp.TimeSeries)
		}
		if got, want := total, tt.wantTotal; got != want {
			t.Errorf("%v: len(resps[...].TimeSeries) = %d; want %d", tt.name, got, want)
		}
	}
}
// TestEqualAggWindowTagKeys is a table test for equalMeasureAggTagKeys,
// covering every value-type/aggregation/measure combination plus label
// mismatches.
func TestEqualAggWindowTagKeys(t *testing.T) {
	key1, _ := tag.NewKey("test-key-one")
	key2, _ := tag.NewKey("test-key-two")
	tests := []struct {
		name    string
		md      *metricpb.MetricDescriptor
		m       stats.Measure
		agg     *view.Aggregation
		keys    []tag.Key
		wantErr bool
	}{
		{
			name: "count agg with in64 measure",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_INT64,
				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
			},
			m:       stats.Int64("name", "", ""),
			agg:     view.Count(),
			wantErr: false,
		},
		{
			// Count is INT64 regardless of the underlying measure type.
			name: "count agg with double measure",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_INT64,
				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
			},
			m:       stats.Float64("name", "", ""),
			agg:     view.Count(),
			wantErr: false,
		},
		{
			name: "sum agg double",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_DOUBLE,
				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
			},
			m:       stats.Float64("name", "", ""),
			agg:     view.Sum(),
			wantErr: false,
		},
		{
			name: "sum agg int64",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_INT64,
				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
			},
			m:       stats.Int64("name", "", ""),
			agg:     view.Sum(),
			wantErr: false,
		},
		{
			name: "last value agg double",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_DOUBLE,
				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
			},
			m:       stats.Float64("name", "", ""),
			agg:     view.LastValue(),
			wantErr: false,
		},
		{
			name: "last value agg int64",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_INT64,
				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
			},
			m:       stats.Int64("name", "", ""),
			agg:     view.LastValue(),
			wantErr: false,
		},
		{
			name: "distribution - mismatch",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_DISTRIBUTION,
				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
			},
			m:       stats.Int64("name", "", ""),
			agg:     view.Count(),
			wantErr: true,
		},
		{
			name: "last value - measure mismatch",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_INT64,
				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
			},
			m:       stats.Float64("name", "", ""),
			agg:     view.LastValue(),
			wantErr: true,
		},
		{
			name: "distribution agg with keys",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_DISTRIBUTION,
				Labels: []*label.LabelDescriptor{
					{Key: "test_key_one"},
					{Key: "test_key_two"},
					{Key: opencensusTaskKey},
				},
			},
			m:       stats.Int64("name", "", ""),
			agg:     view.Distribution(),
			keys:    []tag.Key{key1, key2},
			wantErr: false,
		},
		{
			// Descriptor is missing the view's labels entirely.
			name: "distribution agg with keys -- mismatch",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_DISTRIBUTION,
			},
			m:       stats.Int64("name", "", ""),
			agg:     view.Distribution(),
			keys:    []tag.Key{key1, key2},
			wantErr: true,
		},
		{
			name: "count agg with pointers",
			md: &metricpb.MetricDescriptor{
				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
				ValueType:  metricpb.MetricDescriptor_INT64,
				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
			},
			m:       stats.Int64("name", "", ""),
			agg:     view.Count(),
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := equalMeasureAggTagKeys(tt.md, tt.m, tt.agg, tt.keys)
			if err != nil && !tt.wantErr {
				t.Errorf("equalAggTagKeys() = %q; want no error", err)
			}
			if err == nil && tt.wantErr {
				t.Errorf("equalAggTagKeys() = %q; want error", err)
			}
		})
	}
}
// TestExporter_createMeasure stubs createMetricDescriptor to validate
// the descriptor request built for a Sum view, and verifies the
// createdViews cache makes the RPC exactly once per view.
func TestExporter_createMeasure(t *testing.T) {
	// Restore the package-level stub on exit.
	oldCreateMetricDescriptor := createMetricDescriptor
	defer func() {
		createMetricDescriptor = oldCreateMetricDescriptor
	}()
	key, _ := tag.NewKey("test-key-one")
	m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds)
	v := &view.View{
		Name:        "test_view_sum",
		Description: "view_description",
		TagKeys:     []tag.Key{key},
		Measure:     m,
		Aggregation: view.Sum(),
	}
	data := &view.CountData{Value: 0}
	vd := newTestViewData(v, time.Now(), time.Now(), data, data)
	e := &statsExporter{
		createdViews: make(map[string]*metricpb.MetricDescriptor),
		o:            Options{ProjectID: "test_project"},
	}
	var createCalls int
	createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
		createCalls++
		if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_sum"; got != want {
			t.Errorf("MetricDescriptor.Name = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_sum"; got != want {
			t.Errorf("MetricDescriptor.Type = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.ValueType, metricpb.MetricDescriptor_DOUBLE; got != want {
			t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want {
			t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want {
			t.Errorf("MetricDescriptor.Description = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_sum"; got != want {
			t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.Unit, stats.UnitMilliseconds; got != want {
			t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want)
		}
		return &metric.MetricDescriptor{
			DisplayName: "OpenCensus/test_view_sum",
			Description: "view_description",
			Unit:        stats.UnitMilliseconds,
			Type:        "custom.googleapis.com/opencensus/test_view_sum",
			MetricKind:  metricpb.MetricDescriptor_CUMULATIVE,
			ValueType:   metricpb.MetricDescriptor_DOUBLE,
			Labels:      newLabelDescriptors(vd.View.TagKeys),
		}, nil
	}
	ctx := context.Background()
	// Second call must hit the cache and not invoke the stub again.
	if err := e.createMeasure(ctx, vd); err != nil {
		t.Errorf("Exporter.createMeasure() error = %v", err)
	}
	if err := e.createMeasure(ctx, vd); err != nil {
		t.Errorf("Exporter.createMeasure() error = %v", err)
	}
	if count := createCalls; count != 1 {
		t.Errorf("createMetricDescriptor needs to be called for once; called %v times", count)
	}
	if count := len(e.createdViews); count != 1 {
		t.Errorf("len(e.createdViews) = %v; want 1", count)
	}
}
// TestExporter_createMeasure_CountAggregation verifies that createMeasure
// translates a Count-aggregated view into the expected Stackdriver metric
// descriptor: INT64 value type, CUMULATIVE kind, dimensionless unit.
func TestExporter_createMeasure_CountAggregation(t *testing.T) {
	// Stub the package-level createMetricDescriptor hook; restore it when
	// the test finishes so other tests see the real implementation.
	oldCreateMetricDescriptor := createMetricDescriptor
	defer func() {
		createMetricDescriptor = oldCreateMetricDescriptor
	}()
	key, _ := tag.NewKey("test-key-one")
	m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds)
	v := &view.View{
		Name:        "test_view_count",
		Description: "view_description",
		TagKeys:     []tag.Key{key},
		Measure:     m,
		Aggregation: view.Count(),
	}
	data := &view.CountData{Value: 0}
	vd := newTestViewData(v, time.Now(), time.Now(), data, data)
	e := &statsExporter{
		createdViews: make(map[string]*metricpb.MetricDescriptor),
		o:            Options{ProjectID: "test_project"},
	}
	// The stub asserts on every field of the CreateMetricDescriptorRequest
	// the exporter is expected to send for this view.
	createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
		if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_count"; got != want {
			t.Errorf("MetricDescriptor.Name = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_count"; got != want {
			t.Errorf("MetricDescriptor.Type = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.ValueType, metricpb.MetricDescriptor_INT64; got != want {
			t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want {
			t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want {
			t.Errorf("MetricDescriptor.Description = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_count"; got != want {
			t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want)
		}
		if got, want := mdr.MetricDescriptor.Unit, stats.UnitDimensionless; got != want {
			t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want)
		}
		return &metric.MetricDescriptor{
			DisplayName: "OpenCensus/test_view_sum",
			Description: "view_description",
			Unit:        stats.UnitDimensionless,
			Type:        "custom.googleapis.com/opencensus/test_view_count",
			MetricKind:  metricpb.MetricDescriptor_CUMULATIVE,
			ValueType:   metricpb.MetricDescriptor_INT64,
			Labels:      newLabelDescriptors(vd.View.TagKeys),
		}, nil
	}
	ctx := context.Background()
	if err := e.createMeasure(ctx, vd); err != nil {
		t.Errorf("Exporter.createMeasure() error = %v", err)
	}
}
// TestExporter_makeReq_withCustomMonitoredResource checks that a custom
// MonitoredResource supplied through Options is attached to every time
// series produced by makeReq.
func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) {
	m := stats.Float64("test-measure/TestExporter_makeReq_withCustomMonitoredResource", "measure desc", "unit")
	key, err := tag.NewKey("test_key")
	if err != nil {
		t.Fatal(err)
	}
	v := &view.View{
		Name:        "testview",
		Description: "desc",
		TagKeys:     []tag.Key{key},
		Measure:     m,
		Aggregation: view.Count(),
	}
	if err := view.Register(v); err != nil {
		t.Fatal(err)
	}
	defer view.Unregister(v)
	start := time.Now()
	end := start.Add(time.Minute)
	count1 := &view.CountData{Value: 10}
	count2 := &view.CountData{Value: 16}
	taskValue := getTaskValue()
	// The resource every generated time series is expected to carry.
	resource := &monitoredrespb.MonitoredResource{
		Type:   "gce_instance",
		Labels: map[string]string{"instance_id": "instance", "zone": "us-west-1a"},
	}
	tests := []struct {
		name   string
		projID string
		vd     *view.Data
		want   []*monitoringpb.CreateTimeSeriesRequest
	}{
		{
			name:   "count agg timeline",
			projID: "proj-id",
			vd:     newTestViewData(v, start, end, count1, count2),
			// One request with two time series, one per tag value row;
			// both must reference the custom resource above.
			want: []*monitoringpb.CreateTimeSeriesRequest{{
				Name: monitoring.MetricProjectPath("proj-id"),
				TimeSeries: []*monitoringpb.TimeSeries{
					{
						Metric: &metricpb.Metric{
							Type: "custom.googleapis.com/opencensus/testview",
							Labels: map[string]string{
								"test_key":        "test-value-1",
								opencensusTaskKey: taskValue,
							},
						},
						Resource: resource,
						Points: []*monitoringpb.Point{
							{
								Interval: &monitoringpb.TimeInterval{
									StartTime: &timestamp.Timestamp{
										Seconds: start.Unix(),
										Nanos:   int32(start.Nanosecond()),
									},
									EndTime: &timestamp.Timestamp{
										Seconds: end.Unix(),
										Nanos:   int32(end.Nanosecond()),
									},
								},
								Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
									Int64Value: 10,
								}},
							},
						},
					},
					{
						Metric: &metricpb.Metric{
							Type: "custom.googleapis.com/opencensus/testview",
							Labels: map[string]string{
								"test_key":        "test-value-2",
								opencensusTaskKey: taskValue,
							},
						},
						Resource: resource,
						Points: []*monitoringpb.Point{
							{
								Interval: &monitoringpb.TimeInterval{
									StartTime: &timestamp.Timestamp{
										Seconds: start.Unix(),
										Nanos:   int32(start.Nanosecond()),
									},
									EndTime: &timestamp.Timestamp{
										Seconds: end.Unix(),
										Nanos:   int32(end.Nanosecond()),
									},
								},
								Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
									Int64Value: 16,
								}},
							},
						},
					},
				},
			}},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			e := &statsExporter{
				o:         Options{ProjectID: tt.projID, Resource: resource},
				taskValue: taskValue,
			}
			resps := e.makeReq([]*view.Data{tt.vd}, maxTimeSeriesPerUpload)
			if got, want := len(resps), len(tt.want); got != want {
				t.Fatalf("%v: Exporter.makeReq() returned %d responses; want %d", tt.name, got, want)
			}
			if len(tt.want) == 0 {
				return
			}
			if !reflect.DeepEqual(resps, tt.want) {
				t.Errorf("%v: Exporter.makeReq() = %v, want %v", tt.name, resps, tt.want)
			}
		})
	}
}
// newTestViewData builds a view.Data for v spanning [start, end] with two
// rows, one per fixed test tag value, carrying data1 and data2 respectively.
func newTestViewData(v *view.View, start, end time.Time, data1, data2 view.AggregationData) *view.Data {
	k, _ := tag.NewKey("test-key")
	rows := []*view.Row{
		{Tags: []tag.Tag{{Key: k, Value: "test-value-1"}}, Data: data1},
		{Tags: []tag.Tag{{Key: k, Value: "test-value-2"}}, Data: data2},
	}
	return &view.Data{
		View:  v,
		Rows:  rows,
		Start: start,
		End:   end,
	}
}
// newTestDistViewData builds a view.Data for v spanning [start, end] with a
// single untagged row holding a fixed distribution (5 samples over 3 buckets).
func newTestDistViewData(v *view.View, start, end time.Time) *view.Data {
	dist := &view.DistributionData{
		Count:           5,
		Min:             1,
		Max:             7,
		Mean:            3,
		SumOfSquaredDev: 1.5,
		CountPerBucket:  []int64{2, 2, 1},
	}
	return &view.Data{
		View:  v,
		Rows:  []*view.Row{{Data: dist}},
		Start: start,
		End:   end,
	}
}

View File

@@ -1,172 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
import (
"context"
"fmt"
"log"
"sync"
"time"
tracingclient "cloud.google.com/go/trace/apiv2"
"go.opencensus.io/trace"
"google.golang.org/api/support/bundler"
tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
)
// traceExporter is an implementation of trace.Exporter that uploads spans to
// Stackdriver.
type traceExporter struct {
	o         Options
	projectID string
	// bundler batches spans so they can be uploaded to Stackdriver in bulk.
	bundler *bundler.Bundler
	// uploadFn defaults to uploadSpans; it can be replaced for tests.
	uploadFn func(spans []*trace.SpanData)
	// embedded rate-limited logger for buffer-overflow messages.
	overflowLogger
	client *tracingclient.Client
}

// Compile-time check that traceExporter satisfies trace.Exporter.
var _ trace.Exporter = (*traceExporter)(nil)
// newTraceExporter constructs a traceExporter backed by a freshly created
// Stackdriver tracing client configured from o.TraceClientOptions.
func newTraceExporter(o Options) (*traceExporter, error) {
	c, err := tracingclient.NewClient(context.Background(), o.TraceClientOptions...)
	if err != nil {
		return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err)
	}
	return newTraceExporterWithClient(o, c), nil
}
// newTraceExporterWithClient builds a traceExporter around an existing
// tracing client, wiring up a bundler that batches spans before upload.
func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter {
	e := &traceExporter{
		projectID: o.ProjectID,
		client:    c,
		o:         o,
	}
	bundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
		e.uploadFn(bundle.([]*trace.SpanData))
	})
	// Apply configured thresholds, falling back to defaults of 2s / 50 spans.
	if o.BundleDelayThreshold > 0 {
		bundler.DelayThreshold = o.BundleDelayThreshold
	} else {
		bundler.DelayThreshold = 2 * time.Second
	}
	if o.BundleCountThreshold > 0 {
		bundler.BundleCountThreshold = o.BundleCountThreshold
	} else {
		bundler.BundleCountThreshold = 50
	}
	// The measured "bytes" are not really bytes, see exportReceiver.
	bundler.BundleByteThreshold = bundler.BundleCountThreshold * 200
	bundler.BundleByteLimit = bundler.BundleCountThreshold * 1000
	bundler.BufferedByteLimit = bundler.BundleCountThreshold * 2000
	e.bundler = bundler
	e.uploadFn = e.uploadSpans
	return e
}
// ExportSpan exports a SpanData to Stackdriver Trace.
// It queues the span into the bundler; oversized spans are uploaded on
// their own, and overflow (full buffer) is logged at a limited rate.
func (e *traceExporter) ExportSpan(s *trace.SpanData) {
	// n is a length heuristic.
	n := 1
	n += len(s.Attributes)
	n += len(s.Annotations)
	n += len(s.MessageEvents)
	err := e.bundler.Add(s, n)
	switch err {
	case nil:
		return
	case bundler.ErrOversizedItem:
		// Too big for any bundle: upload it by itself, asynchronously.
		go e.uploadFn([]*trace.SpanData{s})
	case bundler.ErrOverflow:
		// Buffer full: the span is dropped; log with rate limiting.
		e.overflowLogger.log()
	default:
		e.o.handleError(err)
	}
}
// Flush waits for exported trace spans to be uploaded.
//
// This is useful if your program is ending and you do not want to lose recent
// spans. It blocks until the bundler has handed all buffered spans to uploadFn.
func (e *traceExporter) Flush() {
	e.bundler.Flush()
}
// uploadSpans uploads a set of spans to Stackdriver in one
// BatchWriteSpans request, reporting any failure via the Options error
// handler.
func (e *traceExporter) uploadSpans(spans []*trace.SpanData) {
	req := tracepb.BatchWriteSpansRequest{
		Name:  "projects/" + e.projectID,
		Spans: make([]*tracepb.Span, 0, len(spans)),
	}
	for _, span := range spans {
		req.Spans = append(req.Spans, protoFromSpanData(span, e.projectID))
	}
	// Create a never-sampled span to prevent traces associated with exporter.
	ctx, span := trace.StartSpan( // TODO: add timeouts
		context.Background(),
		"go.opencensus.io/exporter/stackdriver.uploadSpans",
		trace.WithSampler(trace.NeverSample()),
	)
	defer span.End()
	span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans))))
	err := e.client.BatchWriteSpans(ctx, &req)
	if err != nil {
		span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
		e.o.handleError(err)
	}
}
// overflowLogger ensures that at most one overflow error log message is
// written every 5 seconds.
type overflowLogger struct {
	mu    sync.Mutex
	pause bool // true while log output is suppressed
	accum int  // overflows observed while paused
}
// delay suppresses logging for 5 seconds, then emits a summary of any
// overflows accumulated in the meantime and, if there were any, pauses
// again. Both call sites invoke it while holding o.mu.
func (o *overflowLogger) delay() {
	o.pause = true
	time.AfterFunc(5*time.Second, func() {
		o.mu.Lock()
		defer o.mu.Unlock()
		switch {
		case o.accum == 0:
			// Nothing happened during the window; resume immediate logging.
			o.pause = false
		case o.accum == 1:
			log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full")
			o.accum = 0
			o.delay()
		default:
			log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum)
			o.accum = 0
			o.delay()
		}
	})
}
// log reports a span-buffer overflow: immediately when not paused, or —
// within the 5-second suppression window — by incrementing the count that
// delay's timer will report later.
func (o *overflowLogger) log() {
	o.mu.Lock()
	defer o.mu.Unlock()
	if !o.pause {
		log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full")
		o.delay()
	} else {
		o.accum++
	}
}

View File

@@ -1,255 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
import (
"math"
"time"
"unicode/utf8"
"go.opencensus.io/internal"
"go.opencensus.io/plugin/ochttp"
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
"go.opencensus.io/trace"
tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
statuspb "google.golang.org/genproto/googleapis/rpc/status"
)
const (
	// Stackdriver limits on the number of events attached to a single span.
	maxAnnotationEventsPerSpan = 32
	maxMessageEventsPerSpan    = 128
	// Byte limit applied to truncatable string values (see trunc).
	maxAttributeStringValue = 256
	// Attribute key recording the client library that produced the span.
	agentLabel = "g.co/agent"

	// Canonical Stackdriver label keys for well-known HTTP attributes
	// (see copyAttributes).
	labelHTTPHost       = `/http/host`
	labelHTTPMethod     = `/http/method`
	labelHTTPStatusCode = `/http/status_code`
	labelHTTPPath       = `/http/path`
	labelHTTPUserAgent  = `/http/user_agent`
)
// protoFromSpanData returns a protocol buffer representation of a SpanData,
// applying Stackdriver's per-span limits on annotations, message events and
// attributes, and recording how many of each were dropped.
func protoFromSpanData(s *trace.SpanData, projectID string) *tracepb.Span {
	if s == nil {
		return nil
	}
	traceIDString := s.SpanContext.TraceID.String()
	spanIDString := s.SpanContext.SpanID.String()
	// Prefix the display name so client/server spans are distinguishable.
	name := s.Name
	switch s.SpanKind {
	case trace.SpanKindClient:
		name = "Sent." + name
	case trace.SpanKindServer:
		name = "Recv." + name
	}
	sp := &tracepb.Span{
		Name:                    "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString,
		SpanId:                  spanIDString,
		DisplayName:             trunc(name, 128),
		StartTime:               timestampProto(s.StartTime),
		EndTime:                 timestampProto(s.EndTime),
		SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent},
	}
	if p := s.ParentSpanID; p != (trace.SpanID{}) {
		sp.ParentSpanId = p.String()
	}
	if s.Status.Code != 0 || s.Status.Message != "" {
		sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message}
	}
	var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int
	copyAttributes(&sp.Attributes, s.Attributes)
	// Copy annotations, stopping (and counting the remainder as dropped)
	// once the per-span limit is reached.
	as := s.Annotations
	for i, a := range as {
		if annotations >= maxAnnotationEventsPerSpan {
			droppedAnnotationsCount = len(as) - i
			break
		}
		annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)}
		copyAttributes(&annotation.Attributes, a.Attributes)
		event := &tracepb.Span_TimeEvent{
			Time:  timestampProto(a.Time),
			Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation},
		}
		annotations++
		if sp.TimeEvents == nil {
			sp.TimeEvents = &tracepb.Span_TimeEvents{}
		}
		sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event)
	}
	// Always tag the span with the producing agent.
	if sp.Attributes == nil {
		sp.Attributes = &tracepb.Span_Attributes{
			AttributeMap: make(map[string]*tracepb.AttributeValue),
		}
	}
	sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{
		Value: &tracepb.AttributeValue_StringValue{
			StringValue: trunc(internal.UserAgent, maxAttributeStringValue),
		},
	}
	// Copy message events, subject to the same kind of limit as annotations.
	es := s.MessageEvents
	for i, e := range es {
		if messageEvents >= maxMessageEventsPerSpan {
			droppedMessageEventsCount = len(es) - i
			break
		}
		messageEvents++
		if sp.TimeEvents == nil {
			sp.TimeEvents = &tracepb.Span_TimeEvents{}
		}
		sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{
			Time: timestampProto(e.Time),
			Value: &tracepb.Span_TimeEvent_MessageEvent_{
				MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
					Type:                  tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
					Id:                    e.MessageID,
					UncompressedSizeBytes: e.UncompressedByteSize,
					CompressedSizeBytes:   e.CompressedByteSize,
				},
			},
		})
	}
	if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 {
		if sp.TimeEvents == nil {
			sp.TimeEvents = &tracepb.Span_TimeEvents{}
		}
		sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
		sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
	}
	if len(s.Links) > 0 {
		sp.Links = &tracepb.Span_Links{}
		sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links))
		for _, l := range s.Links {
			link := &tracepb.Span_Link{
				TraceId: l.TraceID.String(),
				SpanId:  l.SpanID.String(),
				Type:    tracepb.Span_Link_Type(l.Type),
			}
			copyAttributes(&link.Attributes, l.Attributes)
			sp.Links.Link = append(sp.Links.Link, link)
		}
	}
	return sp
}
// timestampProto converts a time.Time into its protobuf Timestamp form
// (whole seconds since the Unix epoch plus a nanosecond remainder).
func timestampProto(t time.Time) *timestamppb.Timestamp {
	ts := &timestamppb.Timestamp{
		Seconds: t.Unix(),
		Nanos:   int32(t.Nanosecond()),
	}
	return ts
}
// copyAttributes copies a map of attributes to a proto map field.
// It creates the map if it is nil. Well-known OpenCensus HTTP attribute
// keys are rewritten to Stackdriver's canonical label keys, and any other
// key longer than 128 bytes is dropped and counted in
// DroppedAttributesCount.
func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) {
	if len(in) == 0 {
		return
	}
	if *out == nil {
		*out = &tracepb.Span_Attributes{}
	}
	if (*out).AttributeMap == nil {
		(*out).AttributeMap = make(map[string]*tracepb.AttributeValue)
	}
	var dropped int32
	for key, value := range in {
		av := attributeValue(value)
		if av == nil {
			// Unsupported value type: skipped without counting as dropped.
			continue
		}
		switch key {
		case ochttp.PathAttribute:
			(*out).AttributeMap[labelHTTPPath] = av
		case ochttp.HostAttribute:
			(*out).AttributeMap[labelHTTPHost] = av
		case ochttp.MethodAttribute:
			(*out).AttributeMap[labelHTTPMethod] = av
		case ochttp.UserAgentAttribute:
			(*out).AttributeMap[labelHTTPUserAgent] = av
		case ochttp.StatusCodeAttribute:
			(*out).AttributeMap[labelHTTPStatusCode] = av
		default:
			if len(key) > 128 {
				dropped++
				continue
			}
			(*out).AttributeMap[key] = av
		}
	}
	(*out).DroppedAttributesCount = dropped
}
// attributeValue converts a span attribute value of a supported type
// (bool, int64 or string) into a tracepb.AttributeValue; string values
// are truncated to maxAttributeStringValue bytes. Unsupported types
// yield nil.
func attributeValue(v interface{}) *tracepb.AttributeValue {
	var av *tracepb.AttributeValue
	switch x := v.(type) {
	case bool:
		av = &tracepb.AttributeValue{
			Value: &tracepb.AttributeValue_BoolValue{BoolValue: x},
		}
	case int64:
		av = &tracepb.AttributeValue{
			Value: &tracepb.AttributeValue_IntValue{IntValue: x},
		}
	case string:
		av = &tracepb.AttributeValue{
			Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(x, maxAttributeStringValue)},
		}
	}
	return av
}
// trunc returns a TruncatableString holding at most limit bytes of s.
// When the byte cut lands inside a multi-byte rune, the partial rune is
// dropped so the result stays valid UTF-8; TruncatedByteCount records how
// many bytes were removed.
func trunc(s string, limit int) *tracepb.TruncatableString {
	if len(s) <= limit {
		return &tracepb.TruncatableString{
			Value:              s,
			TruncatedByteCount: 0,
		}
	}
	b := []byte(s[:limit])
	// Strip trailing bytes that form an incomplete rune.
	for len(b) > 0 {
		r, size := utf8.DecodeLastRune(b)
		if r != utf8.RuneError || size != 1 {
			break
		}
		b = b[:len(b)-1]
	}
	return &tracepb.TruncatableString{
		Value:              string(b),
		TruncatedByteCount: clip32(len(s) - len(b)),
	}
}
// clip32 clamps x to the value range of an int32.
func clip32(x int) int32 {
	switch {
	case x < math.MinInt32:
		return math.MinInt32
	case x > math.MaxInt32:
		return math.MaxInt32
	default:
		return int32(x)
	}
}

View File

@@ -1,389 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
import (
"context"
"fmt"
"math/big"
"reflect"
"sort"
"strings"
"testing"
"time"
"go.opencensus.io/internal"
"github.com/golang/protobuf/proto"
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
"go.opencensus.io/trace"
tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
codepb "google.golang.org/genproto/googleapis/rpc/code"
statuspb "google.golang.org/genproto/googleapis/rpc/status"
)
// projectID is the fake Stackdriver project used by the tests in this file.
const projectID = "testproject"

var (
	// Fixed trace and span identifiers so test output is deterministic.
	traceID = trace.TraceID{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f}
	spanID  = trace.SpanID{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
)
// spans implements sort.Interface, ordering spans by display name so tests
// can compare against expectations in a deterministic order.
type spans []*tracepb.Span

func (s spans) Len() int           { return len(s) }
func (s spans) Less(x, y int) bool { return s[x].DisplayName.Value < s[y].DisplayName.Value }
func (s spans) Swap(x, y int)      { s[x], s[y] = s[y], s[x] }
// testExporter collects exported spans in memory for later inspection.
type testExporter struct {
	spans []*trace.SpanData
}

// ExportSpan implements trace.Exporter by appending the span to t.spans.
func (t *testExporter) ExportSpan(s *trace.SpanData) {
	t.spans = append(t.spans, s)
}
// TestExportTrace builds a small tree of spans (span0 with a remote parent
// and descendants span1..span4 exercising annotations, message events,
// attributes, status and links), converts each exported SpanData with
// protoFromSpanData, and compares the results against hand-built expected
// protos.
func TestExportTrace(t *testing.T) {
	ctx := context.Background()

	var te testExporter
	trace.RegisterExporter(&te)
	defer trace.UnregisterExporter(&te)

	ctx, span0 := trace.StartSpanWithRemoteParent(
		ctx,
		"span0",
		trace.SpanContext{
			TraceID:      traceID,
			SpanID:       spanID,
			TraceOptions: 1,
		},
	)
	{
		ctx1, span1 := trace.StartSpan(ctx, "span1")
		{
			_, span2 := trace.StartSpan(ctx1, "span2")
			span2.AddMessageSendEvent(0x123, 1024, 512)
			span2.Annotatef(nil, "in span%d", 2)
			span2.Annotate(nil, big.NewRat(2, 4).String())
			span2.AddAttributes(
				trace.StringAttribute("key1", "value1"),
				trace.StringAttribute("key2", "value2"))
			// Overwrites the earlier string value for "key1".
			span2.AddAttributes(trace.Int64Attribute("key1", 100))
			span2.End()
		}
		{
			ctx3, span3 := trace.StartSpan(ctx1, "span3")
			span3.Annotate(nil, "in span3")
			span3.AddMessageReceiveEvent(0x456, 2048, 1536)
			span3.SetStatus(trace.Status{Code: int32(codepb.Code_UNAVAILABLE)})
			span3.End()
			{
				_, span4 := trace.StartSpan(ctx3, "span4")
				x := 42
				a1 := []trace.Attribute{trace.StringAttribute("k1", "v1")}
				a2 := []trace.Attribute{trace.StringAttribute("k2", "v2")}
				a3 := []trace.Attribute{trace.StringAttribute("k3", "v3")}
				a4 := map[string]interface{}{"k4": "v4"}
				r := big.NewRat(2, 4)
				span4.Annotate(a1, r.String())
				span4.Annotatef(a2, "foo %d", x)
				span4.Annotate(a3, "in span4")
				span4.AddLink(trace.Link{TraceID: trace.TraceID{1, 2}, SpanID: trace.SpanID{3}, Type: trace.LinkTypeParent, Attributes: a4})
				span4.End()
			}
		}
		span1.End()
	}
	span0.End()
	if len(te.spans) != 5 {
		t.Errorf("got %d exported spans, want 5", len(te.spans))
	}

	var spbs spans
	for _, s := range te.spans {
		spbs = append(spbs, protoFromSpanData(s, "testproject"))
	}
	// Sort by display name (span0..span4) so expectations can be indexed.
	sort.Sort(spbs)

	// Verify the parent/child relationships recorded above.
	for i, want := range []string{
		spanID.String(),
		spbs[0].SpanId,
		spbs[1].SpanId,
		spbs[1].SpanId,
		spbs[3].SpanId,
	} {
		if got := spbs[i].ParentSpanId; got != want {
			t.Errorf("span %d: got ParentSpanID %q want %q", i, got, want)
		}
	}
	// Timestamps are nondeterministic: check presence, then blank them out
	// (along with names/IDs checked here) so DeepEqual below can ignore them.
	checkTime := func(ts **timestamppb.Timestamp) {
		if *ts == nil {
			t.Error("expected timestamp")
		}
		*ts = nil
	}
	for _, span := range spbs {
		checkTime(&span.StartTime)
		checkTime(&span.EndTime)
		if span.TimeEvents != nil {
			for _, te := range span.TimeEvents.TimeEvent {
				checkTime(&te.Time)
			}
		}
		if want := fmt.Sprintf("projects/testproject/traces/%s/spans/%s", traceID, span.SpanId); span.Name != want {
			t.Errorf("got span name %q want %q", span.Name, want)
		}
		span.Name, span.SpanId, span.ParentSpanId = "", "", ""
	}

	expectedSpans := spans{
		&tracepb.Span{
			DisplayName:             trunc("span0", 128),
			SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: false},
			Attributes: &tracepb.Span_Attributes{
				AttributeMap: map[string]*tracepb.AttributeValue{
					agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}},
				},
			},
		},
		&tracepb.Span{
			DisplayName:             trunc("span1", 128),
			SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true},
			Attributes: &tracepb.Span_Attributes{
				AttributeMap: map[string]*tracepb.AttributeValue{
					agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}},
				},
			},
		},
		&tracepb.Span{
			DisplayName: trunc("span2", 128),
			Attributes: &tracepb.Span_Attributes{
				AttributeMap: map[string]*tracepb.AttributeValue{
					"key2":     {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("value2", 256)}},
					"key1":     {Value: &tracepb.AttributeValue_IntValue{IntValue: 100}},
					agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}},
				},
			},
			TimeEvents: &tracepb.Span_TimeEvents{
				TimeEvent: []*tracepb.Span_TimeEvent{
					{
						Value: &tracepb.Span_TimeEvent_Annotation_{
							Annotation: &tracepb.Span_TimeEvent_Annotation{
								Description: trunc("in span2", 256),
							},
						},
					},
					{
						Value: &tracepb.Span_TimeEvent_Annotation_{
							Annotation: &tracepb.Span_TimeEvent_Annotation{
								Description: trunc("1/2", 256),
							},
						},
					},
					{
						Value: &tracepb.Span_TimeEvent_MessageEvent_{
							MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
								Type:                  tracepb.Span_TimeEvent_MessageEvent_SENT,
								Id:                    0x123,
								UncompressedSizeBytes: 1024,
								CompressedSizeBytes:   512,
							},
						},
					},
				},
			},
			SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true},
		},
		&tracepb.Span{
			DisplayName: trunc("span3", 128),
			Attributes: &tracepb.Span_Attributes{
				AttributeMap: map[string]*tracepb.AttributeValue{
					agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}},
				},
			},
			TimeEvents: &tracepb.Span_TimeEvents{
				TimeEvent: []*tracepb.Span_TimeEvent{
					{
						Value: &tracepb.Span_TimeEvent_Annotation_{
							Annotation: &tracepb.Span_TimeEvent_Annotation{
								Description: trunc("in span3", 256),
							},
						},
					},
					{
						Value: &tracepb.Span_TimeEvent_MessageEvent_{
							MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
								Type:                  tracepb.Span_TimeEvent_MessageEvent_RECEIVED,
								Id:                    0x456,
								UncompressedSizeBytes: 2048,
								CompressedSizeBytes:   1536,
							},
						},
					},
				},
			},
			Status: &statuspb.Status{
				Code: 14,
			},
			SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true},
		},
		&tracepb.Span{
			DisplayName: trunc("span4", 128),
			Attributes: &tracepb.Span_Attributes{
				AttributeMap: map[string]*tracepb.AttributeValue{
					agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}},
				},
			},
			TimeEvents: &tracepb.Span_TimeEvents{
				TimeEvent: []*tracepb.Span_TimeEvent{
					{
						Value: &tracepb.Span_TimeEvent_Annotation_{
							Annotation: &tracepb.Span_TimeEvent_Annotation{
								Description: trunc("1/2", 256),
								Attributes: &tracepb.Span_Attributes{
									AttributeMap: map[string]*tracepb.AttributeValue{
										"k1": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v1", 256)}},
									},
								},
							},
						},
					},
					{
						Value: &tracepb.Span_TimeEvent_Annotation_{
							Annotation: &tracepb.Span_TimeEvent_Annotation{
								Description: trunc("foo 42", 256),
								Attributes: &tracepb.Span_Attributes{
									AttributeMap: map[string]*tracepb.AttributeValue{
										"k2": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v2", 256)}},
									},
								},
							},
						},
					},
					{
						Value: &tracepb.Span_TimeEvent_Annotation_{
							Annotation: &tracepb.Span_TimeEvent_Annotation{
								Description: trunc("in span4", 256),
								Attributes: &tracepb.Span_Attributes{
									AttributeMap: map[string]*tracepb.AttributeValue{
										"k3": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v3", 256)}},
									},
								},
							},
						},
					},
				},
			},
			Links: &tracepb.Span_Links{
				Link: []*tracepb.Span_Link{
					{
						TraceId: "01020000000000000000000000000000",
						SpanId:  "0300000000000000",
						Type:    tracepb.Span_Link_PARENT_LINKED_SPAN,
						Attributes: &tracepb.Span_Attributes{
							AttributeMap: map[string]*tracepb.AttributeValue{
								"k4": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v4", 256)}},
							},
						},
					},
				},
			},
			SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true},
		},
	}
	if !reflect.DeepEqual(spbs, expectedSpans) {
		// Render both sides as text protos for a readable failure message.
		var got, want []string
		for _, s := range spbs {
			got = append(got, proto.MarshalTextString(s))
		}
		for _, s := range expectedSpans {
			want = append(want, proto.MarshalTextString(s))
		}
		t.Errorf("got spans:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n"))
	}
}
// TestEnums checks that the OpenCensus link-type and message-event-type
// enums are numerically identical to their Stackdriver proto counterparts,
// which protoFromSpanData relies on when converting by direct cast.
func TestEnums(t *testing.T) {
	linkCases := []struct {
		x trace.LinkType
		y tracepb.Span_Link_Type
	}{
		{trace.LinkTypeUnspecified, tracepb.Span_Link_TYPE_UNSPECIFIED},
		{trace.LinkTypeChild, tracepb.Span_Link_CHILD_LINKED_SPAN},
		{trace.LinkTypeParent, tracepb.Span_Link_PARENT_LINKED_SPAN},
	}
	for _, c := range linkCases {
		if trace.LinkType(c.y) != c.x {
			t.Errorf("got link type values %d and %d, want equal", c.x, c.y)
		}
	}
	msgCases := []struct {
		x trace.MessageEventType
		y tracepb.Span_TimeEvent_MessageEvent_Type
	}{
		{trace.MessageEventTypeUnspecified, tracepb.Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED},
		{trace.MessageEventTypeSent, tracepb.Span_TimeEvent_MessageEvent_SENT},
		{trace.MessageEventTypeRecv, tracepb.Span_TimeEvent_MessageEvent_RECEIVED},
	}
	for _, c := range msgCases {
		if trace.MessageEventType(c.y) != c.x {
			t.Errorf("got network event type values %d and %d, want equal", c.x, c.y)
		}
	}
}
// BenchmarkProto measures the cost of converting a representative SpanData
// (attributes, one annotation, one message event, a status) into its
// Stackdriver protobuf form.
func BenchmarkProto(b *testing.B) {
	sd := &trace.SpanData{
		SpanContext: trace.SpanContext{
			TraceID: traceID,
			SpanID:  spanID,
		},
		Name:       "foo",
		StartTime:  time.Now().Add(-time.Second),
		EndTime:    time.Now(),
		Attributes: map[string]interface{}{"foo": "bar"},
		Annotations: []trace.Annotation{
			{
				Time:       time.Now().Add(-time.Millisecond),
				Message:    "hello, world",
				Attributes: map[string]interface{}{"foo": "bar"},
			},
		},
		MessageEvents: []trace.MessageEvent{
			{
				Time:                 time.Now().Add(-time.Microsecond),
				EventType:            1,
				MessageID:            2,
				UncompressedByteSize: 4,
				CompressedByteSize:   3,
			},
		},
		Status: trace.Status{
			Code:    42,
			Message: "failed",
		},
		HasRemoteParent: true,
	}
	// Accumulate into x so the compiler cannot eliminate the conversion.
	var x int
	for i := 0; i < b.N; i++ {
		s := protoFromSpanData(sd, `testproject`)
		x += len(s.Name)
	}
	if x == 0 {
		fmt.Println(x)
	}
}

View File

@@ -1,62 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
import (
"context"
"testing"
"time"
"go.opencensus.io/trace"
)
// TestBundling checks that the trace exporter groups exported spans into
// bundles of BundleCountThreshold spans and that a partially-filled bundle
// is held back until the delay threshold elapses.
func TestBundling(t *testing.T) {
	exporter := newTraceExporterWithClient(Options{
		ProjectID:            "fakeProjectID",
		BundleDelayThreshold: time.Second / 10,
		BundleCountThreshold: 10,
	}, nil)
	// Intercept uploads so the test can observe each bundle.
	ch := make(chan []*trace.SpanData)
	exporter.uploadFn = func(spans []*trace.SpanData) {
		ch <- spans
	}
	trace.RegisterExporter(exporter)
	// 35 spans = three full bundles of 10 plus a partial bundle of 5.
	for i := 0; i < 35; i++ {
		_, span := trace.StartSpan(context.Background(), "span", trace.WithSampler(trace.AlwaysSample()))
		span.End()
	}
	// Read the first three bundles.
	<-ch
	<-ch
	<-ch
	// Test that the fourth bundle isn't sent early.
	select {
	case <-ch:
		t.Errorf("bundle sent too early")
	case <-time.After(time.Second / 20):
		// Delay threshold has now elapsed; the partial bundle arrives.
		<-ch
	}
	// Test that there aren't extra bundles.
	select {
	case <-ch:
		t.Errorf("too many bundles sent")
	case <-time.After(time.Second / 5):
	}
}

27
vendor/go.opencensus.io/exporterutil/version.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package exporterutil contains common utilities for exporter implementations.
//
// Deprecated: Don't use this package.
package exporterutil
import "go.opencensus.io"
// Version is the current release version of OpenCensus in use. It is made
// available for exporters to include in User-Agent-like metadata.
//
// Deprecated: Use opencensus.Version().
var Version = opencensus.Version()

// TODO(jbd): Remove this package at the next release.

88
vendor/go.opencensus.io/internal/check/version.go generated vendored Normal file
View File

@@ -0,0 +1,88 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Command version checks that the version string matches the latest Git tag.
// This is expected to pass only on the master branch.
package main
import (
"bytes"
"fmt"
"log"
"os"
"os/exec"
"sort"
"strconv"
"strings"
opencensus "go.opencensus.io"
)
// main lists the repository's Git tags, determines the highest semantic
// version among them, and exits non-zero unless opencensus.Version() is
// strictly greater than that latest tag.
func main() {
	cmd := exec.Command("git", "tag")
	var buf bytes.Buffer
	cmd.Stdout = &buf
	err := cmd.Run()
	if err != nil {
		log.Fatal(err)
	}
	// Parse every non-empty tag line into a version triple.
	var versions []version
	for _, vStr := range strings.Split(buf.String(), "\n") {
		if len(vStr) == 0 {
			continue
		}
		versions = append(versions, parseVersion(vStr))
	}
	sort.Slice(versions, func(i, j int) bool {
		return versionLess(versions[i], versions[j])
	})
	latest := versions[len(versions)-1]
	codeVersion := parseVersion("v" + opencensus.Version())
	if !versionLess(latest, codeVersion) {
		fmt.Printf("exporter.Version is out of date with Git tags. Got %s; want something greater than %s\n", opencensus.Version(), latest)
		os.Exit(1)
	}
	fmt.Printf("exporter.Version is up-to-date: %s\n", opencensus.Version())
}
// version is a semantic version expressed as its three numeric
// components: major, minor, patch.
type version [3]int

// versionLess reports whether v1 is strictly lower than v2, comparing
// the major, minor, and patch components in that order.
func versionLess(v1, v2 version) bool {
	for c := 0; c < 3; c++ {
		if diff := v1[c] - v2[c]; diff != 0 {
			return diff < 0
		}
	}
	return false
}

// parseVersion parses a Git tag of the form "vMAJOR.MINOR.PATCH".
// On any malformed tag it prints a diagnostic and exits with code 2.
func parseVersion(vStr string) version {
	// Guard against tags that cannot even be sliced/split, e.g. "" or "x";
	// vStr[1:] below would panic on a tag shorter than one byte.
	if len(vStr) < 2 || vStr[0] != 'v' {
		fmt.Printf("Unrecognized version tag %q: expected form \"vX.Y.Z\"\n", vStr)
		os.Exit(2)
	}
	split := strings.Split(vStr[1:], ".")
	// Guard against tags with a missing component (e.g. "v1.0"), which
	// previously panicked with an index-out-of-range instead of reporting
	// the tag as unrecognized.
	if len(split) != 3 {
		fmt.Printf("Unrecognized version tag %q: expected 3 dot-separated components\n", vStr)
		os.Exit(2)
	}
	var (
		v   version
		err error
	)
	for i := 0; i < 3; i++ {
		v[i], err = strconv.Atoi(split[i])
		if err != nil {
			fmt.Printf("Unrecognized version tag %q: %s\n", vStr, err)
			os.Exit(2)
		}
	}
	return v
}

// String implements fmt.Stringer, rendering e.g. "1.2.3".
func (v version) String() string {
	return fmt.Sprintf("%d.%d.%d", v[0], v[1], v[2])
}

View File

@@ -14,11 +14,16 @@
package internal // import "go.opencensus.io/internal"
import "time"
import (
"fmt"
"time"
"go.opencensus.io"
)
// UserAgent is the user agent to be added to the outgoing
// requests from the exporters.
const UserAgent = "opencensus-go [0.8.0]"
var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version())
// MonotonicEndTime returns the end time at present
// but offset from start, monotonically.

View File

@@ -29,7 +29,7 @@ import (
func statsExamples() {
ctx := context.Background()
videoSize := stats.Int64("my.org/video_size", "processed video size", "MB")
videoSize := stats.Int64("example.com/video_size", "processed video size", "MB")
// START aggs
distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
@@ -41,12 +41,12 @@ func statsExamples() {
// START view
if err := view.Register(&view.View{
Name: "my.org/video_size_distribution",
Name: "example.com/video_size_distribution",
Description: "distribution of processed video size over time",
Measure: videoSize,
Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32),
}); err != nil {
log.Fatalf("Failed to subscribe to view: %v", err)
log.Fatalf("Failed to register view: %v", err)
}
// END view

View File

@@ -24,11 +24,11 @@ import (
func tagsExamples() {
ctx := context.Background()
osKey, err := tag.NewKey("my.org/keys/user-os")
osKey, err := tag.NewKey("example.com/keys/user-os")
if err != nil {
log.Fatal(err)
}
userIDKey, err := tag.NewKey("my.org/keys/user-id")
userIDKey, err := tag.NewKey("example.com/keys/user-id")
if err != nil {
log.Fatal(err)
}

View File

@@ -24,7 +24,9 @@ func traceExamples() {
ctx := context.Background()
// START startend
ctx, span := trace.StartSpan(ctx, "your choice of name")
ctx, span := trace.StartSpan(ctx, "cache.Get")
defer span.End()
// Do work to get from cache.
// END startend
}

View File

@@ -14,3 +14,8 @@
// Package opencensus contains Go support for OpenCensus.
package opencensus // import "go.opencensus.io"
// Version is the current release version of OpenCensus in use.
func Version() string {
return "0.15.0"
}

View File

@@ -31,6 +31,7 @@ type ClientHandler struct {
StartOptions trace.StartOptions
}
// HandleConn exists to satisfy gRPC stats.Handler.
func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
// no-op
}

View File

@@ -31,9 +31,9 @@ var (
ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
)
// Predefined views may be subscribed to collect data for the above measures.
// Predefined views may be registered to collect data for the above measures.
// As always, you may also define your own custom views over measures collected by this
// package. These are declared as a convenience only; none are subscribed by
// package. These are declared as a convenience only; none are registered by
// default.
var (
ClientSentBytesPerRPCView = &view.View{
@@ -91,15 +91,6 @@ var (
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultMillisecondsDistribution,
}
// Deprecated: This view is going to be removed, if you need it please define it
// yourself.
ClientRequestCountView = &view.View{
Name: "Count of request messages per client RPC",
TagKeys: []tag.Key{KeyClientMethod},
Measure: ClientRoundtripLatency,
Aggregation: view.Count(),
}
)
// DefaultClientViews are the default client views provided by this package.

View File

@@ -23,7 +23,7 @@ import (
)
func ExampleClientHandler() {
// Subscribe views to collect data.
// Register views to collect data.
if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
log.Fatal(err)
}
@@ -38,7 +38,7 @@ func ExampleClientHandler() {
}
func ExampleServerHandler() {
// Subscribe to views to collect data.
// Register views to collect data.
if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
log.Fatal(err)
}

View File

@@ -15,6 +15,7 @@
package ocgrpc
import (
"sync"
"testing"
"time"
@@ -127,9 +128,12 @@ func TestServerHandler(t *testing.T) {
}
type traceExporter struct {
mu sync.Mutex
buffer []*trace.SpanData
}
func (e *traceExporter) ExportSpan(sd *trace.SpanData) {
e.mu.Lock()
e.buffer = append(e.buffer, sd)
e.mu.Unlock()
}

View File

@@ -34,9 +34,9 @@ var (
// mechanism to load these defaults from a common repository/config shared by
// all supported languages. Likely a serialized protobuf of these defaults.
// Predefined views may be subscribed to collect data for the above measures.
// Predefined views may be registered to collect data for the above measures.
// As always, you may also define your own custom views over measures collected by this
// package. These are declared as a convenience only; none are subscribed by
// package. These are declared as a convenience only; none are registered by
// default.
var (
ServerReceivedBytesPerRPCView = &view.View{

View File

@@ -56,10 +56,16 @@ var (
DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
)
// Server tags are applied to the context used to process each RPC, as well as
// the measures at the end of each RPC.
var (
KeyServerMethod, _ = tag.NewKey("grpc_server_method")
KeyClientMethod, _ = tag.NewKey("grpc_client_method")
KeyServerStatus, _ = tag.NewKey("grpc_server_status")
)
// Client tags are applied to measures at the end of each RPC.
var (
KeyClientMethod, _ = tag.NewKey("grpc_client_method")
KeyClientStatus, _ = tag.NewKey("grpc_client_status")
)

View File

@@ -16,14 +16,18 @@ package ochttp
import (
"net/http"
"net/http/httptrace"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
// Transport is an http.RoundTripper that instruments all outgoing requests with
// stats and tracing. The zero value is intended to be a useful default, but for
// now it's recommended that you explicitly set Propagation.
// OpenCensus stats and tracing.
//
// The zero value is intended to be a useful default, but for
// now it's recommended that you explicitly set Propagation, since the default
// for this may change.
type Transport struct {
// Base may be set to wrap another http.RoundTripper that does the actual
// requests. By default http.DefaultTransport is used.
@@ -43,17 +47,34 @@ type Transport struct {
// for spans started by this transport.
StartOptions trace.StartOptions
// NameFromRequest holds the function to use for generating the span name
// from the information found in the outgoing HTTP Request. By default the
// name equals the URL Path.
FormatSpanName func(*http.Request) string
// NewClientTrace may be set to a function allowing the current *trace.Span
// to be annotated with HTTP request event information emitted by the
// httptrace package.
NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
// TODO: Implement tag propagation for HTTP.
}
// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
rt := t.base()
if isHealthEndpoint(req.URL.Path) {
return rt.RoundTrip(req)
}
// TODO: remove excessive nesting of http.RoundTrippers here.
format := t.Propagation
if format == nil {
format = defaultFormat
}
spanNameFormatter := t.FormatSpanName
if spanNameFormatter == nil {
spanNameFormatter = spanNameFromURL
}
rt = &traceTransport{
base: rt,
format: format,
@@ -61,6 +82,8 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
Sampler: t.StartOptions.Sampler,
SpanKind: trace.SpanKindClient,
},
formatSpanName: spanNameFormatter,
newClientTrace: t.NewClientTrace,
}
rt = statsTransport{base: rt}
return rt.RoundTrip(req)

View File

@@ -25,10 +25,11 @@ import (
"go.opencensus.io/trace/propagation"
)
// B3 headers that OpenCensus understands.
const (
traceIDHeader = "X-B3-TraceId"
spanIDHeader = "X-B3-SpanId"
sampledHeader = "X-B3-Sampled"
TraceIDHeader = "X-B3-TraceId"
SpanIDHeader = "X-B3-SpanId"
SampledHeader = "X-B3-Sampled"
)
// HTTPFormat implements propagation.HTTPFormat to propagate
@@ -45,15 +46,15 @@ var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
// SpanContextFromRequest extracts a B3 span context from incoming requests.
func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
tid, ok := parseTraceID(req.Header.Get(traceIDHeader))
tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
if !ok {
return trace.SpanContext{}, false
}
sid, ok := parseSpanID(req.Header.Get(spanIDHeader))
sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
if !ok {
return trace.SpanContext{}, false
}
sampled, _ := parseSampled(req.Header.Get(sampledHeader))
sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
return trace.SpanContext{
TraceID: tid,
SpanID: sid,
@@ -61,7 +62,8 @@ func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanCon
}, true
}
func parseTraceID(tid string) (trace.TraceID, bool) {
// ParseTraceID parses the value of the X-B3-TraceId header.
func ParseTraceID(tid string) (trace.TraceID, bool) {
if tid == "" {
return trace.TraceID{}, false
}
@@ -82,7 +84,8 @@ func parseTraceID(tid string) (trace.TraceID, bool) {
return traceID, true
}
func parseSpanID(sid string) (spanID trace.SpanID, ok bool) {
// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers.
func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) {
if sid == "" {
return trace.SpanID{}, false
}
@@ -90,12 +93,13 @@ func parseSpanID(sid string) (spanID trace.SpanID, ok bool) {
if err != nil {
return trace.SpanID{}, false
}
start := (8 - len(b))
start := 8 - len(b)
copy(spanID[start:], b)
return spanID, true
}
func parseSampled(sampled string) (trace.TraceOptions, bool) {
// ParseSampled parses the value of the X-B3-Sampled header.
func ParseSampled(sampled string) (trace.TraceOptions, bool) {
switch sampled {
case "true", "1":
return trace.TraceOptions(1), true
@@ -106,8 +110,8 @@ func parseSampled(sampled string) (trace.TraceOptions, bool) {
// SpanContextToRequest modifies the given request to include B3 headers.
func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
req.Header.Set(traceIDHeader, hex.EncodeToString(sc.TraceID[:]))
req.Header.Set(spanIDHeader, hex.EncodeToString(sc.SpanID[:]))
req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:]))
req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:]))
var sampled string
if sc.IsSampled() {
@@ -115,5 +119,5 @@ func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Reques
} else {
sampled = "0"
}
req.Header.Set(sampledHeader, sampled)
req.Header.Set(SampledHeader, sampled)
}

View File

@@ -33,9 +33,9 @@ func TestHTTPFormat_FromRequest(t *testing.T) {
name: "128-bit trace ID + 64-bit span ID; sampled=1",
makeReq: func() *http.Request {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set(traceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(spanIDHeader, "0020000000000001")
req.Header.Set(sampledHeader, "1")
req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(SpanIDHeader, "0020000000000001")
req.Header.Set(SampledHeader, "1")
return req
},
wantSc: trace.SpanContext{
@@ -49,9 +49,9 @@ func TestHTTPFormat_FromRequest(t *testing.T) {
name: "short trace ID + short span ID; sampled=1",
makeReq: func() *http.Request {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set(traceIDHeader, "000102")
req.Header.Set(spanIDHeader, "000102")
req.Header.Set(sampledHeader, "1")
req.Header.Set(TraceIDHeader, "000102")
req.Header.Set(SpanIDHeader, "000102")
req.Header.Set(SampledHeader, "1")
return req
},
wantSc: trace.SpanContext{
@@ -65,9 +65,9 @@ func TestHTTPFormat_FromRequest(t *testing.T) {
name: "64-bit trace ID + 64-bit span ID; sampled=0",
makeReq: func() *http.Request {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set(traceIDHeader, "0020000000000001")
req.Header.Set(spanIDHeader, "0020000000000001")
req.Header.Set(sampledHeader, "0")
req.Header.Set(TraceIDHeader, "0020000000000001")
req.Header.Set(SpanIDHeader, "0020000000000001")
req.Header.Set(SampledHeader, "0")
return req
},
wantSc: trace.SpanContext{
@@ -81,8 +81,8 @@ func TestHTTPFormat_FromRequest(t *testing.T) {
name: "128-bit trace ID + 64-bit span ID; no sampling header",
makeReq: func() *http.Request {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set(traceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(spanIDHeader, "0020000000000001")
req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(SpanIDHeader, "0020000000000001")
return req
},
wantSc: trace.SpanContext{
@@ -96,8 +96,8 @@ func TestHTTPFormat_FromRequest(t *testing.T) {
name: "invalid trace ID + 64-bit span ID; no sampling header",
makeReq: func() *http.Request {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set(traceIDHeader, "")
req.Header.Set(spanIDHeader, "0020000000000001")
req.Header.Set(TraceIDHeader, "")
req.Header.Set(SpanIDHeader, "0020000000000001")
return req
},
wantSc: trace.SpanContext{},
@@ -107,8 +107,8 @@ func TestHTTPFormat_FromRequest(t *testing.T) {
name: "128-bit trace ID; invalid span ID; no sampling header",
makeReq: func() *http.Request {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set(traceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(spanIDHeader, "")
req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(SpanIDHeader, "")
return req
},
wantSc: trace.SpanContext{},
@@ -118,9 +118,9 @@ func TestHTTPFormat_FromRequest(t *testing.T) {
name: "128-bit trace ID + 64-bit span ID; sampled=true",
makeReq: func() *http.Request {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set(traceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(spanIDHeader, "0020000000000001")
req.Header.Set(sampledHeader, "true")
req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(SpanIDHeader, "0020000000000001")
req.Header.Set(SampledHeader, "true")
return req
},
wantSc: trace.SpanContext{
@@ -134,9 +134,9 @@ func TestHTTPFormat_FromRequest(t *testing.T) {
name: "128-bit trace ID + 64-bit span ID; sampled=false",
makeReq: func() *http.Request {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set(traceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(spanIDHeader, "0020000000000001")
req.Header.Set(sampledHeader, "false")
req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124")
req.Header.Set(SpanIDHeader, "0020000000000001")
req.Header.Set(SampledHeader, "false")
return req
},
wantSc: trace.SpanContext{

View File

@@ -29,7 +29,7 @@ import (
const (
supportedVersion = 0
maxVersion = 254
header = "Trace-Parent"
header = "traceparent"
)
var _ propagation.HTTPFormat = (*HTTPFormat)(nil)

View File

@@ -73,7 +73,7 @@ func TestHTTPFormat_FromRequest(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set("Trace-Parent", tt.header)
req.Header.Set("traceparent", tt.header)
gotSc, gotOk := f.SpanContextFromRequest(req)
if !reflect.DeepEqual(gotSc, tt.wantSc) {
@@ -106,7 +106,7 @@ func TestHTTPFormat_ToRequest(t *testing.T) {
req, _ := http.NewRequest("GET", "http://example.com", nil)
f.SpanContextToRequest(tt.sc, req)
h := req.Header.Get("Trace-Parent")
h := req.Header.Get("traceparent")
if got, want := h, tt.wantHeader; got != want {
t.Errorf("HTTPFormat.ToRequest() header = %v, want %v", got, want)
}

View File

@@ -15,10 +15,8 @@
package ochttp
import (
"bufio"
"context"
"errors"
"net"
"io"
"net/http"
"strconv"
"sync"
@@ -30,16 +28,19 @@ import (
"go.opencensus.io/trace/propagation"
)
// Handler is a http.Handler that is aware of the incoming request's span.
// Handler is an http.Handler wrapper to instrument your HTTP server with
// OpenCensus. It supports both stats and tracing.
//
// Tracing
//
// This handler is aware of the incoming request's span, reading it from request
// headers as configured using the Propagation field.
// The extracted span can be accessed from the incoming request's
// context.
//
// span := trace.FromContext(r.Context())
//
// The server span will be automatically ended at the end of ServeHTTP.
//
// Incoming propagation mechanism is determined by the given HTTP propagators.
type Handler struct {
// Propagation defines how traces are propagated. If unspecified,
// B3 propagation will be used.
@@ -60,6 +61,11 @@ type Handler struct {
// be added as a linked trace instead of being added as a parent of the
// current trace.
IsPublicEndpoint bool
// FormatSpanName holds the function to use for generating the span name
// from the information found in the incoming HTTP Request. By default the
// name equals the URL Path.
FormatSpanName func(*http.Request) string
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -76,7 +82,15 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
name := spanNameFromURL(r.URL)
if isHealthEndpoint(r.URL.Path) {
return r, func() {}
}
var name string
if h.FormatSpanName == nil {
name = spanNameFromURL(r)
} else {
name = h.FormatSpanName(r)
}
ctx := r.Context()
var span *trace.Span
sc, ok := h.extractSpanContext(r)
@@ -126,7 +140,7 @@ func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.Respo
track.reqSize = r.ContentLength
}
stats.Record(ctx, ServerRequestCount.M(1))
return track, track.end
return track.wrappedResponseWriter(), track.end
}
type trackingResponseWriter struct {
@@ -140,18 +154,8 @@ type trackingResponseWriter struct {
writer http.ResponseWriter
}
// Compile time assertion for ResponseWriter interface
var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
var _ http.Hijacker = (*trackingResponseWriter)(nil)
var errHijackerUnimplemented = errors.New("ResponseWriter does not implement http.Hijacker")
func (t *trackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hj, ok := t.writer.(http.Hijacker)
if !ok {
return nil, nil, errHijackerUnimplemented
}
return hj.Hijack()
}
func (t *trackingResponseWriter) end() {
t.endOnce.Do(func() {
@@ -190,8 +194,231 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) {
t.statusLine = http.StatusText(t.statusCode)
}
func (t *trackingResponseWriter) Flush() {
if flusher, ok := t.writer.(http.Flusher); ok {
flusher.Flush()
// wrappedResponseWriter returns a wrapped version of the original
// ResponseWriter and only implements the same combination of additional
// interfaces as the original.
// This implementation is based on https://github.com/felixge/httpsnoop.
func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
var (
hj, i0 = t.writer.(http.Hijacker)
cn, i1 = t.writer.(http.CloseNotifier)
pu, i2 = t.writer.(http.Pusher)
fl, i3 = t.writer.(http.Flusher)
rf, i4 = t.writer.(io.ReaderFrom)
)
switch {
case !i0 && !i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
}{t}
case !i0 && !i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
io.ReaderFrom
}{t, rf}
case !i0 && !i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
}{t, fl}
case !i0 && !i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{t, fl, rf}
case !i0 && !i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Pusher
}{t, pu}
case !i0 && !i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Pusher
io.ReaderFrom
}{t, pu, rf}
case !i0 && !i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Pusher
http.Flusher
}{t, pu, fl}
case !i0 && !i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Pusher
http.Flusher
io.ReaderFrom
}{t, pu, fl, rf}
case !i0 && i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
}{t, cn}
case !i0 && i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
io.ReaderFrom
}{t, cn, rf}
case !i0 && i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Flusher
}{t, cn, fl}
case !i0 && i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Flusher
io.ReaderFrom
}{t, cn, fl, rf}
case !i0 && i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
}{t, cn, pu}
case !i0 && i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
io.ReaderFrom
}{t, cn, pu, rf}
case !i0 && i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
http.Flusher
}{t, cn, pu, fl}
case !i0 && i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
http.Flusher
io.ReaderFrom
}{t, cn, pu, fl, rf}
case i0 && !i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
}{t, hj}
case i0 && !i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
io.ReaderFrom
}{t, hj, rf}
case i0 && !i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Flusher
}{t, hj, fl}
case i0 && !i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Flusher
io.ReaderFrom
}{t, hj, fl, rf}
case i0 && !i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
}{t, hj, pu}
case i0 && !i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
io.ReaderFrom
}{t, hj, pu, rf}
case i0 && !i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
http.Flusher
}{t, hj, pu, fl}
case i0 && !i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
http.Flusher
io.ReaderFrom
}{t, hj, pu, fl, rf}
case i0 && i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
}{t, hj, cn}
case i0 && i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
io.ReaderFrom
}{t, hj, cn, rf}
case i0 && i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Flusher
}{t, hj, cn, fl}
case i0 && i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Flusher
io.ReaderFrom
}{t, hj, cn, fl, rf}
case i0 && i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
}{t, hj, cn, pu}
case i0 && i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
io.ReaderFrom
}{t, hj, cn, pu, rf}
case i0 && i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
http.Flusher
}{t, hj, cn, pu, fl}
case i0 && i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
http.Flusher
io.ReaderFrom
}{t, hj, cn, pu, fl, rf}
default:
return struct {
http.ResponseWriter
}{t}
}
}

View File

@@ -14,6 +14,7 @@ import (
"strings"
"sync"
"testing"
"time"
"golang.org/x/net/http2"
@@ -138,32 +139,20 @@ func (trw *testResponseWriterHijacker) Hijack() (net.Conn, *bufio.ReadWriter, er
func TestUnitTestHandlerProxiesHijack(t *testing.T) {
tests := []struct {
w http.ResponseWriter
wantErr string
w http.ResponseWriter
hasHijack bool
}{
{httptest.NewRecorder(), "ResponseWriter does not implement http.Hijacker"},
{nil, "ResponseWriter does not implement http.Hijacker"},
{new(testResponseWriterHijacker), ""},
{httptest.NewRecorder(), false},
{nil, false},
{new(testResponseWriterHijacker), true},
}
for i, tt := range tests {
tw := &trackingResponseWriter{writer: tt.w}
conn, buf, err := tw.Hijack()
if tt.wantErr != "" {
if err == nil || !strings.Contains(err.Error(), tt.wantErr) {
t.Errorf("#%d got error (%v) want error substring (%q)", i, err, tt.wantErr)
}
if conn != nil {
t.Errorf("#%d inconsistent state got non-nil conn (%v)", i, conn)
}
if buf != nil {
t.Errorf("#%d inconsistent state got non-nil buf (%v)", i, buf)
}
continue
}
if err != nil {
t.Errorf("#%d got unexpected error %v", i, err)
w := tw.wrappedResponseWriter()
_, ttHijacker := w.(http.Hijacker)
if want, have := tt.hasHijack, ttHijacker; want != have {
t.Errorf("#%d Hijack got %t, want %t", i, have, want)
}
}
}
@@ -233,20 +222,28 @@ func TestHandlerProxiesHijack_HTTP1(t *testing.T) {
func TestHandlerProxiesHijack_HTTP2(t *testing.T) {
cst := httptest.NewUnstartedServer(&Handler{
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
conn, _, err := w.(http.Hijacker).Hijack()
if conn != nil {
data := fmt.Sprintf("Surprisingly got the Hijacker() Proto: %s", r.Proto)
fmt.Fprintf(conn, "%s 200\nContent-Length:%d\r\n\r\n%s", r.Proto, len(data), data)
conn.Close()
return
}
if _, ok := w.(http.Hijacker); ok {
conn, _, err := w.(http.Hijacker).Hijack()
if conn != nil {
data := fmt.Sprintf("Surprisingly got the Hijacker() Proto: %s", r.Proto)
fmt.Fprintf(conn, "%s 200\nContent-Length:%d\r\n\r\n%s", r.Proto, len(data), data)
conn.Close()
return
}
switch {
case err == nil:
fmt.Fprintf(w, "Unexpectedly did not encounter an error!")
default:
fmt.Fprintf(w, "Unexpected error: %v", err)
case strings.Contains(err.(error).Error(), "Hijack"):
switch {
case err == nil:
fmt.Fprintf(w, "Unexpectedly did not encounter an error!")
default:
fmt.Fprintf(w, "Unexpected error: %v", err)
case strings.Contains(err.(error).Error(), "Hijack"):
// Confirmed HTTP/2.0, let's stream to it
for i := 0; i < 5; i++ {
fmt.Fprintf(w, "%d\n", i)
w.(http.Flusher).Flush()
}
}
} else {
// Confirmed HTTP/2.0, let's stream to it
for i := 0; i < 5; i++ {
fmt.Fprintf(w, "%d\n", i)
@@ -299,8 +296,7 @@ func TestEnsureTrackingResponseWriterSetsStatusCode(t *testing.T) {
for _, tt := range tests {
t.Run(tt.want.Message, func(t *testing.T) {
span := trace.NewSpan("testing", nil, trace.StartOptions{Sampler: trace.AlwaysSample()})
ctx := trace.WithSpan(context.Background(), span)
ctx := context.Background()
prc, pwc := io.Pipe()
go func() {
pwc.Write([]byte("Foo"))
@@ -308,7 +304,13 @@ func TestEnsureTrackingResponseWriterSetsStatusCode(t *testing.T) {
}()
inRes := tt.res
inRes.Body = prc
tr := &traceTransport{base: &testResponseTransport{res: inRes}}
tr := &traceTransport{
base: &testResponseTransport{res: inRes},
formatSpanName: spanNameFromURL,
startOptions: trace.StartOptions{
Sampler: trace.AlwaysSample(),
},
}
req, err := http.NewRequest("POST", "https://example.org", bytes.NewReader([]byte("testing")))
if err != nil {
t.Fatalf("NewRequest error: %v", err)
@@ -351,3 +353,242 @@ var _ http.RoundTripper = (*testResponseTransport)(nil)
func (rb *testResponseTransport) RoundTrip(*http.Request) (*http.Response, error) {
return rb.res, nil
}
func TestHandlerImplementsHTTPPusher(t *testing.T) {
cst := setupAndStartServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
pusher, ok := w.(http.Pusher)
if !ok {
w.Write([]byte("false"))
return
}
err := pusher.Push("/static.css", &http.PushOptions{
Method: "GET",
Header: http.Header{"Accept-Encoding": r.Header["Accept-Encoding"]},
})
if err != nil && false {
// TODO: (@odeke-em) consult with Go stdlib for why trying
// to configure even an HTTP/2 server and HTTP/2 transport
// still return http.ErrNotSupported even without using ochttp.Handler.
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
w.Write([]byte("true"))
}), asHTTP2)
defer cst.Close()
tests := []struct {
rt http.RoundTripper
wantBody string
}{
{
rt: h1Transport(),
wantBody: "false",
},
{
rt: h2Transport(),
wantBody: "true",
},
{
rt: &Transport{Base: h1Transport()},
wantBody: "false",
},
{
rt: &Transport{Base: h2Transport()},
wantBody: "true",
},
}
for i, tt := range tests {
c := &http.Client{Transport: &Transport{Base: tt.rt}}
res, err := c.Get(cst.URL)
if err != nil {
t.Errorf("#%d: Unexpected error %v", i, err)
continue
}
body, _ := ioutil.ReadAll(res.Body)
_ = res.Body.Close()
if g, w := string(body), tt.wantBody; g != w {
t.Errorf("#%d: got = %q; want = %q", i, g, w)
}
}
}
const (
isNil = "isNil"
hang = "hang"
ended = "ended"
nonNotifier = "nonNotifier"
asHTTP1 = false
asHTTP2 = true
)
func setupAndStartServer(hf func(http.ResponseWriter, *http.Request), isHTTP2 bool) *httptest.Server {
cst := httptest.NewUnstartedServer(&Handler{
Handler: http.HandlerFunc(hf),
})
if isHTTP2 {
http2.ConfigureServer(cst.Config, new(http2.Server))
cst.TLS = cst.Config.TLSConfig
cst.StartTLS()
} else {
cst.Start()
}
return cst
}
func insecureTLS() *tls.Config { return &tls.Config{InsecureSkipVerify: true} }
func h1Transport() *http.Transport { return &http.Transport{TLSClientConfig: insecureTLS()} }
func h2Transport() *http.Transport {
tr := &http.Transport{TLSClientConfig: insecureTLS()}
http2.ConfigureTransport(tr)
return tr
}
type concurrentBuffer struct {
sync.RWMutex
bw *bytes.Buffer
}
func (cw *concurrentBuffer) Write(b []byte) (int, error) {
cw.Lock()
defer cw.Unlock()
return cw.bw.Write(b)
}
func (cw *concurrentBuffer) String() string {
cw.Lock()
defer cw.Unlock()
return cw.bw.String()
}
func handleCloseNotify(outLog io.Writer) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
cn, ok := w.(http.CloseNotifier)
if !ok {
fmt.Fprintln(outLog, nonNotifier)
return
}
ch := cn.CloseNotify()
if ch == nil {
fmt.Fprintln(outLog, isNil)
return
}
<-ch
fmt.Fprintln(outLog, ended)
})
}
// TestHandlerImplementsHTTPCloseNotify exercises CloseNotify behavior across
// HTTP/1 and HTTP/2 test servers, using transports both wrapped and not
// wrapped by the ochttp Transport, then compares each server's log output.
func TestHandlerImplementsHTTPCloseNotify(t *testing.T) {
	http1Log := &concurrentBuffer{bw: new(bytes.Buffer)}
	http1Server := setupAndStartServer(handleCloseNotify(http1Log), asHTTP1)
	http2Log := &concurrentBuffer{bw: new(bytes.Buffer)}
	http2Server := setupAndStartServer(handleCloseNotify(http2Log), asHTTP2)
	defer http1Server.Close()
	defer http2Server.Close()
	// Expected log line per server flavor.
	// NOTE(review): tt.want is never actually compared against anything in the
	// loop below — confirm whether the per-test expectation is still needed.
	tests := []struct {
		url  string
		want string
	}{
		{url: http1Server.URL, want: nonNotifier},
		{url: http2Server.URL, want: ended},
	}
	transports := []struct {
		name string
		rt   http.RoundTripper
	}{
		{name: "http2+ochttp", rt: &Transport{Base: h2Transport()}},
		{name: "http1+ochttp", rt: &Transport{Base: h1Transport()}},
		{name: "http1-ochttp", rt: h1Transport()},
		{name: "http2-ochttp", rt: h2Transport()},
	}
	// Each transport invokes one of two server types, either HTTP/1 or HTTP/2
	for _, trc := range transports {
		// Try out all the transport combinations
		for i, tt := range tests {
			req, err := http.NewRequest("GET", tt.url, nil)
			if err != nil {
				t.Errorf("#%d: Unexpected error making request: %v", i, err)
				continue
			}
			// Using a timeout to ensure that the request is cancelled and the server
			// if its handler implements CloseNotify will see this as the client leaving.
			// NOTE(review): defer inside a loop — every cancel runs only when
			// the test returns, not per iteration; consider calling cancel()
			// at the end of each iteration instead.
			ctx, cancel := context.WithTimeout(context.Background(), 80*time.Millisecond)
			defer cancel()
			req = req.WithContext(ctx)
			client := &http.Client{Transport: trc.rt}
			res, err := client.Do(req)
			// A context-deadline error is the expected outcome here; anything
			// else is a real failure.
			if err != nil && !strings.Contains(err.Error(), "context deadline exceeded") {
				t.Errorf("#%d: %sClient Unexpected error %v", i, trc.name, err)
				continue
			}
			// Drain a few bytes and close so the connection can be reused.
			if res != nil && res.Body != nil {
				io.CopyN(ioutil.Discard, res.Body, 5)
				_ = res.Body.Close()
			}
		}
	}
	// Wait for a couple of milliseconds for the GoAway frames to be properly propagated
	<-time.After(150 * time.Millisecond)
	wantHTTP1Log := strings.Repeat("ended\n", len(transports))
	wantHTTP2Log := strings.Repeat("ended\n", len(transports))
	if g, w := http1Log.String(), wantHTTP1Log; g != w {
		t.Errorf("HTTP1Log got\n\t%q\nwant\n\t%q", g, w)
	}
	if g, w := http2Log.String(), wantHTTP2Log; g != w {
		t.Errorf("HTTP2Log got\n\t%q\nwant\n\t%q", g, w)
	}
}
// TestIgnoreHealthz checks that requests to the canonical health-check paths
// (/healthz and /_ah/health) are served normally but never traced: the
// wrapped handler must not see a span in the request context even though the
// sampler is AlwaysSample.
func TestIgnoreHealthz(t *testing.T) {
	var spans int
	ts := httptest.NewServer(&Handler{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Count every request that arrives with a span in its context.
			// NOTE(review): spans is written in the server's handler goroutine
			// and read at the bottom of the test without synchronization —
			// confirm this is clean under `go test -race`.
			span := trace.FromContext(r.Context())
			if span != nil {
				spans++
			}
			fmt.Fprint(w, "ok")
		}),
		StartOptions: trace.StartOptions{
			Sampler: trace.AlwaysSample(),
		},
	})
	defer ts.Close()
	client := &http.Client{}
	for _, path := range []string{"/healthz", "/_ah/health"} {
		resp, err := client.Get(ts.URL + path)
		if err != nil {
			t.Fatalf("Cannot GET %q: %v", path, err)
		}
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			t.Fatalf("Cannot read body for %q: %v", path, err)
		}
		// The endpoint must still be served even though it is not traced.
		if got, want := string(b), "ok"; got != want {
			t.Fatalf("Body for %q = %q; want %q", path, got, want)
		}
		resp.Body.Close()
	}
	if spans > 0 {
		t.Errorf("Got %v spans; want no spans", spans)
	}
}

160
vendor/go.opencensus.io/plugin/ochttp/span_annotator.go generated vendored Normal file
View File

@@ -0,0 +1,160 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"crypto/tls"
"net/http"
"net/http/httptrace"
"strings"
"go.opencensus.io/trace"
)
// spanAnnotator binds a *trace.Span so that httptrace client events can be
// recorded as annotations on it.
type spanAnnotator struct {
	sp *trace.Span
}
// NewSpanAnnotator returns a httptrace.ClientTrace which annotates all emitted
// httptrace events on the provided Span.
func NewSpanAnnotator(_ *http.Request, s *trace.Span) *httptrace.ClientTrace {
	// Bind a single annotator to the span and wire up every hook we implement.
	annotator := spanAnnotator{sp: s}
	ct := &httptrace.ClientTrace{}
	ct.GetConn = annotator.getConn
	ct.GotConn = annotator.gotConn
	ct.PutIdleConn = annotator.putIdleConn
	ct.GotFirstResponseByte = annotator.gotFirstResponseByte
	ct.Got100Continue = annotator.got100Continue
	ct.DNSStart = annotator.dnsStart
	ct.DNSDone = annotator.dnsDone
	ct.ConnectStart = annotator.connectStart
	ct.ConnectDone = annotator.connectDone
	ct.TLSHandshakeStart = annotator.tlsHandshakeStart
	ct.TLSHandshakeDone = annotator.tlsHandshakeDone
	ct.WroteHeaders = annotator.wroteHeaders
	ct.Wait100Continue = annotator.wait100Continue
	ct.WroteRequest = annotator.wroteRequest
	return ct
}
// getConn annotates the span with the GetConn event and its target host:port.
func (s spanAnnotator) getConn(hostPort string) {
	s.sp.Annotate([]trace.Attribute{
		trace.StringAttribute("httptrace.get_connection.host_port", hostPort),
	}, "GetConn")
}
// gotConn records the GotConn event, noting whether the connection was reused
// or taken from the idle pool (and, if idle, for how long it sat there).
func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) {
	attrs := make([]trace.Attribute, 0, 3)
	attrs = append(attrs,
		trace.BoolAttribute("httptrace.got_connection.reused", info.Reused),
		trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle))
	if info.WasIdle {
		attrs = append(attrs, trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String()))
	}
	s.sp.Annotate(attrs, "GotConn")
}
// putIdleConn implements the PutIdleConn httptrace.ClientTrace hook, recording
// any error raised while returning the connection to the idle pool.
func (s spanAnnotator) putIdleConn(err error) {
	var attrs []trace.Attribute
	if err != nil {
		errAttr := trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())
		attrs = append(attrs, errAttr)
	}
	s.sp.Annotate(attrs, "PutIdleConn")
}
// gotFirstResponseByte marks the moment the first byte of the response arrived.
func (s spanAnnotator) gotFirstResponseByte() {
	s.sp.Annotate(nil, "GotFirstResponseByte")
}
// got100Continue marks receipt of a "100 Continue" interim response.
func (s spanAnnotator) got100Continue() {
	s.sp.Annotate(nil, "Got100Continue")
}
// dnsStart records the DNSStart event with the host being looked up.
func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) {
	s.sp.Annotate([]trace.Attribute{
		trace.StringAttribute("httptrace.dns_start.host", info.Host),
	}, "DNSStart")
}
// dnsDone records the DNSDone event, listing the resolved addresses and any
// lookup error.
func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) {
	addrs := make([]string, 0, len(info.Addrs))
	for _, a := range info.Addrs {
		addrs = append(addrs, a.String())
	}
	attrs := []trace.Attribute{
		trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")),
	}
	if info.Err != nil {
		attrs = append(attrs, trace.StringAttribute("httptrace.dns_done.error", info.Err.Error()))
	}
	s.sp.Annotate(attrs, "DNSDone")
}
// connectStart records the ConnectStart event with the dial network and address.
func (s spanAnnotator) connectStart(network, addr string) {
	s.sp.Annotate([]trace.Attribute{
		trace.StringAttribute("httptrace.connect_start.network", network),
		trace.StringAttribute("httptrace.connect_start.addr", addr),
	}, "ConnectStart")
}
// connectDone records the ConnectDone event with the dial network, address,
// and any dial error.
func (s spanAnnotator) connectDone(network, addr string, err error) {
	attrs := make([]trace.Attribute, 0, 3)
	attrs = append(attrs,
		trace.StringAttribute("httptrace.connect_done.network", network),
		trace.StringAttribute("httptrace.connect_done.addr", addr))
	if err != nil {
		attrs = append(attrs, trace.StringAttribute("httptrace.connect_done.error", err.Error()))
	}
	s.sp.Annotate(attrs, "ConnectDone")
}
// tlsHandshakeStart marks the beginning of the TLS handshake.
func (s spanAnnotator) tlsHandshakeStart() {
	s.sp.Annotate(nil, "TLSHandshakeStart")
}
// tlsHandshakeDone records the TLSHandshakeDone event and any handshake error.
// The connection state argument is ignored.
func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) {
	var attrs []trace.Attribute
	if err != nil {
		errAttr := trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())
		attrs = append(attrs, errAttr)
	}
	s.sp.Annotate(attrs, "TLSHandshakeDone")
}
// wroteHeaders marks the point at which the request headers were written.
func (s spanAnnotator) wroteHeaders() {
	s.sp.Annotate(nil, "WroteHeaders")
}
// wait100Continue marks the start of waiting for a "100 Continue" response.
func (s spanAnnotator) wait100Continue() {
	s.sp.Annotate(nil, "Wait100Continue")
}
// wroteRequest records the WroteRequest event, including any error hit while
// writing the request.
func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) {
	var attrs []trace.Attribute
	if e := info.Err; e != nil {
		attrs = append(attrs, trace.StringAttribute("httptrace.wrote_request.error", e.Error()))
	}
	s.sp.Annotate(attrs, "WroteRequest")
}

View File

@@ -0,0 +1,104 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp_test
import (
"errors"
"net/http"
"net/http/httptest"
"strings"
"sync"
"testing"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/trace"
)
// TestSpanAnnotator verifies that wiring NewSpanAnnotator into a Transport's
// NewClientTrace causes the expected httptrace events to be recorded as
// annotations on the exported client span.
func TestSpanAnnotator(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		resp.Write([]byte("Hello, world!"))
	}))
	defer server.Close()

	recorder := &testExporter{}
	trace.RegisterExporter(recorder)
	// Unregister so this exporter does not keep accumulating spans from
	// later tests in the package.
	defer trace.UnregisterExporter(recorder)

	tr := ochttp.Transport{NewClientTrace: ochttp.NewSpanAnnotator}
	req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body"))
	if err != nil {
		// A nil request would panic in RoundTrip below; stop here.
		t.Fatalf("error creating request: %v", err)
	}
	resp, err := tr.RoundTrip(req)
	if err != nil {
		// resp is nil on error, so closing its body below would panic.
		t.Fatalf("response error: %v", err)
	}
	if err := resp.Body.Close(); err != nil {
		t.Errorf("error closing response body: %v", err)
	}
	if got, want := resp.StatusCode, 200; got != want {
		t.Errorf("resp.StatusCode=%d; want=%d", got, want)
	}
	if got, want := len(recorder.spans), 1; got != want {
		// Indexing recorder.spans[0] below requires at least one span.
		t.Fatalf("span count=%d; want=%d", got, want)
	}

	var annotations []string
	for _, annotation := range recorder.spans[0].Annotations {
		annotations = append(annotations, annotation.Message)
	}
	required := []string{
		"GetConn", "GotConn", "GotFirstResponseByte", "ConnectStart",
		"ConnectDone", "WroteHeaders", "WroteRequest",
	}
	if errs := requiredAnnotations(required, annotations); len(errs) > 0 {
		for _, err := range errs {
			t.Error(err)
		}
	}
}
// testExporter is a trace exporter that records exported spans in memory so
// the test can inspect them. mu guards spans against concurrent exports.
type testExporter struct {
	mu    sync.Mutex
	spans []*trace.SpanData
}
// ExportSpan appends s to the recorded spans under the exporter's lock.
func (t *testExporter) ExportSpan(s *trace.SpanData) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.spans = append(t.spans, s)
}
// requiredAnnotations returns one error, in order, for each string in
// required that does not appear anywhere in list. It returns nil when every
// required item is present.
func requiredAnnotations(required []string, list []string) []error {
	// Index list once so each required item is a single map lookup instead of
	// a full scan (the original was O(len(required)*len(list)) with no break).
	present := make(map[string]struct{}, len(list))
	for _, v := range list {
		present[v] = struct{}{}
	}
	var errs []error
	for _, item := range required {
		if _, ok := present[item]; !ok {
			errs = append(errs, errors.New("missing expected annotation: "+item))
		}
	}
	return errs
}

View File

@@ -41,6 +41,10 @@ var (
// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
var (
// Host is the value of the HTTP Host header.
//
// The value of this tag can be controlled by the HTTP client, so you need
// to watch out for potentially generating high-cardinality labels in your
// metrics backend if you use this tag in views.
Host, _ = tag.NewKey("http.host")
// StatusCode is the numeric HTTP response status code,
@@ -48,6 +52,10 @@ var (
StatusCode, _ = tag.NewKey("http.status")
// Path is the URL path (not including query string) in the request.
//
// The value of this tag can be controlled by the HTTP client, so you need
// to watch out for potentially generating high-cardinality labels in your
// metrics backend if you use this tag in views.
Path, _ = tag.NewKey("http.path")
// Method is the HTTP method of the request, capitalized (GET, POST, etc.).
@@ -61,7 +69,7 @@ var (
)
// Package ochttp provides some convenience views.
// You need to subscribe to the views for data to actually be collected.
// You need to register the views for data to actually be collected.
var (
ClientRequestCountView = &view.View{
Name: "opencensus.io/http/client/request_count",

View File

@@ -17,7 +17,7 @@ package ochttp
import (
"io"
"net/http"
"net/url"
"net/http/httptrace"
"go.opencensus.io/plugin/ochttp/propagation/b3"
"go.opencensus.io/trace"
@@ -39,9 +39,11 @@ const (
)
type traceTransport struct {
base http.RoundTripper
startOptions trace.StartOptions
format propagation.HTTPFormat
base http.RoundTripper
startOptions trace.StartOptions
format propagation.HTTPFormat
formatSpanName func(*http.Request) string
newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
}
// TODO(jbd): Add message events for request and response size.
@@ -50,14 +52,19 @@ type traceTransport struct {
// The created span can follow a parent span, if a parent is presented in
// the request's context.
func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
name := spanNameFromURL(req.URL)
name := t.formatSpanName(req)
// TODO(jbd): Discuss whether we want to prefix
// outgoing requests with Sent.
_, span := trace.StartSpan(req.Context(), name,
ctx, span := trace.StartSpan(req.Context(), name,
trace.WithSampler(t.startOptions.Sampler),
trace.WithSpanKind(trace.SpanKindClient))
req = req.WithContext(trace.WithSpan(req.Context(), span))
if t.newClientTrace != nil {
req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span)))
} else {
req = req.WithContext(ctx)
}
if t.format != nil {
t.format.SpanContextToRequest(span.SpanContext(), req)
}
@@ -127,8 +134,8 @@ func (t *traceTransport) CancelRequest(req *http.Request) {
}
}
func spanNameFromURL(u *url.URL) string {
return u.Path
func spanNameFromURL(req *http.Request) string {
return req.URL.Path
}
func requestAttrs(r *http.Request) []trace.Attribute {
@@ -146,7 +153,7 @@ func responseAttrs(resp *http.Response) []trace.Attribute {
}
}
// HTTPStatusToTraceStatus converts the HTTP status code to a trace.Status that
// TraceStatus is a utility to convert the HTTP status code to a trace.Status that
// represents the outcome as closely as possible.
func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
var code int32
@@ -197,3 +204,15 @@ var codeToStr = map[int32]string{
trace.StatusCodeDataLoss: `"DATA_LOSS"`,
trace.StatusCodeUnauthenticated: `"UNAUTHENTICATED"`,
}
func isHealthEndpoint(path string) bool {
// Health checking is pretty frequent and
// traces collected for health endpoints
// can be extremely noisy and expensive.
// Disable canonical health checking endpoints
// like /healthz and /_ah/health for now.
if path == "/healthz" || path == "/_ah/health" {
return true
}
return false
}

View File

@@ -25,7 +25,6 @@ import (
"log"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"strings"
"testing"
@@ -36,6 +35,14 @@ import (
"go.opencensus.io/trace"
)
type testExporter struct {
spans []*trace.SpanData
}
func (t *testExporter) ExportSpan(s *trace.SpanData) {
t.spans = append(t.spans, s)
}
type testTransport struct {
ch chan *http.Request
}
@@ -100,7 +107,7 @@ func TestTransport_RoundTrip(t *testing.T) {
req, _ := http.NewRequest("GET", "http://foo.com", nil)
if tt.parent != nil {
req = req.WithContext(trace.WithSpan(req.Context(), tt.parent))
req = req.WithContext(trace.NewContext(req.Context(), tt.parent))
}
rt.RoundTrip(req)
@@ -357,11 +364,67 @@ func TestSpanNameFromURL(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.u, func(t *testing.T) {
u, err := url.Parse(tt.u)
req, err := http.NewRequest("GET", tt.u, nil)
if err != nil {
t.Errorf("url.Parse() = %v", err)
t.Errorf("url issue = %v", err)
}
if got := spanNameFromURL(u); got != tt.want {
if got := spanNameFromURL(req); got != tt.want {
t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want)
}
})
}
}
func TestFormatSpanName(t *testing.T) {
formatSpanName := func(r *http.Request) string {
return r.Method + " " + r.URL.Path
}
handler := &Handler{
Handler: http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
resp.Write([]byte("Hello, world!"))
}),
FormatSpanName: formatSpanName,
}
server := httptest.NewServer(handler)
defer server.Close()
client := &http.Client{
Transport: &Transport{FormatSpanName: formatSpanName},
}
tests := []struct {
u string
want string
}{
{
u: "/hello?q=a",
want: "GET /hello",
},
{
u: "/a/b?q=c",
want: "GET /a/b",
},
}
for _, tt := range tests {
t.Run(tt.u, func(t *testing.T) {
var te testExporter
trace.RegisterExporter(&te)
res, err := client.Get(server.URL + tt.u)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
res.Body.Close()
trace.UnregisterExporter(&te)
if want, got := 2, len(te.spans); want != got {
t.Fatalf("got exported spans %#v, wanted two spans", te.spans)
}
if got := te.spans[0].Name; got != tt.want {
t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want)
}
if got := te.spans[1].Name; got != tt.want {
t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want)
}
})

View File

@@ -24,8 +24,10 @@ func ExampleRecord() {
ctx := context.Background()
// Measures are usually declared as package-private global variables.
openConns := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitDimensionless)
openConns := stats.Int64("example.com/measure/openconns", "open connections", stats.UnitDimensionless)
// Instrumented packages call stats.Record() to record measuremens.
stats.Record(ctx, openConns.M(124)) // Record 124 open connections.
// Without any views or exporters registered, this statement has no observable effects.
}

View File

@@ -20,19 +20,31 @@ import (
"sync/atomic"
)
// Measure represents a type of metric to be tracked and recorded.
// For example, latency, request Mb/s, and response Mb/s are measures
// Measure represents a single numeric value to be tracked and recorded.
// For example, latency, request bytes, and response bytes could be measures
// to collect from a server.
//
// Each measure needs to be registered before being used.
// Measure constructors such as Int64 and
// Float64 automatically registers the measure
// by the given name.
// Each registered measure needs to be unique by name.
// Measures also have a description and a unit.
// Measures by themselves have no outside effects. In order to be exported,
// the measure needs to be used in a View. If no Views are defined over a
// measure, there is very little cost in recording it.
type Measure interface {
// Name returns the name of this measure.
//
// Measure names are globally unique (among all libraries linked into your program).
// We recommend prefixing the measure name with a domain name relevant to your
// project or application.
//
// Measure names are never sent over the wire or exported to backends.
// They are only used to create Views.
Name() string
// Description returns the human-readable description of this measure.
Description() string
// Unit returns the units for the values this measure takes on.
//
// Units are encoded according to the case-sensitive abbreviations from the
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
Unit() string
}

View File

@@ -15,7 +15,7 @@
package stats
// Float64Measure is a measure of type float64.
// Float64Measure is a measure for float64 values.
type Float64Measure struct {
md *measureDescriptor
}
@@ -44,8 +44,10 @@ func (m *Float64Measure) M(v float64) Measurement {
return Measurement{m: m, v: v}
}
// Float64 creates a new measure of type Float64Measure.
// It never returns an error.
// Float64 creates a new measure for float64 values.
//
// See the documentation for interface Measure for more guidance on the
// parameters of this function.
func Float64(name, description, unit string) *Float64Measure {
mi := registerMeasureHandle(name, description, unit)
return &Float64Measure{mi}

View File

@@ -15,7 +15,7 @@
package stats
// Int64Measure is a measure of type int64.
// Int64Measure is a measure for int64 values.
type Int64Measure struct {
md *measureDescriptor
}
@@ -44,8 +44,10 @@ func (m *Int64Measure) M(v int64) Measurement {
return Measurement{m: m, v: float64(v)}
}
// Int64 creates a new measure of type Int64Measure.
// It never returns an error.
// Int64 creates a new measure for int64 values.
//
// See the documentation for interface Measure for more guidance on the
// parameters of this function.
func Int64(name, description, unit string) *Int64Measure {
mi := registerMeasureHandle(name, description, unit)
return &Int64Measure{mi}

View File

@@ -40,11 +40,12 @@ func (c *collector) addSample(s string, v float64) {
aggregator.addSample(v)
}
// collectRows returns a snapshot of the collected Row values.
func (c *collector) collectedRows(keys []tag.Key) []*Row {
var rows []*Row
rows := make([]*Row, 0, len(c.signatures))
for sig, aggregator := range c.signatures {
tags := decodeTags([]byte(sig), keys)
row := &Row{tags, aggregator}
row := &Row{Tags: tags, Data: aggregator.clone()}
rows = append(rows, row)
}
return rows

View File

@@ -23,11 +23,11 @@ import (
func Example() {
// Measures are usually declared and used by instrumented packages.
m := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitDimensionless)
m := stats.Int64("example.com/measure/openconns", "open connections", stats.UnitDimensionless)
// Views are usually subscribed in your application main function.
// Views are usually registered in your application main function.
if err := view.Register(&view.View{
Name: "my.org/views/openconns",
Name: "example.com/views/openconns",
Description: "open connections",
Measure: m,
Aggregation: view.Distribution(0, 1000, 2000),
@@ -35,5 +35,5 @@ func Example() {
log.Fatal(err)
}
// Use stats.RegisterExporter to export collected data.
// Use view.RegisterExporter to export collected data.
}

View File

@@ -37,6 +37,8 @@ type Exporter interface {
// registered exporters. Once you no longer
// want data to be exported, invoke UnregisterExporter
// with the previously registered exporter.
//
// Binaries can register exporters, libraries shouldn't register exporters.
func RegisterExporter(e Exporter) {
exportersMu.Lock()
defer exportersMu.Unlock()

View File

@@ -29,7 +29,7 @@ import (
)
// View allows users to aggregate the recorded stats.Measurements.
// Views need to be passed to the Subscribe function to be before data will be
// Views need to be passed to the Register function to be before data will be
// collected and sent to Exporters.
type View struct {
Name string // Name of View. Must be unique. If unset, will default to the name of the Measure.
@@ -67,14 +67,14 @@ func (v *View) same(other *View) bool {
v.Measure.Name() == other.Measure.Name()
}
// canonicalized returns a validated View canonicalized by setting explicit
// canonicalize canonicalizes v by setting explicit
// defaults for Name and Description and sorting the TagKeys
func (v *View) canonicalize() error {
if v.Measure == nil {
return fmt.Errorf("cannot subscribe view %q: measure not set", v.Name)
return fmt.Errorf("cannot register view %q: measure not set", v.Name)
}
if v.Aggregation == nil {
return fmt.Errorf("cannot subscribe view %q: aggregation not set", v.Name)
return fmt.Errorf("cannot register view %q: aggregation not set", v.Name)
}
if v.Name == "" {
v.Name = v.Measure.Name()

View File

@@ -171,7 +171,7 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) {
}
ctx, err := tag.New(context.Background(), mods...)
if err != nil {
t.Errorf("%v: NewMap = %v", tc.label, err)
t.Errorf("%v: New = %v", tc.label, err)
}
view.addSample(tag.FromContext(ctx), r.f)
}
@@ -346,7 +346,7 @@ func TestViewSortedKeys(t *testing.T) {
Measure: m,
Aggregation: Sum(),
})
// Subscribe normalizes the view by sorting the tag keys, retrieve the normalized view
// Register normalizes the view by sorting the tag keys, retrieve the normalized view
v := Find("sort_keys")
want := []string{"a", "b", "c"}

View File

@@ -49,8 +49,8 @@ var defaultWorker *worker
var defaultReportingDuration = 10 * time.Second
// Find returns a subscribed view associated with this name.
// If no subscribed view is found, nil is returned.
// Find returns a registered view associated with this name.
// If no registered view is found, nil is returned.
func Find(name string) (v *View) {
req := &getViewByNameReq{
name: name,
@@ -62,7 +62,7 @@ func Find(name string) (v *View) {
}
// Register begins collecting data for the given views.
// Once a view is subscribed, it reports data to the registered exporters.
// Once a view is registered, it reports data to the registered exporters.
func Register(views ...*View) error {
for _, v := range views {
if err := v.canonicalize(); err != nil {
@@ -94,6 +94,8 @@ func Unregister(views ...*View) {
<-req.done
}
// RetrieveData gets a snapshot of the data collected for the the view registered
// with the given name. It is intended for testing only.
func RetrieveData(viewName string) ([]*Row, error) {
req := &retrieveDataReq{
now: time.Now(),
@@ -143,9 +145,7 @@ func (w *worker) start() {
for {
select {
case cmd := <-w.c:
if cmd != nil {
cmd.handleCommand(w)
}
cmd.handleCommand(w)
case <-w.timer.C:
w.reportUsage(time.Now())
case <-w.quit:
@@ -181,7 +181,7 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
}
if x, ok := w.views[vi.view.Name]; ok {
if !x.view.same(vi.view) {
return nil, fmt.Errorf("cannot subscribe view %q; a different view with the same name is already subscribed", v.Name)
return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name)
}
// the view is already registered so there is nothing to do and the
@@ -194,40 +194,30 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
return vi, nil
}
func (w *worker) reportUsage(now time.Time) {
for _, v := range w.views {
if !v.isSubscribed() {
continue
}
rows := v.collectedRows()
_, ok := w.startTimes[v]
if !ok {
w.startTimes[v] = now
}
// Make sure collector is never going
// to mutate the exported data.
rows = deepCopyRowData(rows)
viewData := &Data{
View: v.view,
Start: w.startTimes[v],
End: time.Now(),
Rows: rows,
}
exportersMu.Lock()
for e := range exporters {
e.ExportView(viewData)
}
exportersMu.Unlock()
func (w *worker) reportView(v *viewInternal, now time.Time) {
if !v.isSubscribed() {
return
}
rows := v.collectedRows()
_, ok := w.startTimes[v]
if !ok {
w.startTimes[v] = now
}
viewData := &Data{
View: v.view,
Start: w.startTimes[v],
End: time.Now(),
Rows: rows,
}
exportersMu.Lock()
for e := range exporters {
e.ExportView(viewData)
}
exportersMu.Unlock()
}
func deepCopyRowData(rows []*Row) []*Row {
newRows := make([]*Row, 0, len(rows))
for _, r := range rows {
newRows = append(newRows, &Row{
Data: r.Data.clone(),
Tags: r.Tags,
})
func (w *worker) reportUsage(now time.Time) {
for _, v := range w.views {
w.reportView(v, now)
}
return newRows
}

View File

@@ -73,7 +73,7 @@ func (cmd *registerViewReq) handleCommand(w *worker) {
}
}
// unregisterFromViewReq is the command to unsubscribe to a view. Has no
// unregisterFromViewReq is the command to unregister to a view. Has no
// impact on the data collection for client that are pulling data from the
// library.
type unregisterFromViewReq struct {
@@ -88,6 +88,9 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
continue
}
// Report pending data for this view before removing it.
w.reportView(vi, time.Now())
vi.unsubscribe()
if !vi.isSubscribed() {
// this was the last subscription and view is not collecting anymore.
@@ -143,7 +146,7 @@ type recordReq struct {
func (cmd *recordReq) handleCommand(w *worker) {
for _, m := range cmd.ms {
if (m == stats.Measurement{}) { // not subscribed
if (m == stats.Measurement{}) { // not registered
continue
}
ref := w.getMeasureRef(m.Measure().Name())
@@ -154,7 +157,7 @@ func (cmd *recordReq) handleCommand(w *worker) {
}
// setReportingPeriodReq is the command to modify the duration between
// reporting the collected data to the subscribed clients.
// reporting the collected data to the registered clients.
type setReportingPeriodReq struct {
d time.Duration
c chan bool

View File

@@ -42,7 +42,7 @@ func Test_Worker_ViewRegistration(t *testing.T) {
}
tcs := []testCase{
{
"register and subscribe to v1ID",
"register v1ID",
[]registration{
{
sc1,
@@ -52,7 +52,7 @@ func Test_Worker_ViewRegistration(t *testing.T) {
},
},
{
"register v1ID+v2ID, susbsribe to v1ID",
"register v1ID+v2ID",
[]registration{
{
sc1,
@@ -62,7 +62,7 @@ func Test_Worker_ViewRegistration(t *testing.T) {
},
},
{
"register to v1ID; subscribe to v1ID and view with same ID",
"register to v1ID; ??? to v1ID and view with same ID",
[]registration{
{
sc1,
@@ -263,7 +263,7 @@ func TestReportUsage(t *testing.T) {
SetReportingPeriod(25 * time.Millisecond)
if err := Register(tt.view); err != nil {
t.Fatalf("%v: cannot subscribe: %v", tt.name, err)
t.Fatalf("%v: cannot register: %v", tt.name, err)
}
e := &countExporter{}
@@ -362,9 +362,45 @@ func TestWorkerStarttime(t *testing.T) {
e.Unlock()
}
func TestUnregisterReportsUsage(t *testing.T) {
restart()
ctx := context.Background()
m1 := stats.Int64("measure", "desc", "unit")
view1 := &View{Name: "count", Measure: m1, Aggregation: Count()}
m2 := stats.Int64("measure2", "desc", "unit")
view2 := &View{Name: "count2", Measure: m2, Aggregation: Count()}
SetReportingPeriod(time.Hour)
if err := Register(view1, view2); err != nil {
t.Fatalf("cannot register: %v", err)
}
e := &countExporter{}
RegisterExporter(e)
stats.Record(ctx, m1.M(1))
stats.Record(ctx, m2.M(1))
stats.Record(ctx, m2.M(1))
Unregister(view2)
// Unregister should only flush view2, so expect the count of 2.
want := int64(2)
e.Lock()
got := e.totalCount
e.Unlock()
if got != want {
t.Errorf("got count data = %v; want %v", got, want)
}
}
type countExporter struct {
sync.Mutex
count int64
count int64
totalCount int64
}
func (e *countExporter) ExportView(vd *Data) {
@@ -376,6 +412,7 @@ func (e *countExporter) ExportView(vd *Data) {
e.Lock()
defer e.Unlock()
e.count = d.Value
e.totalCount += d.Value
}
type vdExporter struct {

View File

@@ -30,7 +30,7 @@ var (
func ExampleNewKey() {
// Get a key to represent user OS.
key, err := tag.NewKey("my.org/keys/user-os")
key, err := tag.NewKey("example.com/keys/user-os")
if err != nil {
log.Fatal(err)
}
@@ -38,11 +38,11 @@ func ExampleNewKey() {
}
func ExampleNew() {
osKey, err := tag.NewKey("my.org/keys/user-os")
osKey, err := tag.NewKey("example.com/keys/user-os")
if err != nil {
log.Fatal(err)
}
userIDKey, err := tag.NewKey("my.org/keys/user-id")
userIDKey, err := tag.NewKey("example.com/keys/user-id")
if err != nil {
log.Fatal(err)
}

14
vendor/go.opencensus.io/tag/map.go generated vendored
View File

@@ -28,13 +28,13 @@ type Tag struct {
Value string
}
// Map is a map of tags. Use NewMap to build tag maps.
// Map is a map of tags. Use New to create a context containing
// a new Map.
type Map struct {
m map[Key]string
}
// Value returns the value for the key if a value
// for the key exists.
// Value returns the value for the key if a value for the key exists.
func (m *Map) Value(k Key) (string, bool) {
if m == nil {
return "", false
@@ -47,7 +47,7 @@ func (m *Map) String() string {
if m == nil {
return "nil"
}
var keys []Key
keys := make([]Key, 0, len(m.m))
for k := range m.m {
keys = append(keys, k)
}
@@ -83,8 +83,8 @@ func (m *Map) delete(k Key) {
delete(m.m, k)
}
func newMap(sizeHint int) *Map {
return &Map{m: make(map[Key]string, sizeHint)}
func newMap() *Map {
return &Map{m: make(map[Key]string)}
}
// Mutator modifies a tag map.
@@ -153,7 +153,7 @@ func Delete(k Key) Mutator {
// originated from the incoming context and modified
// with the provided mutators.
func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
m := newMap(0)
m := newMap()
orig := FromContext(ctx)
if orig != nil {
for k, v := range orig.m {

View File

@@ -176,46 +176,59 @@ func Encode(m *Map) []byte {
// Decode decodes the given []byte into a tag map.
func Decode(bytes []byte) (*Map, error) {
ts := newMap(0)
ts := newMap()
err := DecodeEach(bytes, ts.upsert)
if err != nil {
// no partial failures
return nil, err
}
return ts, nil
}
// DecodeEach decodes the given serialized tag map, calling handler for each
// tag key and value decoded.
func DecodeEach(bytes []byte, fn func(key Key, val string)) error {
eg := &encoderGRPC{
buf: bytes,
}
if len(eg.buf) == 0 {
return ts, nil
return nil
}
version := eg.readByte()
if version > tagsVersionID {
return nil, fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID)
return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID)
}
for !eg.readEnded() {
typ := keyType(eg.readByte())
if typ != keyTypeString {
return nil, fmt.Errorf("cannot decode: invalid key type: %q", typ)
return fmt.Errorf("cannot decode: invalid key type: %q", typ)
}
k, err := eg.readBytesWithVarintLen()
if err != nil {
return nil, err
return err
}
v, err := eg.readBytesWithVarintLen()
if err != nil {
return nil, err
return err
}
key, err := NewKey(string(k))
if err != nil {
return nil, err // no partial failures
return err
}
val := string(v)
if !checkValue(val) {
return nil, errInvalidValue // no partial failures
return errInvalidValue
}
fn(key, val)
if err != nil {
return err
}
ts.upsert(key, val)
}
return ts, nil
return nil
}

View File

@@ -80,7 +80,7 @@ func TestEncodeDecode(t *testing.T) {
}
ctx, err := New(context.Background(), mods...)
if err != nil {
t.Errorf("%v: NewMap = %v", tc.label, err)
t.Errorf("%v: New = %v", tc.label, err)
}
encoded := Encode(FromContext(ctx))

View File

@@ -33,7 +33,7 @@ func TestContext(t *testing.T) {
Insert(k2, "v2"),
)
got := FromContext(ctx)
want := newMap(2)
want := newMap()
want.insert(k1, "v1")
want.insert(k2, "v2")
@@ -51,7 +51,7 @@ func TestDo(t *testing.T) {
Insert(k2, "v2"),
)
got := FromContext(ctx)
want := newMap(2)
want := newMap()
want.insert(k1, "v1")
want.insert(k2, "v2")
Do(ctx, func(ctx context.Context) {
@@ -168,7 +168,7 @@ func TestNewMap(t *testing.T) {
}
}
func TestNewMapValidation(t *testing.T) {
func TestNewValidation(t *testing.T) {
tests := []struct {
err string
seed *Map
@@ -213,7 +213,7 @@ func TestNewMapValidation(t *testing.T) {
}
func makeTestTagMap(ids ...int) *Map {
m := newMap(len(ids))
m := newMap()
for _, v := range ids {
k, _ := NewKey(fmt.Sprintf("k%d", v))
m.m[k] = fmt.Sprintf("v%d", v)

View File

@@ -14,7 +14,10 @@
package trace
import "go.opencensus.io/trace/internal"
import (
"go.opencensus.io/trace/internal"
"sync"
)
// Config represents the global tracing configuration.
type Config struct {
@@ -25,16 +28,20 @@ type Config struct {
IDGenerator internal.IDGenerator
}
var configWriteMu sync.Mutex
// ApplyConfig applies changes to the global tracing configuration.
//
// Fields not provided in the given config are going to be preserved.
func ApplyConfig(cfg Config) {
c := config.Load().(*Config)
configWriteMu.Lock()
defer configWriteMu.Unlock()
c := *config.Load().(*Config)
if cfg.DefaultSampler != nil {
c.DefaultSampler = cfg.DefaultSampler
}
if cfg.IDGenerator != nil {
c.IDGenerator = cfg.IDGenerator
}
config.Store(c)
config.Store(&c)
}

View File

@@ -42,7 +42,7 @@ It is common to want to capture all the activity of a function call in a span. F
this to work, the function must take a context.Context as a parameter. Add these two
lines to the top of the function:
ctx, span := trace.StartSpan(ctx, "my.org/Run")
ctx, span := trace.StartSpan(ctx, "example.com/Run")
defer span.End()
StartSpan will create a new top-level span if the context

View File

@@ -16,6 +16,7 @@ package trace
import (
"sync"
"sync/atomic"
"time"
)
@@ -30,28 +31,43 @@ type Exporter interface {
ExportSpan(s *SpanData)
}
type exportersMap map[Exporter]struct{}
var (
exportersMu sync.Mutex
exporters map[Exporter]struct{}
exporterMu sync.Mutex
exporters atomic.Value
)
// RegisterExporter adds to the list of Exporters that will receive sampled
// trace spans.
//
// Binaries can register exporters, libraries shouldn't register exporters.
func RegisterExporter(e Exporter) {
exportersMu.Lock()
if exporters == nil {
exporters = make(map[Exporter]struct{})
exporterMu.Lock()
new := make(exportersMap)
if old, ok := exporters.Load().(exportersMap); ok {
for k, v := range old {
new[k] = v
}
}
exporters[e] = struct{}{}
exportersMu.Unlock()
new[e] = struct{}{}
exporters.Store(new)
exporterMu.Unlock()
}
// UnregisterExporter removes from the list of Exporters the Exporter that was
// registered with the given name.
func UnregisterExporter(e Exporter) {
exportersMu.Lock()
delete(exporters, e)
exportersMu.Unlock()
exporterMu.Lock()
new := make(exportersMap)
if old, ok := exporters.Load().(exportersMap); ok {
for k, v := range old {
new[k] = v
}
}
delete(new, e)
exporters.Store(new)
exporterMu.Unlock()
}
// SpanData contains all the information collected by a Span.

View File

@@ -98,13 +98,6 @@ func FromContext(ctx context.Context) *Span {
return s
}
// WithSpan returns a new context with the given Span attached.
//
// Deprecated: Use NewContext.
func WithSpan(parent context.Context, s *Span) context.Context {
return NewContext(parent, s)
}
// NewContext returns a new context with the given Span attached.
func NewContext(parent context.Context, s *Span) context.Context {
return context.WithValue(parent, contextKey{}, s)
@@ -154,6 +147,9 @@ func WithSampler(sampler Sampler) StartOption {
// StartSpan starts a new child span of the current span in the context. If
// there is no span in the context, creates a new trace and span.
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
var opts StartOptions
var parent SpanContext
@@ -174,6 +170,9 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont
//
// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is
// preferred for cases where the parent is propagated via an incoming request.
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
var opts StartOptions
for _, op := range o {
@@ -185,26 +184,6 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont
return NewContext(ctx, span), span
}
// NewSpan returns a new span.
//
// If parent is not nil, created span will be a child of the parent.
//
// Deprecated: Use StartSpan.
func NewSpan(name string, parent *Span, o StartOptions) *Span {
var parentSpanContext SpanContext
if parent != nil {
parentSpanContext = parent.SpanContext()
}
return startSpanInternal(name, parent != nil, parentSpanContext, false, o)
}
// NewSpanWithRemoteParent returns a new span with the given parent SpanContext.
//
// Deprecated: Use StartSpanWithRemoteParent.
func NewSpanWithRemoteParent(name string, parent SpanContext, o StartOptions) *Span {
return startSpanInternal(name, true, parent, true, o)
}
func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span {
span := &Span{}
span.spanContext = parent
@@ -269,19 +248,19 @@ func (s *Span) End() {
if s.executionTracerTaskEnd != nil {
s.executionTracerTaskEnd()
}
// TODO: optimize to avoid this call if sd won't be used.
sd := s.makeSpanData()
sd.EndTime = internal.MonotonicEndTime(sd.StartTime)
if s.spanStore != nil {
s.spanStore.finished(s, sd)
}
if s.spanContext.IsSampled() {
// TODO: consider holding exportersMu for less time.
exportersMu.Lock()
for e := range exporters {
e.ExportSpan(sd)
exp, _ := exporters.Load().(exportersMap)
mustExport := s.spanContext.IsSampled() && len(exp) > 0
if s.spanStore != nil || mustExport {
sd := s.makeSpanData()
sd.EndTime = internal.MonotonicEndTime(sd.StartTime)
if s.spanStore != nil {
s.spanStore.finished(s, sd)
}
if mustExport {
for e := range exp {
e.ExportSpan(sd)
}
}
exportersMu.Unlock()
}
})
}
@@ -310,6 +289,16 @@ func (s *Span) SpanContext() SpanContext {
return s.spanContext
}
// SetName sets the name of the span, if it is recording events.
func (s *Span) SetName(name string) {
if !s.IsRecordingEvents() {
return
}
s.mu.Lock()
s.data.Name = name
s.mu.Unlock()
}
// SetStatus sets the status of the span, if it is recording events.
func (s *Span) SetStatus(status Status) {
if !s.IsRecordingEvents() {
@@ -488,15 +477,11 @@ type defaultIDGenerator struct {
}
// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
// mu should be held while this function is called.
func (gen *defaultIDGenerator) NewSpanID() [8]byte {
gen.Lock()
id := gen.nextSpanID
gen.nextSpanID += gen.spanIDInc
if gen.nextSpanID == 0 {
gen.nextSpanID += gen.spanIDInc
var id uint64
for id == 0 {
id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
}
gen.Unlock()
var sid [8]byte
binary.LittleEndian.PutUint64(sid[:], id)
return sid

View File

@@ -27,5 +27,6 @@ func startExecutionTracerTask(ctx context.Context, name string) (context.Context
// runtime/trace is not enabled.
return ctx, func() {}
}
return t.NewContext(ctx, name)
nctx, task := t.NewTask(ctx, name)
return nctx, task.End
}

View File

@@ -43,7 +43,7 @@ func TestStrings(t *testing.T) {
func TestFromContext(t *testing.T) {
want := &Span{}
ctx := WithSpan(context.Background(), want)
ctx := NewContext(context.Background(), want)
got := FromContext(ctx)
if got != want {
t.Errorf("got Span pointer %p want %p", got, want)
@@ -446,6 +446,57 @@ func TestMessageEvents(t *testing.T) {
}
}
func TestSetSpanName(t *testing.T) {
want := "SpanName-1"
span := startSpan(StartOptions{})
span.SetName(want)
got, err := endSpan(span)
if err != nil {
t.Fatal(err)
}
if got.Name != want {
t.Errorf("span.Name=%q; want %q", got.Name, want)
}
}
func TestSetSpanNameUnsampledSpan(t *testing.T) {
var nilSpanData *SpanData
span := startSpan(StartOptions{Sampler: NeverSample()})
span.SetName("NoopName")
if want, got := nilSpanData, span.data; want != got {
t.Errorf("span.data=%+v; want %+v", got, want)
}
}
func TestSetSpanNameAfterSpanEnd(t *testing.T) {
want := "SpanName-2"
span := startSpan(StartOptions{})
span.SetName(want)
got, err := endSpan(span)
if err != nil {
t.Fatal(err)
}
// updating name after span.End
span.SetName("NoopName")
// exported span should not be updated by previous call to SetName
if got.Name != want {
t.Errorf("span.Name=%q; want %q", got.Name, want)
}
// span should not be exported again
var te testExporter
RegisterExporter(&te)
span.End()
UnregisterExporter(&te)
if len(te.spans) != 0 {
t.Errorf("got exported spans %#v, wanted no spans", te.spans)
}
}
func TestSetSpanStatus(t *testing.T) {
span := startSpan(StartOptions{})
span.SetStatus(Status{Code: int32(1), Message: "request failed"})

View File

@@ -22,7 +22,7 @@ import (
)
func Example() {
// Both /debug/tracez and /debug/rpcz will be served.
http.Handle("/debug/", http.StripPrefix("/debug", zpages.Handler))
log.Fatal(http.ListenAndServe(":9999", nil))
// Both /debug/tracez and /debug/rpcz will be served on the default mux.
zpages.Handle(nil, "/debug")
log.Fatal(http.ListenAndServe("127.0.0.1:9999", nil))
}

View File

@@ -56,7 +56,7 @@ var (
}
)
func init() {
func registerRPCViews() {
views := make([]*view.View, 0, len(viewType))
for v := range viewType {
views = append(views, v)
@@ -261,7 +261,7 @@ func (s snapExporter) ExportView(vd *view.Data) {
// Update field of s corresponding to the view.
switch vd.View {
case ocgrpc.ClientCompletedRPCsView:
if _, ok := haveResetErrors[method]; ok {
if _, ok := haveResetErrors[method]; !ok {
haveResetErrors[method] = struct{}{}
s.ErrorsTotal = 0
}
@@ -288,7 +288,7 @@ func (s snapExporter) ExportView(vd *view.Data) {
// currently unused
case ocgrpc.ServerCompletedRPCsView:
if _, ok := haveResetErrors[method]; ok {
if _, ok := haveResetErrors[method]; !ok {
haveResetErrors[method] = struct{}{}
s.ErrorsTotal = 0
}

View File

@@ -75,10 +75,6 @@ var (
}
)
func init() {
internal.LocalSpanStoreEnabled = true
}
func canonicalCodeString(code int32) string {
if code < 0 || int(code) >= len(canonicalCodes) {
return "error code " + strconv.FormatInt(int64(code), 10)

View File

@@ -32,15 +32,39 @@ package zpages // import "go.opencensus.io/zpages"
import (
"net/http"
"path"
"sync"
"go.opencensus.io/internal"
)
// Handler is an http.Handler that serves the zpages.
// TODO(ramonza): Remove Handler to make initialization lazy.
// Handler is deprecated: Use Handle.
var Handler http.Handler
func init() {
zpagesMux := http.NewServeMux()
zpagesMux.HandleFunc("/rpcz", rpczHandler)
zpagesMux.HandleFunc("/tracez", tracezHandler)
zpagesMux.Handle("/public/", http.FileServer(fs))
Handler = zpagesMux
mux := http.NewServeMux()
Handle(mux, "/")
Handler = mux
}
// Handle adds the z-pages to the given ServeMux rooted at pathPrefix.
func Handle(mux *http.ServeMux, pathPrefix string) {
enable()
if mux == nil {
mux = http.DefaultServeMux
}
mux.HandleFunc(path.Join(pathPrefix, "rpcz"), rpczHandler)
mux.HandleFunc(path.Join(pathPrefix, "tracez"), tracezHandler)
mux.Handle(path.Join(pathPrefix, "public/"), http.FileServer(fs))
}
var enableOnce sync.Once
func enable() {
enableOnce.Do(func() {
internal.LocalSpanStoreEnabled = true
registerRPCViews()
})
}

View File

@@ -21,6 +21,10 @@ import (
"testing"
"time"
"fmt"
"net/http"
"net/http/httptest"
"go.opencensus.io/trace"
)
@@ -85,3 +89,41 @@ func TestTraceRows(t *testing.T) {
t.Errorf("writeTextTraces: got %q want %q\n", buf.String(), want)
}
}
func TestGetZPages(t *testing.T) {
mux := http.NewServeMux()
Handle(mux, "/debug")
server := httptest.NewServer(mux)
defer server.Close()
tests := []string{"/debug/rpcz", "/debug/tracez"}
for _, tt := range tests {
t.Run(fmt.Sprintf("GET %s", tt), func(t *testing.T) {
res, err := http.Get(server.URL + tt)
if err != nil {
t.Error(err)
return
}
if got, want := res.StatusCode, http.StatusOK; got != want {
t.Errorf("res.StatusCode = %d; want %d", got, want)
}
})
}
}
func TestGetZPages_default(t *testing.T) {
server := httptest.NewServer(Handler)
defer server.Close()
tests := []string{"/rpcz", "/tracez"}
for _, tt := range tests {
t.Run(fmt.Sprintf("GET %s", tt), func(t *testing.T) {
res, err := http.Get(server.URL + tt)
if err != nil {
t.Error(err)
return
}
if got, want := res.StatusCode, http.StatusOK; got != want {
t.Errorf("res.StatusCode = %d; want %d", got, want)
}
})
}
}