The opencensus API changes between 0.6.0 and 0.9.0 (#980)

We get some useful features in later versions; update so as to not
pin downstream consumers (extensions) to an older version.
This commit is contained in:
jan grant
2018-05-09 14:55:00 +01:00
committed by GitHub
parent feab2f0e0f
commit 91e58afa55
109 changed files with 2924 additions and 2344 deletions

8
Gopkg.lock generated
View File

@@ -5,6 +5,7 @@
name = "git.apache.org/thrift.git"
packages = ["lib/go/thrift"]
revision = "272470790ad6db791bd6f9db399b2cd2d5879f74"
source = "github.com/apache/thrift"
[[projects]]
branch = "master"
@@ -529,10 +530,11 @@
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation"
]
revision = "6e3f034057826b530038d93267906ec3c012183f"
version = "v0.6.0"
revision = "10cec2c05ea2cfb8b0d856711daedc49d8a45c56"
version = "v0.9.0"
[[projects]]
branch = "master"
@@ -664,6 +666,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "321ea984c523241adc23f36302d387cebbcc05a56812fc3555d82c9c5928274c"
inputs-digest = "5ff01d4a02d97ec5447f99d45f47e593bb94c4581f07baefad209f25d0b88785"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -72,7 +72,7 @@ ignored = ["github.com/fnproject/fn/cli"]
[[constraint]]
name = "go.opencensus.io"
version = "0.6.0"
version = "0.9.0"
[[override]]
name = "git.apache.org/thrift.git"

View File

@@ -19,7 +19,6 @@ import (
"github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.opencensus.io/trace"
)
@@ -1013,7 +1012,9 @@ func (c *container) FsSize() uint64 { return c.fsSize }
// WriteStat publishes each metric in the specified Stats structure as a histogram metric
func (c *container) WriteStat(ctx context.Context, stat drivers.Stat) {
for key, value := range stat.Metrics {
stats.Record(ctx, stats.FindMeasure("docker_stats_"+key).(*stats.Int64Measure).M(int64(value)))
if m, ok := measures[key]; ok {
stats.Record(ctx, m.M(int64(value)))
}
}
c.statsMu.Lock()
@@ -1023,42 +1024,19 @@ func (c *container) WriteStat(ctx context.Context, stat drivers.Stat) {
c.statsMu.Unlock()
}
var measures map[string]*stats.Int64Measure
func init() {
// TODO this is nasty figure out how to use opencensus to not have to declare these
keys := []string{"net_rx", "net_tx", "mem_limit", "mem_usage", "disk_read", "disk_write", "cpu_user", "cpu_total", "cpu_kernel"}
// TODO necessary?
appKey, err := tag.NewKey("fn_appname")
if err != nil {
logrus.Fatal(err)
}
pathKey, err := tag.NewKey("fn_path")
if err != nil {
logrus.Fatal(err)
}
measures = make(map[string]*stats.Int64Measure)
for _, key := range keys {
units := "bytes"
if strings.Contains(key, "cpu") {
units = "cpu"
}
dockerStatsDist, err := stats.Int64("docker_stats_"+key, "docker container stats for "+key, units)
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
"docker_stats_"+key,
"docker container stats for "+key,
[]tag.Key{appKey, pathKey},
dockerStatsDist,
view.Distribution(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
measures[key] = makeMeasure("docker_stats_"+key, "docker container stats for "+key, units, view.Distribution())
}
}

View File

@@ -102,101 +102,10 @@ type dockerWrap struct {
}
func init() {
// TODO doing this at each call site seems not the intention of the library since measurements
// need to be created and views registered. doing this up front seems painful but maybe there
// are benefits?
// TODO do we have to do this? the measurements will be tagged on the context, will they be propagated
// or we have to white list them in the view for them to show up? test...
var err error
appKey, err := tag.NewKey("fn_appname")
if err != nil {
logrus.Fatal(err)
}
pathKey, err := tag.NewKey("fn_path")
if err != nil {
logrus.Fatal(err)
}
{
dockerRetriesMeasure, err = stats.Int64("docker_api_retries", "docker api retries", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
"docker_api_retries",
"number of times we've retried docker API upon failure",
[]tag.Key{appKey, pathKey},
dockerRetriesMeasure,
view.Sum(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
dockerTimeoutMeasure, err = stats.Int64("docker_api_timeout", "docker api timeouts", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
"docker_api_timeout_count",
"number of times we've timed out calling docker API",
[]tag.Key{appKey, pathKey},
dockerTimeoutMeasure,
view.Count(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
dockerErrorMeasure, err = stats.Int64("docker_api_error", "docker api errors", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
"docker_api_error_count",
"number of unrecoverable errors from docker API",
[]tag.Key{appKey, pathKey},
dockerErrorMeasure,
view.Count(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
dockerOOMMeasure, err = stats.Int64("docker_oom", "docker oom", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
"docker_oom_count",
"number of docker container oom",
[]tag.Key{appKey, pathKey},
dockerOOMMeasure,
view.Count(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
dockerRetriesMeasure = makeMeasure("docker_api_retries", "docker api retries", "", view.Sum())
dockerTimeoutMeasure = makeMeasure("docker_api_timeout", "docker api timeouts", "", view.Count())
dockerErrorMeasure = makeMeasure("docker_api_error", "docker api errors", "", view.Count())
dockerOOMMeasure = makeMeasure("docker_oom", "docker oom", "", view.Count())
}
var (
@@ -447,3 +356,29 @@ func (d *dockerWrap) Stats(opts docker.StatsOptions) (err error) {
//})
//return err
}
// makeMeasure creates an Int64 measure with the given name, description and
// unit, and registers a view over it (tagged by fn_appname/fn_path) using the
// supplied aggregation. Any failure to create tag keys or register the view is
// fatal, since this is only called from package init() paths.
func makeMeasure(name string, desc string, unit string, agg *view.Aggregation) *stats.Int64Measure {
// NOTE(review): tag keys are re-created on every call; tag.NewKey is
// idempotent for the same name in opencensus, so this is safe but could be
// hoisted to package level — confirm before changing.
appKey, err := tag.NewKey("fn_appname")
if err != nil {
logrus.Fatal(err)
}
pathKey, err := tag.NewKey("fn_path")
if err != nil {
logrus.Fatal(err)
}
// stats.Int64 no longer returns an error as of opencensus 0.9.
measure := stats.Int64(name, desc, unit)
// view name/description deliberately mirror the measure's.
err = view.Register(
&view.View{
Name: name,
Description: desc,
TagKeys: []tag.Key{appKey, pathKey},
Measure: measure,
Aggregation: agg,
},
)
if err != nil {
logrus.WithError(err).Fatal("cannot create view")
}
return measure
}

View File

@@ -5,10 +5,8 @@ import (
"sync"
"time"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
type RequestStateType int
@@ -140,76 +138,44 @@ func (c *containerState) UpdateState(ctx context.Context, newState ContainerStat
// update old state stats
gaugeKey := containerGaugeKeys[oldState]
if gaugeKey != "" {
stats.Record(ctx, stats.FindMeasure(gaugeKey).(*stats.Int64Measure).M(-1))
stats.Record(ctx, containerGaugeMeasures[oldState].M(-1))
}
timeKey := containerTimeKeys[oldState]
if timeKey != "" {
stats.Record(ctx, stats.FindMeasure(timeKey).(*stats.Int64Measure).M(int64(now.Sub(before).Round(time.Millisecond))))
stats.Record(ctx, containerTimeMeasures[oldState].M(int64(now.Sub(before).Round(time.Millisecond))))
}
// update new state stats
gaugeKey = containerGaugeKeys[newState]
if gaugeKey != "" {
stats.Record(ctx, stats.FindMeasure(gaugeKey).(*stats.Int64Measure).M(1))
stats.Record(ctx, containerGaugeMeasures[newState].M(1))
}
}
var (
containerGaugeMeasures []*stats.Int64Measure
containerTimeMeasures []*stats.Int64Measure
)
func init() {
// TODO(reed): do we have to do this? the measurements will be tagged on the context, will they be propagated
// or we have to white list them in the view for them to show up? test...
appKey, err := tag.NewKey("fn_appname")
if err != nil {
logrus.Fatal(err)
}
pathKey, err := tag.NewKey("fn_path")
if err != nil {
logrus.Fatal(err)
}
for _, key := range containerGaugeKeys {
containerGaugeMeasures = make([]*stats.Int64Measure, len(containerGaugeKeys))
for i, key := range containerGaugeKeys {
if key == "" { // leave nil intentionally, let it panic
continue
}
measure, err := stats.Int64(key, "containers in state "+key, "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
key,
"containers in state "+key,
[]tag.Key{appKey, pathKey},
measure,
view.Count(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
containerGaugeMeasures[i] = makeMeasure(key, "containers in state "+key, "", view.Count())
}
for _, key := range containerTimeKeys {
containerTimeMeasures = make([]*stats.Int64Measure, len(containerTimeKeys))
for i, key := range containerTimeKeys {
if key == "" {
continue
}
measure, err := stats.Int64(key, "time spent in container state "+key, "ms")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
key,
"time spent in container state "+key,
[]tag.Key{appKey, pathKey},
measure,
view.Distribution(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
containerTimeMeasures[i] = makeMeasure(key, "time spent in container state "+key, "ms", view.Distribution())
}
}

View File

@@ -79,13 +79,17 @@ var (
)
func init() {
// TODO(reed): doing this at each call site seems not the intention of the library since measurements
// need to be created and views registered. doing this up front seems painful but maybe there
// are benefits?
queuedMeasure = makeMeasure(queuedMetricName, "calls currently queued against agent", "", view.Sum())
callsMeasure = makeMeasure(callsMetricName, "calls created in agent", "", view.Sum())
runningMeasure = makeMeasure(runningMetricName, "calls currently running in agent", "", view.Sum())
completedMeasure = makeMeasure(completedMetricName, "calls completed in agent", "", view.Sum())
failedMeasure = makeMeasure(failedMetricName, "calls failed in agent", "", view.Sum())
timedoutMeasure = makeMeasure(timedoutMetricName, "calls timed out in agent", "", view.Sum())
errorsMeasure = makeMeasure(errorsMetricName, "calls errored in agent", "", view.Sum())
serverBusyMeasure = makeMeasure(serverBusyMetricName, "calls where server was too busy in agent", "", view.Sum())
}
// TODO(reed): do we have to do this? the measurements will be tagged on the context, will they be propagated
// or we have to white list them in the view for them to show up? test...
var err error
func makeMeasure(name string, desc string, unit string, agg *view.Aggregation) *stats.Int64Measure {
appKey, err := tag.NewKey("fn_appname")
if err != nil {
logrus.Fatal(err)
@@ -95,163 +99,18 @@ func init() {
logrus.Fatal(err)
}
{
queuedMeasure, err = stats.Int64(queuedMetricName, "calls currently queued against agent", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
queuedMetricName,
"calls currently queued to agent",
[]tag.Key{appKey, pathKey},
queuedMeasure,
view.Sum(),
measure := stats.Int64(name, desc, unit)
err = view.Register(
&view.View{
Name: name,
Description: desc,
TagKeys: []tag.Key{appKey, pathKey},
Measure: measure,
Aggregation: agg,
},
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
callsMeasure, err = stats.Int64(callsMetricName, "calls created in agent", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
callsMetricName,
"calls created in agent",
[]tag.Key{appKey, pathKey},
callsMeasure,
view.Sum(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
runningMeasure, err = stats.Int64(runningMetricName, "calls currently running in agent", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
runningMetricName,
"calls currently running in agent",
[]tag.Key{appKey, pathKey},
runningMeasure,
view.Sum(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
completedMeasure, err = stats.Int64(completedMetricName, "calls completed in agent", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
completedMetricName,
"calls completed in agent",
[]tag.Key{appKey, pathKey},
completedMeasure,
view.Sum(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
failedMeasure, err = stats.Int64(failedMetricName, "calls failed in agent", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
failedMetricName,
"calls failed in agent",
[]tag.Key{appKey, pathKey},
failedMeasure,
view.Sum(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
timedoutMeasure, err = stats.Int64(timedoutMetricName, "calls timed out in agent", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
timedoutMetricName,
"calls timed out in agent",
[]tag.Key{appKey, pathKey},
timedoutMeasure,
view.Sum(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
errorsMeasure, err = stats.Int64(errorsMetricName, "calls errored in agent", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
errorsMetricName,
"calls errored in agent",
[]tag.Key{appKey, pathKey},
errorsMeasure,
view.Sum(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
}
{
serverBusyMeasure, err = stats.Int64(serverBusyMetricName, "calls where server was too busy in agent", "")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
serverBusyMetricName,
"calls where server was too busy in agent",
[]tag.Key{appKey, pathKey},
serverBusyMeasure,
view.Sum(),
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
}
logrus.WithError(err).Fatal("cannot create view")
}
return measure
}

View File

@@ -429,42 +429,34 @@ func init() {
}
{
uploadSizeMeasure, err = stats.Int64("s3_log_upload_size", "uploaded log size", "byte")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
"s3_log_upload_size",
"uploaded log size",
[]tag.Key{appKey, pathKey},
uploadSizeMeasure,
view.Distribution(),
uploadSizeMeasure = stats.Int64("s3_log_upload_size", "uploaded log size", "byte")
err = view.Register(
&view.View{
Name: "s3_log_upload_size",
Description: "uploaded log size",
TagKeys: []tag.Key{appKey, pathKey},
Measure: uploadSizeMeasure,
Aggregation: view.Distribution(),
},
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
logrus.WithError(err).Fatal("cannot create view")
}
}
{
downloadSizeMeasure, err = stats.Int64("s3_log_download_size", "downloaded log size", "byte")
if err != nil {
logrus.Fatal(err)
}
v, err := view.New(
"s3_log_download_size",
"downloaded log size",
[]tag.Key{appKey, pathKey},
downloadSizeMeasure,
view.Distribution(),
downloadSizeMeasure = stats.Int64("s3_log_download_size", "downloaded log size", "byte")
err = view.Register(
&view.View{
Name: "s3_log_download_size",
Description: "downloaded log size",
TagKeys: []tag.Key{appKey, pathKey},
Measure: downloadSizeMeasure,
Aggregation: view.Distribution(),
},
)
if err != nil {
logrus.Fatalf("cannot create view: %v", err)
}
if err := v.Subscribe(); err != nil {
logrus.Fatal(err)
logrus.WithError(err).Fatal("cannot create view")
}
}
}

View File

@@ -576,7 +576,7 @@ func WithJaeger(jaegerURL string) ServerOption {
logrus.WithFields(logrus.Fields{"url": jaegerURL}).Info("exporting spans to jaeger")
// TODO don't do this. testing parity.
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
return nil
}
}
@@ -595,7 +595,7 @@ func WithZipkin(zipkinURL string) ServerOption {
logrus.WithFields(logrus.Fields{"url": zipkinURL}).Info("exporting spans to zipkin")
// TODO don't do this. testing parity.
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
return nil
}
}

1
vendor/go.opencensus.io/.gitignore generated vendored
View File

@@ -2,4 +2,3 @@
# go.opencensus.io/exporter/aws
/exporter/aws/

View File

@@ -13,9 +13,14 @@ notifications:
before_script:
- GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
- PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
- curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh # Install latest dep release
- go get github.com/rakyll/embedmd
script:
- if [ -n "$(gofmt -s -l .)" ]; then echo "gofmt the following files:"; gofmt -s -l .; exit 1; fi
- embedmd -d README.md # Ensure embedded code is up-to-date
- dep ensure -v
- go build ./... # Ensure dependency updates don't break build
- if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
- go vet ./...
- go test -v -race $PKGS # Run all the tests with the race detector enabled
- 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'

247
vendor/go.opencensus.io/Gopkg.lock generated vendored Normal file
View File

@@ -0,0 +1,247 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "cloud.google.com/go"
packages = [
"compute/metadata",
"internal/version",
"monitoring/apiv3",
"trace/apiv2"
]
revision = "29f476ffa9c4cd4fd14336b6043090ac1ad76733"
version = "v0.21.0"
[[projects]]
branch = "master"
name = "git.apache.org/thrift.git"
packages = ["lib/go/thrift"]
revision = "606f1ef31447526b908244933d5b716397a6bad8"
source = "github.com/apache/thrift"
[[projects]]
branch = "master"
name = "github.com/beorn7/perks"
packages = ["quantile"]
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
name = "github.com/golang/protobuf"
packages = [
"proto",
"protoc-gen-go/descriptor",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/empty",
"ptypes/timestamp",
"ptypes/wrappers"
]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
[[projects]]
name = "github.com/googleapis/gax-go"
packages = ["."]
revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
version = "v2.0.0"
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
version = "v1.0.0"
[[projects]]
name = "github.com/openzipkin/zipkin-go"
packages = [
".",
"idgenerator",
"model",
"propagation",
"reporter",
"reporter/http"
]
revision = "f197ec29e729f226d23370ea60f0e49b8f44ccf4"
version = "v0.1.0"
[[projects]]
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/promhttp"
]
revision = "c5b7fccd204277076155f10851dad72b76a49317"
version = "v0.8.0"
[[projects]]
branch = "master"
name = "github.com/prometheus/client_model"
packages = ["go"]
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
[[projects]]
branch = "master"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model"
]
revision = "d0f7cd64bda49e08b22ae8a730aa57aa0db125d6"
[[projects]]
branch = "master"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs"
]
revision = "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"trace"
]
revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "921ae394b9430ed4fb549668d7b087601bd60a81"
[[projects]]
branch = "master"
name = "golang.org/x/sync"
packages = ["semaphore"]
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = [
"googleapi/transport",
"internal",
"iterator",
"option",
"support/bundler",
"transport",
"transport/grpc",
"transport/http"
]
revision = "fca24fcb41126b846105a93fb9e30f416bdd55ce"
[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/socket",
"internal/urlfetch",
"socket",
"urlfetch"
]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
"googleapis/api/distribution",
"googleapis/api/label",
"googleapis/api/metric",
"googleapis/api/monitoredres",
"googleapis/devtools/cloudtrace/v2",
"googleapis/monitoring/v3",
"googleapis/rpc/code",
"googleapis/rpc/status",
"protobuf/field_mask"
]
revision = "51d0944304c3cbce4afe9e5247e21100037bff78"
[[projects]]
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"credentials/oauth",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"reflection",
"reflection/grpc_reflection_v1alpha",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
revision = "d11072e7ca9811b1100b80ca0269ac831f06d024"
version = "v1.11.3"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "1be7e5255452682d433fe616bb0987e00cb73c1172fe797b9b7a6fd2c1f53d37"
solver-name = "gps-cdcl"
solver-version = 1

44
vendor/go.opencensus.io/Gopkg.toml generated vendored Normal file
View File

@@ -0,0 +1,44 @@
[[constraint]]
name = "cloud.google.com/go"
version = "0.21.0"
[[constraint]]
branch = "master"
name = "git.apache.org/thrift.git"
source = "github.com/apache/thrift"
[[constraint]]
name = "github.com/golang/protobuf"
version = "1.0.0"
[[constraint]]
name = "github.com/openzipkin/zipkin-go"
version = "0.1.0"
[[constraint]]
name = "github.com/prometheus/client_golang"
version = "0.8.0"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
branch = "master"
name = "golang.org/x/oauth2"
[[constraint]]
branch = "master"
name = "google.golang.org/api"
[[constraint]]
branch = "master"
name = "google.golang.org/genproto"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.11.3"
[prune]
go-tests = true
unused-packages = true

49
vendor/go.opencensus.io/README.md generated vendored
View File

@@ -9,16 +9,15 @@ OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
collecting application performance and behavior monitoring data.
Currently it consists of three major components: tags, stats, and tracing.
This project is still at a very early stage of development. The API is changing
rapidly, vendoring is recommended.
## Installation
```
$ go get -u go.opencensus.io
```
The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy).
The use of vendoring or a dependency management tool is recommended.
## Prerequisites
OpenCensus Go libraries require Go 1.8 or later.
@@ -53,17 +52,14 @@ then add additional custom instrumentation if needed.
## Tags
Tags represent propagated key-value pairs. They are propagated using context.Context
in the same process or can be encoded to be transmitted on the wire and decoded back
to a tag.Map at the destination.
Tags represent propagated key-value pairs. They are propagated using `context.Context`
in the same process or can be encoded to be transmitted on the wire. Usually, this will
be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
for gRPC.
Package tag provides a builder to create tag maps and put it
into the current context.
To propagate a tag map to downstream methods and RPCs, New
will add the produced tag map to the current context.
If there is already a tag map in the current context, it will be replaced.
Package tag allows adding or modifying tags in the current context.
[embedmd]:# (tags.go new)
[embedmd]:# (internal/readme/tags.go new)
```go
ctx, err = tag.New(ctx,
tag.Insert(osKey, "macOS-10.12.5"),
@@ -91,7 +87,7 @@ Measurements are data points associated with a measure.
Recording implicitly tags the set of Measurements with the tags from the
provided context:
[embedmd]:# (stats.go record)
[embedmd]:# (internal/readme/stats.go record)
```go
stats.Record(ctx, videoSize.M(102478))
```
@@ -103,25 +99,23 @@ set of recorded data points (measurements).
Views have two parts: the tags to group by and the aggregation type used.
Currently four types of aggregations are supported:
Currently three types of aggregations are supported:
* CountAggregation is used to count the number of times a sample was recorded.
* DistributionAggregation is used to provide a histogram of the values of the samples.
* SumAggregation is used to sum up all sample values.
* MeanAggregation is used to calculate the mean of sample values.
[embedmd]:# (stats.go aggs)
[embedmd]:# (internal/readme/stats.go aggs)
```go
distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
countAgg := view.Count()
sumAgg := view.Sum()
meanAgg := view.Mean()
```
Here we create a view with the DistributionAggregation over our measure.
[embedmd]:# (stats.go view)
[embedmd]:# (internal/readme/stats.go view)
```go
if err = view.Subscribe(&view.View{
if err := view.Register(&view.View{
Name: "my.org/video_size_distribution",
Description: "distribution of processed video size over time",
Measure: videoSize,
@@ -136,7 +130,7 @@ exported via the registered exporters.
## Traces
[embedmd]:# (trace.go startend)
[embedmd]:# (internal/readme/trace.go startend)
```go
ctx, span := trace.StartSpan(ctx, "your choice of name")
defer span.End()
@@ -147,7 +141,7 @@ defer span.End()
OpenCensus tags can be applied as profiler labels
for users who are on Go 1.9 and above.
[embedmd]:# (tags.go profiler)
[embedmd]:# (internal/readme/tags.go profiler)
```go
ctx, err = tag.New(ctx,
tag.Insert(osKey, "macOS-10.12.5"),
@@ -167,6 +161,15 @@ A screenshot of the CPU profile from the program above:
![CPU profile](https://i.imgur.com/jBKjlkw.png)
## Deprecation Policy
Before version 1.0.0, the following deprecation policy will be observed:
No backwards-incompatible changes will be made except for the removal of symbols that have
been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
removing the *Deprecated* functionality will be made no sooner than 28 days after the first
release in which the functionality was marked *Deprecated*.
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
@@ -181,7 +184,7 @@ A screenshot of the CPU profile from the program above:
[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus
[exporter-stackdriver]: https://godoc.org/go.opencensus.io/exporter/stackdriver
[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
package exporter // import "go.opencensus.io/examples/exporter"
import (
"log"

View File

@@ -7,7 +7,7 @@ This example uses:
* Debugging exporters to print stats and traces to stdout.
```
$ go get go.opencensus.io/examples/grpc
$ go get go.opencensus.io/examples/grpc/...
```
First, run the server:

View File

@@ -37,8 +37,8 @@ func main() {
// the collected data.
view.RegisterExporter(&exporter.PrintExporter{})
// Subscribe to collect client request count.
if err := ocgrpc.ClientErrorCountView.Subscribe(); err != nil {
// Register the view to collect gRPC client stats.
if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
log.Fatal(err)
}

View File

@@ -31,7 +31,6 @@ import (
"go.opencensus.io/zpages"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
const port = ":50051"
@@ -56,8 +55,8 @@ func main() {
// the collected data.
view.RegisterExporter(&exporter.PrintExporter{})
// Subscribe to collect server request count.
if err := view.Subscribe(ocgrpc.DefaultServerViews...); err != nil {
// Register the views to collect server request count.
if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
log.Fatal(err)
}
@@ -70,8 +69,7 @@ func main() {
// stats handler to enable stats and tracing.
s := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
pb.RegisterGreeterServer(s, &server{})
// Register reflection service on gRPC server.
reflection.Register(s)
if err := s.Serve(lis); err != nil {
log.Fatalf("Failed to serve: %v", err)
}

View File

@@ -11,7 +11,7 @@ It has these top-level messages:
HelloRequest
HelloReply
*/
package helloworld
package helloworld // import "go.opencensus.io/examples/grpc/proto"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"

View File

@@ -53,15 +53,12 @@ func main() {
if err != nil {
log.Fatal(err)
}
videoSize, err = stats.Int64("my.org/measure/video_size", "size of processed videos", "MBy")
if err != nil {
log.Fatalf("Video size measure not created: %v", err)
}
videoSize = stats.Int64("my.org/measure/video_size", "size of processed videos", stats.UnitBytes)
// Create view to see the processed video size
// distribution broken down by frontend.
// Subscribe will allow view data to be exported.
if err := view.Subscribe(&view.View{
// Register will allow view data to be exported.
if err := view.Register(&view.View{
Name: "my.org/views/video_size",
Description: "processed video size over time",
TagKeys: []tag.Key{frontendKey},

View File

@@ -7,7 +7,7 @@ This example uses:
* Debugging exporters to print stats and traces to stdout.
```
$ go get go.opencensus.io/examples/http
$ go get go.opencensus.io/examples/http/...
```
First, run the server:

View File

@@ -35,7 +35,7 @@ func main() {
trace.RegisterExporter(exporter)
// Always trace for this demo.
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
// Report stats at every second.
view.SetReportingPeriod(1 * time.Second)

View File

@@ -37,7 +37,7 @@ func main() {
trace.RegisterExporter(exporter)
// Always trace for this demo.
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
// Report stats at every second.
view.SetReportingPeriod(1 * time.Second)
@@ -50,12 +50,14 @@ func main() {
r, _ := http.NewRequest("GET", "https://example.com", nil)
// Propagate the trace header info in the outgoing requests.
r = req.WithContext(req.Context())
r = r.WithContext(req.Context())
resp, err := client.Do(r)
if err != nil {
log.Println(err)
} else {
// TODO: handle response
resp.Body.Close()
}
_ = resp // handle response
})
log.Fatal(http.ListenAndServe(":50030", &ochttp.Handler{}))
}

View File

@@ -39,7 +39,7 @@ func main() {
trace.RegisterExporter(exporter)
// For demoing purposes, always sample.
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
ctx, span := trace.StartSpan(ctx, "/foo")
bar(ctx)

View File

@@ -1,7 +1,7 @@
// Autogenerated by Thrift Compiler (0.11.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package jaeger
package jaeger // import "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger"
import (
"bytes"

View File

@@ -98,7 +98,7 @@ func Test_spanDataToThrift(t *testing.T) {
},
},
},
Status: trace.Status{Code: 2, Message: "error"},
Status: trace.Status{Code: trace.StatusCodeUnknown, Message: "error"},
},
want: &gen.Span{
TraceIdLow: 651345242494996240,

View File

@@ -28,6 +28,13 @@ import (
"go.opencensus.io/stats/view"
)
// Create measures. The program will record measures for the size of
// processed videos and the number of videos marked as spam.
var (
videoCount = stats.Int64("my.org/measures/video_count", "number of processed videos", stats.UnitDimensionless)
videoSize = stats.Int64("my.org/measures/video_size", "size of processed video", stats.UnitBytes)
)
func main() {
ctx := context.Background()
@@ -37,22 +44,11 @@ func main() {
}
view.RegisterExporter(exporter)
// Create measures. The program will record measures for the size of
// processed videos and the number of videos marked as spam.
videoCount, err := stats.Int64("my.org/measures/video_count", "number of processed videos", "")
if err != nil {
log.Fatalf("Video count measure not created: %v", err)
}
videoSize, err := stats.Int64("my.org/measures/video_size", "size of processed video", "MBy")
if err != nil {
log.Fatalf("Video size measure not created: %v", err)
}
// Create view to see the number of processed videos cumulatively.
// Create view to see the amount of video processed
// Subscribe will allow view data to be exported.
// Once no longer needed, you can unsubscribe from the view.
if err = view.Subscribe(
if err = view.Register(
&view.View{
Name: "video_count",
Description: "number of videos processed over time",

View File

@@ -23,6 +23,7 @@ import (
"fmt"
"log"
"net/http"
"sort"
"sync"
"go.opencensus.io/internal"
@@ -33,10 +34,6 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
defaultNamespace = "opencensus"
)
// Exporter exports stats to Prometheus, users need
// to register the exporter as an http.Handler to be
// able to export.
@@ -71,9 +68,6 @@ func NewExporter(o Options) (*Exporter, error) {
}
func newExporter(o Options) (*Exporter, error) {
if o.Namespace == "" {
o.Namespace = defaultNamespace
}
if o.Registry == nil {
o.Registry = prometheus.NewRegistry()
}
@@ -144,10 +138,8 @@ func (o *Options) onError(err error) {
// ExportView exports to the Prometheus if view data has one or more rows.
// Each OpenCensus AggregationData will be converted to
// corresponding Prometheus Metric: SumData will be converted
// to Untyped Metric, CountData will be Counter Metric,
// DistributionData will be Histogram Metric, and MeanData
// will be Summary Metric. Please note the Summary Metric from
// MeanData does not have any quantiles.
// to Untyped Metric, CountData will be a Counter Metric,
// DistributionData will be a Histogram Metric.
func (e *Exporter) ExportView(vd *view.Data) {
if len(vd.Rows) == 0 {
return
@@ -232,20 +224,40 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) {
switch data := row.Data.(type) {
case *view.CountData:
return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(*data), tagValues(row.Tags)...)
return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags)...)
case *view.DistributionData:
points := make(map[float64]uint64)
// Histograms are cumulative in Prometheus.
// 1. Sort buckets in ascending order but, retain
// their indices for reverse lookup later on.
// TODO: If there is a guarantee that distribution elements
// are always sorted, then skip the sorting.
indicesMap := make(map[float64]int)
buckets := make([]float64, 0, len(v.Aggregation.Buckets))
for i, b := range v.Aggregation.Buckets {
points[b] = uint64(data.CountPerBucket[i])
if _, ok := indicesMap[b]; !ok {
indicesMap[b] = i
buckets = append(buckets, b)
}
}
sort.Float64s(buckets)
// 2. Now that the buckets are sorted by magnitude
// we can create cumulative indicesmap them back by reverse index
cumCount := uint64(0)
for _, b := range buckets {
i := indicesMap[b]
cumCount += uint64(data.CountPerBucket[i])
points[b] = cumCount
}
return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...)
case *view.MeanData:
return prometheus.NewConstSummary(desc, uint64(data.Count), data.Sum(), make(map[float64]float64), tagValues(row.Tags)...)
case *view.SumData:
return prometheus.NewConstMetric(desc, prometheus.UntypedValue, float64(*data), tagValues(row.Tags)...)
return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...)
case *view.LastValueData:
return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...)
default:
return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation)
@@ -285,7 +297,11 @@ func tagValues(t []tag.Tag) []string {
}
func viewName(namespace string, v *view.View) string {
return namespace + "_" + internal.Sanitize(v.Name)
var name string
if namespace != "" {
name = namespace + "_"
}
return name + internal.Sanitize(v.Name)
}
func viewSignature(namespace string, v *view.View) string {

View File

@@ -18,7 +18,6 @@ import (
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"strings"
@@ -34,10 +33,7 @@ import (
)
func newView(measureName string, agg *view.Aggregation) *view.View {
m, err := stats.Int64(measureName, "bytes", stats.UnitBytes)
if err != nil {
log.Fatal(err)
}
m := stats.Int64(measureName, "bytes", stats.UnitBytes)
return &view.View{
Name: "foo",
Description: "bar",
@@ -48,11 +44,7 @@ func newView(measureName string, agg *view.Aggregation) *view.View {
func TestOnlyCumulativeWindowSupported(t *testing.T) {
// See Issue https://github.com/census-instrumentation/opencensus-go/issues/214.
count1 := view.CountData(1)
mean1 := view.MeanData{
Mean: 4.5,
Count: 5,
}
count1 := &view.CountData{Value: 1}
tests := []struct {
vds *view.Data
want int
@@ -67,16 +59,7 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) {
vds: &view.Data{
View: newView("TestOnlyCumulativeWindowSupported/m2", view.Count()),
Rows: []*view.Row{
{Data: &count1},
},
},
want: 1,
},
2: {
vds: &view.Data{
View: newView("TestOnlyCumulativeWindowSupported/m3", view.Mean()),
Rows: []*view.Row{
{Data: &mean1},
{Data: count1},
},
},
want: 1,
@@ -143,11 +126,9 @@ func TestCollectNonRacy(t *testing.T) {
}()
for i := 0; i < 1e3; i++ {
count1 := view.CountData(1)
mean1 := &view.MeanData{Mean: 4.5, Count: 5}
count1 := &view.CountData{Value: 1}
vds := []*view.Data{
{View: newView(fmt.Sprintf("TestCollectNonRacy/m1-%d", i), view.Mean()), Rows: []*view.Row{{Data: mean1}}},
{View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.Count()), Rows: []*view.Row{{Data: &count1}}},
{View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.Count()), Rows: []*view.Row{{Data: count1}}},
}
for _, v := range vds {
exp.ExportView(v)
@@ -190,28 +171,24 @@ func TestCollectNonRacy(t *testing.T) {
}()
}
type mCreator struct {
m *stats.Int64Measure
err error
}
type mSlice []*stats.Int64Measure
func (mc *mCreator) createAndAppend(measures *mSlice, name, desc, unit string) {
mc.m, mc.err = stats.Int64(name, desc, unit)
*measures = append(*measures, mc.m)
func (measures *mSlice) createAndAppend(name, desc, unit string) {
m := stats.Int64(name, desc, unit)
*measures = append(*measures, m)
}
type vCreator struct {
v *view.View
err error
}
type vCreator []*view.View
func (vc *vCreator) createAndSubscribe(name, description string, keys []tag.Key, measure stats.Measure, agg *view.Aggregation) {
vc.v, vc.err = view.New(name, description, keys, measure, agg)
if err := vc.v.Subscribe(); err != nil {
vc.err = err
func (vc *vCreator) createAndAppend(name, description string, keys []tag.Key, measure stats.Measure, agg *view.Aggregation) {
v := &view.View{
Name: name,
Description: description,
TagKeys: keys,
Measure: measure,
Aggregation: agg,
}
*vc = append(*vc, v)
}
func TestMetricsEndpointOutput(t *testing.T) {
@@ -223,22 +200,21 @@ func TestMetricsEndpointOutput(t *testing.T) {
names := []string{"foo", "bar", "baz"}
measures := make(mSlice, 0)
mc := &mCreator{}
var measures mSlice
for _, name := range names {
mc.createAndAppend(&measures, "tests/"+name, name, "")
}
if mc.err != nil {
t.Errorf("failed to create measures: %v", err)
measures.createAndAppend("tests/"+name, name, "")
}
vc := &vCreator{}
var vc vCreator
for _, m := range measures {
vc.createAndSubscribe(m.Name(), m.Description(), nil, m, view.Count())
vc.createAndAppend(m.Name(), m.Description(), nil, m, view.Count())
}
if vc.err != nil {
if err := view.Register(vc...); err != nil {
t.Fatalf("failed to create views: %v", err)
}
defer view.Unregister(vc...)
view.SetReportingPeriod(time.Millisecond)
for _, m := range measures {
@@ -251,7 +227,8 @@ func TestMetricsEndpointOutput(t *testing.T) {
var i int
var output string
for {
if i == 10000 {
time.Sleep(10 * time.Millisecond)
if i == 1000 {
t.Fatal("no output at /metrics (10s wait)")
}
i++
@@ -271,7 +248,6 @@ func TestMetricsEndpointOutput(t *testing.T) {
if output != "" {
break
}
time.Sleep(time.Millisecond)
}
if strings.Contains(output, "collected before with the same name and label values") {
@@ -283,8 +259,95 @@ func TestMetricsEndpointOutput(t *testing.T) {
}
for _, name := range names {
if !strings.Contains(output, "opencensus_tests_"+name+" 1") {
if !strings.Contains(output, "tests_"+name+" 1") {
t.Fatalf("measurement missing in output: %v", name)
}
}
}
func TestCumulativenessFromHistograms(t *testing.T) {
exporter, err := newExporter(Options{})
if err != nil {
t.Fatalf("failed to create prometheus exporter: %v", err)
}
view.RegisterExporter(exporter)
reportPeriod := time.Millisecond
view.SetReportingPeriod(reportPeriod)
m := stats.Float64("tests/bills", "payments by denomination", stats.UnitDimensionless)
v := &view.View{
Name: "cash/register",
Description: "this is a test",
Measure: m,
// Intentionally used repeated elements in the ascending distribution.
// to ensure duplicate distribution items are handles.
Aggregation: view.Distribution(1, 5, 5, 5, 5, 10, 20, 50, 100, 250),
}
if err := view.Register(v); err != nil {
t.Fatalf("Register error: %v", err)
}
defer view.Unregister(v)
// Give the reporter ample time to process registration
<-time.After(10 * reportPeriod)
values := []float64{0.25, 245.67, 12, 1.45, 199.9, 7.69, 187.12}
// We want the results that look like this:
// 1: [0.25] | 1 + prev(i) = 1 + 0 = 1
// 5: [1.45] | 1 + prev(i) = 1 + 1 = 2
// 10: [] | 1 + prev(i) = 1 + 2 = 3
// 20: [12] | 1 + prev(i) = 1 + 3 = 4
// 50: [] | 0 + prev(i) = 0 + 4 = 4
// 100: [] | 0 + prev(i) = 0 + 4 = 4
// 250: [187.12, 199.9, 245.67] | 3 + prev(i) = 3 + 4 = 7
wantLines := []string{
`cash_register_bucket{le="1"} 1`,
`cash_register_bucket{le="5"} 2`,
`cash_register_bucket{le="10"} 3`,
`cash_register_bucket{le="20"} 4`,
`cash_register_bucket{le="50"} 4`,
`cash_register_bucket{le="100"} 4`,
`cash_register_bucket{le="250"} 7`,
`cash_register_bucket{le="+Inf"} 7`,
`cash_register_sum 654.0799999999999`, // Summation of the input values
`cash_register_count 7`,
}
ctx := context.Background()
ms := make([]stats.Measurement, len(values))
for _, value := range values {
mx := m.M(value)
ms = append(ms, mx)
}
stats.Record(ctx, ms...)
// Give the recorder ample time to process recording
<-time.After(10 * reportPeriod)
cst := httptest.NewServer(exporter)
defer cst.Close()
res, err := http.Get(cst.URL)
if err != nil {
t.Fatalf("http.Get error: %v", err)
}
blob, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("Read body error: %v", err)
}
str := strings.Trim(string(blob), "\n")
lines := strings.Split(str, "\n")
nonComments := make([]string, 0, len(lines))
for _, line := range lines {
if !strings.Contains(line, "#") {
nonComments = append(nonComments, line)
}
}
got := strings.Join(nonComments, "\n")
want := strings.Join(wantLines, "\n")
if got != want {
t.Fatalf("\ngot:\n%s\n\nwant:\n%s\n", got, want)
}
}

View File

@@ -1,58 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver_test
import (
"log"
"net/http"
"go.opencensus.io/exporter/stackdriver"
"go.opencensus.io/exporter/stackdriver/propagation"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
)
func Example() {
exporter, err := stackdriver.NewExporter(stackdriver.Options{ProjectID: "google-project-id"})
if err != nil {
log.Fatal(err)
}
// Export to Stackdriver Monitoring.
view.RegisterExporter(exporter)
// Subscribe views to see stats in Stackdriver Monitoring.
if err := view.Subscribe(
ochttp.ClientLatencyView,
ochttp.ClientResponseBytesView,
); err != nil {
log.Fatal(err)
}
// Export to Stackdriver Trace.
trace.RegisterExporter(exporter)
// Automatically add a Stackdriver trace header to outgoing requests:
client := &http.Client{
Transport: &ochttp.Transport{
Propagation: &propagation.HTTPFormat{},
},
}
_ = client // use client
// All outgoing requests from client will include a Stackdriver Trace header.
// See the ochttp package for how to handle incoming requests.
}

View File

@@ -28,6 +28,10 @@ import (
"go.opencensus.io/stats/view"
)
// Create measures. The program will record measures for the size of
// processed videos and the nubmer of videos marked as spam.
var videoSize = stats.Int64("my.org/measure/video_size", "size of processed videos", stats.UnitBytes)
func main() {
ctx := context.Background()
@@ -49,20 +53,13 @@ func main() {
}
view.RegisterExporter(exporter)
// Create measures. The program will record measures for the size of
// processed videos and the nubmer of videos marked as spam.
videoSize, err := stats.Int64("my.org/measure/video_size", "size of processed videos", "MBy")
if err != nil {
log.Fatalf("Video size measure not created: %v", err)
}
// Set reporting period to report data at every second.
view.SetReportingPeriod(1 * time.Second)
// Create view to see the processed video size cumulatively.
// Subscribe will allow view data to be exported.
// Once no longer need, you can unsubscribe from the view.
if err := view.Subscribe(&view.View{
if err := view.Register(&view.View{
Name: "my.org/views/video_size_cum",
Description: "processed video size over time",
Measure: videoSize,
@@ -71,11 +68,15 @@ func main() {
log.Fatalf("Cannot subscribe to the view: %v", err)
}
// Record data points.
stats.Record(ctx, videoSize.M(25648))
processVideo(ctx)
// Wait for a duration longer than reporting duration to ensure the stats
// library reports the collected data.
fmt.Println("Wait longer than the reporting duration...")
time.Sleep(1 * time.Minute)
}
func processVideo(ctx context.Context) {
// Do some processing and record stats.
stats.Record(ctx, videoSize.M(25648))
}

View File

@@ -12,19 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package stackdriver contains the OpenCensus exporters for
// Stackdriver Monitoring and Stackdriver Tracing.
// Package stackdriver has moved.
//
// Please note that the Stackdriver exporter is currently experimental.
//
// The package uses Application Default Credentials to authenticate. See
// https://developers.google.com/identity/protocols/application-default-credentials
// Deprecated: Use contrib.go.opencensus.io/exporter/stackdriver instead.
package stackdriver // import "go.opencensus.io/exporter/stackdriver"
import (
"context"
"errors"
"fmt"
"log"
"time"
traceapi "cloud.google.com/go/trace/apiv2"
@@ -49,10 +46,15 @@ type Options struct {
// Optional.
OnError func(err error)
// ClientOptions are additional options to be passed
// MonitoringClientOptions are additional options to be passed
// to the underlying Stackdriver Monitoring API client.
// Optional.
ClientOptions []option.ClientOption
MonitoringClientOptions []option.ClientOption
// TraceClientOptions are additional options to be passed
// to the underlying Stackdriver Trace API client.
// Optional.
TraceClientOptions []option.ClientOption
// BundleDelayThreshold determines the max amount of time
// the exporter can wait before uploading view data to
@@ -130,3 +132,11 @@ func (e *Exporter) Flush() {
e.statsExporter.Flush()
e.traceExporter.Flush()
}
func (o Options) handleError(err error) {
if o.OnError != nil {
o.OnError(err)
return
}
log.Printf("Error exporting to Stackdriver: %v", err)
}

View File

@@ -36,7 +36,11 @@ func TestExport(t *testing.T) {
t.Skip("STACKDRIVER_TEST_PROJECT_ID not set")
}
exporter, err := NewExporter(Options{ProjectID: projectID})
var exportErrors []error
exporter, err := NewExporter(Options{ProjectID: projectID, OnError: func(err error) {
exportErrors = append(exportErrors, err)
}})
if err != nil {
t.Fatal(err)
}
@@ -47,16 +51,24 @@ func TestExport(t *testing.T) {
view.RegisterExporter(exporter)
defer view.UnregisterExporter(exporter)
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
span := trace.NewSpan("custom-span", nil, trace.StartOptions{})
_, span := trace.StartSpan(context.Background(), "custom-span")
time.Sleep(10 * time.Millisecond)
span.End()
// Test HTTP spans
handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
_, backgroundSpan := trace.StartSpan(context.Background(), "BackgroundWork")
spanContext := backgroundSpan.SpanContext()
time.Sleep(10 * time.Millisecond)
backgroundSpan.End()
_, span := trace.StartSpan(req.Context(), "Sleep")
span.AddLink(trace.Link{Type: trace.LinkTypeChild, TraceID: spanContext.TraceID, SpanID: spanContext.SpanID})
time.Sleep(150 * time.Millisecond) // do work
span.End()
rw.Write([]byte("Hello, world!"))
})
server := httptest.NewServer(&ochttp.Handler{Handler: handler})
@@ -81,6 +93,10 @@ func TestExport(t *testing.T) {
// Flush twice to expose issue of exporter creating traces internally (#557)
exporter.Flush()
exporter.Flush()
for _, err := range exportErrors {
t.Error(err)
}
}
func TestGRPC(t *testing.T) {
@@ -100,7 +116,7 @@ func TestGRPC(t *testing.T) {
view.RegisterExporter(exporter)
defer view.UnregisterExporter(exporter)
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
client, done := testpb.NewTestClient(t)
defer done()

View File

@@ -18,8 +18,6 @@ import (
"context"
"errors"
"fmt"
"log"
"net/url"
"os"
"path"
"strconv"
@@ -28,6 +26,7 @@ import (
"time"
"go.opencensus.io/internal"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.opencensus.io/trace"
@@ -42,8 +41,6 @@ import (
metricpb "google.golang.org/genproto/googleapis/api/metric"
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
const maxTimeSeriesPerUpload = 200
@@ -92,7 +89,7 @@ func newStatsExporter(o Options) (*statsExporter, error) {
seenProjects[o.ProjectID] = true
opts := append(o.ClientOptions, option.WithUserAgent(internal.UserAgent))
opts := append(o.MonitoringClientOptions, option.WithUserAgent(internal.UserAgent))
client, err := monitoring.NewMetricClient(context.Background(), opts...)
if err != nil {
return nil, err
@@ -125,9 +122,9 @@ func (e *statsExporter) ExportView(vd *view.Data) {
case bundler.ErrOversizedItem:
go e.handleUpload(vd)
case bundler.ErrOverflow:
e.onError(errors.New("failed to upload: buffer full"))
e.o.handleError(errors.New("failed to upload: buffer full"))
default:
e.onError(err)
e.o.handleError(err)
}
}
@@ -145,7 +142,7 @@ func getTaskValue() string {
// of Data, as well as error handling.
func (e *statsExporter) handleUpload(vds ...*view.Data) {
if err := e.uploadStats(vds); err != nil {
e.onError(err)
e.o.handleError(err)
}
}
@@ -157,32 +154,23 @@ func (e *statsExporter) Flush() {
e.bundler.Flush()
}
func (e *statsExporter) onError(err error) {
if e.o.OnError != nil {
e.o.OnError(err)
return
}
log.Printf("Failed to export to Stackdriver Monitoring: %v", err)
}
func (e *statsExporter) uploadStats(vds []*view.Data) error {
span := trace.NewSpan(
ctx, span := trace.StartSpan(
context.Background(),
"go.opencensus.io/exporter/stackdriver.uploadStats",
nil,
trace.StartOptions{Sampler: trace.NeverSample()},
trace.WithSampler(trace.NeverSample()),
)
ctx := trace.WithSpan(context.Background(), span)
defer span.End()
for _, vd := range vds {
if err := e.createMeasure(ctx, vd); err != nil {
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
return err
}
}
for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) {
if err := e.c.CreateTimeSeries(ctx, req); err != nil {
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
// TODO(jbd): Don't fail fast here, batch errors?
return err
}
@@ -205,7 +193,7 @@ func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.Cre
for _, row := range vd.Rows {
ts := &monitoringpb.TimeSeries{
Metric: &metricpb.Metric{
Type: namespacedViewName(vd.View.Name, false),
Type: namespacedViewName(vd.View.Name),
Labels: newLabels(row.Tags, e.taskValue),
},
Resource: resource,
@@ -243,53 +231,53 @@ func (e *statsExporter) createMeasure(ctx context.Context, vd *view.Data) error
viewName := vd.View.Name
if md, ok := e.createdViews[viewName]; ok {
return equalAggTagKeys(md, agg, tagKeys)
return equalMeasureAggTagKeys(md, m, agg, tagKeys)
}
metricName := monitoring.MetricMetricDescriptorPath(e.o.ProjectID, namespacedViewName(viewName, true))
md, err := getMetricDescriptor(ctx, e.c, &monitoringpb.GetMetricDescriptorRequest{
Name: metricName,
})
if err == nil {
if err := equalAggTagKeys(md, agg, tagKeys); err != nil {
return err
}
e.createdViews[viewName] = md
return nil
}
if grpc.Code(err) != codes.NotFound {
return err
}
var metricKind metricpb.MetricDescriptor_MetricKind
metricType := namespacedViewName(viewName)
var valueType metricpb.MetricDescriptor_ValueType
unit := m.Unit()
switch agg.Type {
case view.AggTypeCount:
valueType = metricpb.MetricDescriptor_INT64
// If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1",
// because this view does not apply to the recorded values.
unit = stats.UnitDimensionless
case view.AggTypeSum:
switch m.(type) {
case *stats.Int64Measure:
valueType = metricpb.MetricDescriptor_INT64
case *stats.Float64Measure:
valueType = metricpb.MetricDescriptor_DOUBLE
case view.AggTypeMean:
valueType = metricpb.MetricDescriptor_DISTRIBUTION
}
case view.AggTypeDistribution:
valueType = metricpb.MetricDescriptor_DISTRIBUTION
case view.AggTypeLastValue:
switch m.(type) {
case *stats.Int64Measure:
valueType = metricpb.MetricDescriptor_INT64
case *stats.Float64Measure:
valueType = metricpb.MetricDescriptor_DOUBLE
}
default:
return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String())
}
metricKind = metricpb.MetricDescriptor_CUMULATIVE
metricKind := metricpb.MetricDescriptor_CUMULATIVE
displayNamePrefix := defaultDisplayNamePrefix
if e.o.MetricPrefix != "" {
displayNamePrefix = e.o.MetricPrefix
}
md, err = createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{
Name: monitoring.MetricProjectPath(e.o.ProjectID),
md, err := createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{
Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
MetricDescriptor: &metricpb.MetricDescriptor{
Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType),
DisplayName: path.Join(displayNamePrefix, viewName),
Description: m.Description(),
Unit: m.Unit(),
Type: namespacedViewName(viewName, false),
Description: vd.View.Description,
Unit: unit,
Type: metricType,
MetricKind: metricKind,
ValueType: valueType,
Labels: newLabelDescriptors(vd.View.TagKeys),
@@ -323,28 +311,19 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
switch v := r.Data.(type) {
case *view.CountData:
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
Int64Value: int64(*v),
Int64Value: v.Value,
}}
case *view.SumData:
switch vd.Measure.(type) {
case *stats.Int64Measure:
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
Int64Value: int64(v.Value),
}}
case *stats.Float64Measure:
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
DoubleValue: float64(*v),
}}
case *view.MeanData:
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
Count: int64(v.Count),
Mean: v.Mean,
SumOfSquaredDeviation: 0,
BucketOptions: &distributionpb.Distribution_BucketOptions{
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
Bounds: []float64{0},
},
},
},
BucketCounts: []int64{0, int64(v.Count)},
},
DoubleValue: v.Value,
}}
}
case *view.DistributionData:
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
@@ -366,16 +345,23 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
BucketCounts: v.CountPerBucket,
},
}}
case *view.LastValueData:
switch vd.Measure.(type) {
case *stats.Int64Measure:
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
Int64Value: int64(v.Value),
}}
case *stats.Float64Measure:
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
DoubleValue: v.Value,
}}
}
}
return nil
}
func namespacedViewName(v string, escaped bool) string {
p := path.Join("opencensus", v)
if escaped {
p = url.PathEscape(p)
}
return path.Join("custom.googleapis.com", p)
func namespacedViewName(v string) string {
return path.Join("custom.googleapis.com", "opencensus", v)
}
func newLabels(tags []tag.Tag, taskValue string) map[string]string {
@@ -404,15 +390,21 @@ func newLabelDescriptors(keys []tag.Key) []*labelpb.LabelDescriptor {
return labelDescriptors
}
func equalAggTagKeys(md *metricpb.MetricDescriptor, agg *view.Aggregation, keys []tag.Key) error {
func equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error {
var aggTypeMatch bool
switch md.ValueType {
case metricpb.MetricDescriptor_INT64:
aggTypeMatch = agg.Type == view.AggTypeCount
if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) {
return fmt.Errorf("stackdriver metric descriptor was not created as int64")
}
aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
case metricpb.MetricDescriptor_DOUBLE:
aggTypeMatch = agg.Type == view.AggTypeSum
if _, ok := m.(*stats.Float64Measure); !ok {
return fmt.Errorf("stackdriver metric descriptor was not created as double")
}
aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
case metricpb.MetricDescriptor_DISTRIBUTION:
aggTypeMatch = agg.Type == view.AggTypeMean || agg.Type == view.AggTypeDistribution
aggTypeMatch = agg.Type == view.AggTypeDistribution
}
if !aggTypeMatch {

View File

@@ -26,15 +26,12 @@ import (
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"google.golang.org/api/option"
distributionpb "google.golang.org/genproto/googleapis/api/distribution"
"google.golang.org/genproto/googleapis/api/label"
"google.golang.org/genproto/googleapis/api/metric"
metricpb "google.golang.org/genproto/googleapis/api/metric"
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var authOptions = []option.ClientOption{option.WithGRPCConn(&grpc.ClientConn{})}
@@ -42,7 +39,7 @@ var authOptions = []option.ClientOption{option.WithGRPCConn(&grpc.ClientConn{})}
func TestRejectBlankProjectID(t *testing.T) {
ids := []string{"", " ", " "}
for _, projectID := range ids {
opts := Options{ProjectID: projectID, ClientOptions: authOptions}
opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions}
exp, err := newStatsExporter(opts)
if err == nil || exp != nil {
t.Errorf("%q ProjectID must be rejected: NewExporter() = %v err = %q", projectID, exp, err)
@@ -55,7 +52,7 @@ func TestRejectBlankProjectID(t *testing.T) {
func TestNewExporterSingletonPerProcess(t *testing.T) {
ids := []string{"open-census.io", "x", "fakeProjectID"}
for _, projectID := range ids {
opts := Options{ProjectID: projectID, ClientOptions: authOptions}
opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions}
exp, err := newStatsExporter(opts)
if err != nil {
t.Errorf("NewExporter() projectID = %q err = %q", projectID, err)
@@ -73,10 +70,7 @@ func TestNewExporterSingletonPerProcess(t *testing.T) {
}
func TestExporter_makeReq(t *testing.T) {
m, err := stats.Float64("test-measure", "measure desc", "unit")
if err != nil {
t.Fatal(err)
}
m := stats.Float64("test-measure", "measure desc", "unit")
key, err := tag.NewKey("test_key")
if err != nil {
@@ -99,18 +93,12 @@ func TestExporter_makeReq(t *testing.T) {
start := time.Now()
end := start.Add(time.Minute)
count1 := view.CountData(10)
count2 := view.CountData(16)
sum1 := view.SumData(5.5)
sum2 := view.SumData(-11.1)
mean1 := view.MeanData{
Mean: 3.3,
Count: 7,
}
mean2 := view.MeanData{
Mean: -7.7,
Count: 5,
}
count1 := &view.CountData{Value: 10}
count2 := &view.CountData{Value: 16}
sum1 := &view.SumData{Value: 5.5}
sum2 := &view.SumData{Value: -11.1}
last1 := view.LastValueData{Value: 100}
last2 := view.LastValueData{Value: 200}
taskValue := getTaskValue()
tests := []struct {
@@ -122,7 +110,7 @@ func TestExporter_makeReq(t *testing.T) {
{
name: "count agg + timeline",
projID: "proj-id",
vd: newTestViewData(v, start, end, &count1, &count2),
vd: newTestViewData(v, start, end, count1, count2),
want: []*monitoringpb.CreateTimeSeriesRequest{{
Name: monitoring.MetricProjectPath("proj-id"),
TimeSeries: []*monitoringpb.TimeSeries{
@@ -190,7 +178,7 @@ func TestExporter_makeReq(t *testing.T) {
{
name: "sum agg + timeline",
projID: "proj-id",
vd: newTestViewData(v, start, end, &sum1, &sum2),
vd: newTestViewData(v, start, end, sum1, sum2),
want: []*monitoringpb.CreateTimeSeriesRequest{{
Name: monitoring.MetricProjectPath("proj-id"),
TimeSeries: []*monitoringpb.TimeSeries{
@@ -256,9 +244,9 @@ func TestExporter_makeReq(t *testing.T) {
}},
},
{
name: "mean agg + timeline",
name: "last value agg",
projID: "proj-id",
vd: newTestViewData(v, start, end, &mean1, &mean2),
vd: newTestViewData(v, start, end, &last1, &last2),
want: []*monitoringpb.CreateTimeSeriesRequest{{
Name: monitoring.MetricProjectPath("proj-id"),
TimeSeries: []*monitoringpb.TimeSeries{
@@ -285,20 +273,8 @@ func TestExporter_makeReq(t *testing.T) {
Nanos: int32(end.Nanosecond()),
},
},
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
Count: 7,
Mean: 3.3,
SumOfSquaredDeviation: 0,
BucketOptions: &distributionpb.Distribution_BucketOptions{
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
Bounds: []float64{0},
},
},
},
BucketCounts: []int64{0, 7},
},
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
DoubleValue: 100,
}},
},
},
@@ -326,20 +302,8 @@ func TestExporter_makeReq(t *testing.T) {
Nanos: int32(end.Nanosecond()),
},
},
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
Count: 5,
Mean: -7.7,
SumOfSquaredDeviation: 0,
BucketOptions: &distributionpb.Distribution_BucketOptions{
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
Bounds: []float64{0},
},
},
},
BucketCounts: []int64{0, 5},
},
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
DoubleValue: 200,
}},
},
},
@@ -378,10 +342,7 @@ func TestExporter_makeReq(t *testing.T) {
}
func TestExporter_makeReq_batching(t *testing.T) {
m, err := stats.Float64("test-measure/makeReq_batching", "measure desc", "unit")
if err != nil {
t.Fatal(err)
}
m := stats.Float64("test-measure/makeReq_batching", "measure desc", "unit")
key, err := tag.NewKey("test_key")
if err != nil {
@@ -426,13 +387,13 @@ func TestExporter_makeReq_batching(t *testing.T) {
},
}
count1 := view.CountData(10)
count2 := view.CountData(16)
count1 := &view.CountData{Value: 10}
count2 := &view.CountData{Value: 16}
for _, tt := range tests {
var vds []*view.Data
for i := 0; i < tt.iter; i++ {
vds = append(vds, newTestViewData(v, time.Now(), time.Now(), &count1, &count2))
vds = append(vds, newTestViewData(v, time.Now(), time.Now(), count1, count2))
}
e := &statsExporter{}
@@ -457,58 +418,97 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
tests := []struct {
name string
md *metricpb.MetricDescriptor
m stats.Measure
agg *view.Aggregation
keys []tag.Key
wantErr bool
}{
{
name: "count agg",
name: "count agg with in64 measure",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_INT64,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
m: stats.Int64("name", "", ""),
agg: view.Count(),
wantErr: false,
},
{
name: "sum agg",
name: "count agg with double measure",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_INT64,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
m: stats.Float64("name", "", ""),
agg: view.Count(),
wantErr: false,
},
{
name: "sum agg double",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_DOUBLE,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
m: stats.Float64("name", "", ""),
agg: view.Sum(),
wantErr: false,
},
{
name: "mean agg",
name: "sum agg int64",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_DISTRIBUTION,
ValueType: metricpb.MetricDescriptor_INT64,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
agg: view.Mean(),
m: stats.Int64("name", "", ""),
agg: view.Sum(),
wantErr: false,
},
{
name: "distribution agg - mismatch",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_DISTRIBUTION,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
agg: view.Count(),
wantErr: true,
},
{
name: "mean agg - mismatch",
name: "last value agg double",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_DOUBLE,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
agg: view.Mean(),
m: stats.Float64("name", "", ""),
agg: view.LastValue(),
wantErr: false,
},
{
name: "last value agg int64",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_INT64,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
m: stats.Int64("name", "", ""),
agg: view.LastValue(),
wantErr: false,
},
{
name: "distribution - mismatch",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_DISTRIBUTION,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
m: stats.Int64("name", "", ""),
agg: view.Count(),
wantErr: true,
},
{
name: "last value - measure mismatch",
md: &metricpb.MetricDescriptor{
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_INT64,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
m: stats.Float64("name", "", ""),
agg: view.LastValue(),
wantErr: true,
},
{
@@ -522,6 +522,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
{Key: opencensusTaskKey},
},
},
m: stats.Int64("name", "", ""),
agg: view.Distribution(),
keys: []tag.Key{key1, key2},
wantErr: false,
@@ -532,6 +533,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_DISTRIBUTION,
},
m: stats.Int64("name", "", ""),
agg: view.Distribution(),
keys: []tag.Key{key1, key2},
wantErr: true,
@@ -543,13 +545,14 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
ValueType: metricpb.MetricDescriptor_INT64,
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
},
m: stats.Int64("name", "", ""),
agg: view.Count(),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := equalAggTagKeys(tt.md, tt.agg, tt.keys)
err := equalMeasureAggTagKeys(tt.md, tt.m, tt.agg, tt.keys)
if err != nil && !tt.wantErr {
t.Errorf("equalAggTagKeys() = %q; want no error", err)
}
@@ -562,49 +565,62 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
}
func TestExporter_createMeasure(t *testing.T) {
oldGetMetricDescriptor := getMetricDescriptor
oldCreateMetricDescriptor := createMetricDescriptor
defer func() {
getMetricDescriptor = oldGetMetricDescriptor
createMetricDescriptor = oldCreateMetricDescriptor
}()
key, _ := tag.NewKey("test-key-one")
m, err := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", "unit")
if err != nil {
t.Fatal(err)
}
m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds)
v := &view.View{
Name: "testview",
Description: "desc",
Name: "test_view_sum",
Description: "view_description",
TagKeys: []tag.Key{key},
Measure: m,
Aggregation: view.Count(),
Aggregation: view.Sum(),
}
data := view.CountData(0)
vd := newTestViewData(v, time.Now(), time.Now(), &data, &data)
data := &view.CountData{Value: 0}
vd := newTestViewData(v, time.Now(), time.Now(), data, data)
e := &statsExporter{
createdViews: make(map[string]*metricpb.MetricDescriptor),
o: Options{ProjectID: "test_project"},
}
var getCalls, createCalls int
getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
getCalls++
return nil, status.Error(codes.NotFound, "")
}
var createCalls int
createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
createCalls++
if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_sum"; got != want {
t.Errorf("MetricDescriptor.Name = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_sum"; got != want {
t.Errorf("MetricDescriptor.Type = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.ValueType, metricpb.MetricDescriptor_DOUBLE; got != want {
t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want {
t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want {
t.Errorf("MetricDescriptor.Description = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_sum"; got != want {
t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.Unit, stats.UnitMilliseconds; got != want {
t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want)
}
return &metric.MetricDescriptor{
DisplayName: "display",
Description: "desc",
Unit: "unit",
Type: "hello",
DisplayName: "OpenCensus/test_view_sum",
Description: "view_description",
Unit: stats.UnitMilliseconds,
Type: "custom.googleapis.com/opencensus/test_view_sum",
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_INT64,
ValueType: metricpb.MetricDescriptor_DOUBLE,
Labels: newLabelDescriptors(vd.View.TagKeys),
}, nil
}
@@ -616,9 +632,6 @@ func TestExporter_createMeasure(t *testing.T) {
if err := e.createMeasure(ctx, vd); err != nil {
t.Errorf("Exporter.createMeasure() error = %v", err)
}
if count := getCalls; count != 1 {
t.Errorf("getMetricDescriptor needs to be called for once; called %v times", count)
}
if count := createCalls; count != 1 {
t.Errorf("createMetricDescriptor needs to be called for once; called %v times", count)
}
@@ -627,12 +640,73 @@ func TestExporter_createMeasure(t *testing.T) {
}
}
func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) {
m, err := stats.Float64("test-measure/TestExporter_makeReq_withCustomMonitoredResource", "measure desc", "unit")
if err != nil {
t.Fatal(err)
func TestExporter_createMeasure_CountAggregation(t *testing.T) {
oldCreateMetricDescriptor := createMetricDescriptor
defer func() {
createMetricDescriptor = oldCreateMetricDescriptor
}()
key, _ := tag.NewKey("test-key-one")
m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds)
v := &view.View{
Name: "test_view_count",
Description: "view_description",
TagKeys: []tag.Key{key},
Measure: m,
Aggregation: view.Count(),
}
data := &view.CountData{Value: 0}
vd := newTestViewData(v, time.Now(), time.Now(), data, data)
e := &statsExporter{
createdViews: make(map[string]*metricpb.MetricDescriptor),
o: Options{ProjectID: "test_project"},
}
createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_count"; got != want {
t.Errorf("MetricDescriptor.Name = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_count"; got != want {
t.Errorf("MetricDescriptor.Type = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.ValueType, metricpb.MetricDescriptor_INT64; got != want {
t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want {
t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want {
t.Errorf("MetricDescriptor.Description = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_count"; got != want {
t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want)
}
if got, want := mdr.MetricDescriptor.Unit, stats.UnitDimensionless; got != want {
t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want)
}
return &metric.MetricDescriptor{
DisplayName: "OpenCensus/test_view_sum",
Description: "view_description",
Unit: stats.UnitDimensionless,
Type: "custom.googleapis.com/opencensus/test_view_count",
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
ValueType: metricpb.MetricDescriptor_INT64,
Labels: newLabelDescriptors(vd.View.TagKeys),
}, nil
}
ctx := context.Background()
if err := e.createMeasure(ctx, vd); err != nil {
t.Errorf("Exporter.createMeasure() error = %v", err)
}
}
func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) {
m := stats.Float64("test-measure/TestExporter_makeReq_withCustomMonitoredResource", "measure desc", "unit")
key, err := tag.NewKey("test_key")
if err != nil {
t.Fatal(err)
@@ -645,15 +719,15 @@ func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) {
Measure: m,
Aggregation: view.Count(),
}
if err := view.Subscribe(v); err != nil {
if err := view.Register(v); err != nil {
t.Fatal(err)
}
defer view.Unsubscribe(v)
defer view.Unregister(v)
start := time.Now()
end := start.Add(time.Minute)
count1 := view.CountData(10)
count2 := view.CountData(16)
count1 := &view.CountData{Value: 10}
count2 := &view.CountData{Value: 16}
taskValue := getTaskValue()
resource := &monitoredrespb.MonitoredResource{
@@ -670,7 +744,7 @@ func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) {
{
name: "count agg timeline",
projID: "proj-id",
vd: newTestViewData(v, start, end, &count1, &count2),
vd: newTestViewData(v, start, end, count1, count2),
want: []*monitoringpb.CreateTimeSeriesRequest{{
Name: monitoring.MetricProjectPath("proj-id"),
TimeSeries: []*monitoringpb.TimeSeries{

View File

@@ -31,6 +31,7 @@ import (
// Stackdriver.
//
type traceExporter struct {
o Options
projectID string
bundler *bundler.Bundler
// uploadFn defaults to uploadSpans; it can be replaced for tests.
@@ -42,7 +43,7 @@ type traceExporter struct {
var _ trace.Exporter = (*traceExporter)(nil)
func newTraceExporter(o Options) (*traceExporter, error) {
client, err := tracingclient.NewClient(context.Background(), o.ClientOptions...)
client, err := tracingclient.NewClient(context.Background(), o.TraceClientOptions...)
if err != nil {
return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err)
}
@@ -53,6 +54,7 @@ func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExport
e := &traceExporter{
projectID: o.ProjectID,
client: c,
o: o,
}
bundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
e.uploadFn(bundle.([]*trace.SpanData))
@@ -93,7 +95,7 @@ func (e *traceExporter) ExportSpan(s *trace.SpanData) {
case bundler.ErrOverflow:
e.overflowLogger.log()
default:
log.Println("OpenCensus Stackdriver exporter: failed to upload span:", err)
e.o.handleError(err)
}
}
@@ -115,16 +117,18 @@ func (e *traceExporter) uploadSpans(spans []*trace.SpanData) {
req.Spans = append(req.Spans, protoFromSpanData(span, e.projectID))
}
// Create a never-sampled span to prevent traces associated with exporter.
span := trace.NewSpan("go.opencensus.io/exporter/stackdriver.uploadSpans", nil, trace.StartOptions{Sampler: trace.NeverSample()})
ctx, span := trace.StartSpan( // TODO: add timeouts
context.Background(),
"go.opencensus.io/exporter/stackdriver.uploadSpans",
trace.WithSampler(trace.NeverSample()),
)
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans))))
ctx := trace.WithSpan(context.Background(), span) // TODO: add timeouts
err := e.client.BatchWriteSpans(ctx, &req)
if err != nil {
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
// TODO: Allow configuring a logger for exporters.
log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: %v", len(spans), err)
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
e.o.handleError(err)
}
}

View File

@@ -15,7 +15,6 @@
package stackdriver
import (
"fmt"
"math"
"time"
"unicode/utf8"
@@ -144,7 +143,7 @@ func protoFromSpanData(s *trace.SpanData, projectID string) *tracepb.Span {
sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links))
for _, l := range s.Links {
link := &tracepb.Span_Link{
TraceId: fmt.Sprintf("projects/%s/traces/%s", projectID, l.TraceID),
TraceId: l.TraceID.String(),
SpanId: l.SpanID.String(),
Type: tracepb.Span_Link_Type(l.Type),
}
@@ -152,7 +151,6 @@ func protoFromSpanData(s *trace.SpanData, projectID string) *tracepb.Span {
sp.Links.Link = append(sp.Links.Link, link)
}
}
return sp
}

View File

@@ -57,19 +57,21 @@ func (t *testExporter) ExportSpan(s *trace.SpanData) {
}
func TestExportTrace(t *testing.T) {
ctx := context.Background()
var te testExporter
trace.RegisterExporter(&te)
defer trace.UnregisterExporter(&te)
span0 := trace.NewSpanWithRemoteParent(
ctx, span0 := trace.StartSpanWithRemoteParent(
ctx,
"span0",
trace.SpanContext{
TraceID: traceID,
SpanID: spanID,
TraceOptions: 1,
},
trace.StartOptions{})
ctx := trace.WithSpan(context.Background(), span0)
)
{
ctx1, span1 := trace.StartSpan(ctx, "span1")
{
@@ -289,7 +291,7 @@ func TestExportTrace(t *testing.T) {
Links: &tracepb.Span_Links{
Link: []*tracepb.Span_Link{
{
TraceId: "projects/testproject/traces/01020000000000000000000000000000",
TraceId: "01020000000000000000000000000000",
SpanId: "0300000000000000",
Type: tracepb.Span_Link_PARENT_LINKED_SPAN,
Attributes: &tracepb.Span_Attributes{

View File

@@ -35,9 +35,8 @@ func TestBundling(t *testing.T) {
}
trace.RegisterExporter(exporter)
trace.SetDefaultSampler(trace.AlwaysSample())
for i := 0; i < 35; i++ {
_, span := trace.StartSpan(context.Background(), "span")
_, span := trace.StartSpan(context.Background(), "span", trace.WithSampler(trace.AlwaysSample()))
span.End()
}

View File

@@ -42,7 +42,7 @@ func main() {
trace.RegisterExporter(exporter)
// For example purposes, sample every trace.
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
ctx := context.Background()
foo(ctx)

View File

@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
package internal // import "go.opencensus.io/internal"
import "time"
// UserAgent is the user agent to be added to the outgoing
// requests from the exporters.
const UserAgent = "opencensus-go-v0.4.0"
const UserAgent = "opencensus-go [0.8.0]"
// MonotonicEndTime returns the end time at present
// but offset from start, monotonically.

View File

@@ -2,5 +2,5 @@ Use the following commands to regenerate the README.
```bash
$ go get github.com/rakyll/embedmd
$ embedmd source.md > ../../README.md
$ embedmd -w ../../README.md
```

View File

@@ -1,141 +0,0 @@
# OpenCensus Libraries for Go
[![Build Status][travis-image]][travis-url]
[![Windows Build Status][appveyor-image]][appveyor-url]
[![GoDoc][godoc-image]][godoc-url]
[![Gitter chat][gitter-image]][gitter-url]
OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
collecting application performance and behavior monitoring data.
Currently it consists of three major components: tags, stats, and tracing.
This project is still at a very early stage of development. The API is changing
rapidly, vendoring is recommended.
## Installation
```
$ go get -u go.opencensus.io
```
## Prerequisites
OpenCensus Go libraries require Go 1.8 or later.
## Exporters
OpenCensus can export instrumentation data to various backends.
Currently, OpenCensus supports:
* [Prometheus][exporter-prom] for stats
* [OpenZipkin][exporter-zipkin] for traces
* Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver]
* [Jaeger][exporter-jaeger] for traces
* [AWS X-Ray][exporter-xray] for traces
## Overview
![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg)
In a microservices environment, a user request may go through
multiple services until there is a response. OpenCensus allows
you to instrument your services and collect diagnostics data all
through your services end-to-end.
Start with instrumenting HTTP and gRPC clients and servers,
then add additional custom instrumentation if needed.
* [HTTP guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/http)
* [gRPC guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/grpc)
## Tags
Tags represent propagated key-value pairs. They are propagated using context.Context
in the same process or can be encoded to be transmitted on the wire and decoded back
to a tag.Map at the destination.
Package tag provides a builder to create tag maps and put it
into the current context.
To propagate a tag map to downstream methods and RPCs, New
will add the produced tag map to the current context.
If there is already a tag map in the current context, it will be replaced.
[embedmd]:# (tags.go new)
## Stats
OpenCensus is a low-overhead framework even if instrumentation is always enabled.
In order to be so, it is optimized to make recording of data points fast
and separate from the data aggregation.
OpenCensus stats collection happens in two stages:
* Definition of measures and recording of data points
* Definition of views and aggregation of the recorded data
### Recording
Measurements are data points associated with a measure.
Recording implicitly tags the set of Measurements with the tags from the
provided context:
[embedmd]:# (stats.go record)
### Views
Views are how Measures are aggregated. You can think of them as queries over the
set of recorded data points (measurements).
Views have two parts: the tags to group by and the aggregation type used.
Currently four types of aggregations are supported:
* CountAggregation is used to count the number of times a sample was recorded.
* DistributionAggregation is used to provide a histogram of the values of the samples.
* SumAggregation is used to sum up all sample values.
* MeanAggregation is used to calculate the mean of sample values.
[embedmd]:# (stats.go aggs)
Here we create a view with the DistributionAggregation over our measure.
[embedmd]:# (stats.go view)
Subscribe begins collecting data for the view. Subscribed views' data will be
exported via the registered exporters.
## Traces
[embedmd]:# (trace.go startend)
## Profiles
OpenCensus tags can be applied as profiler labels
for users who are on Go 1.9 and above.
[embedmd]:# (tags.go profiler)
A screenshot of the CPU profile from the program above:
![CPU profile](https://i.imgur.com/jBKjlkw.png)
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
[godoc-url]: https://godoc.org/go.opencensus.io
[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus
[exporter-stackdriver]: https://godoc.org/go.opencensus.io/exporter/stackdriver
[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws

View File

@@ -13,7 +13,7 @@
// limitations under the License.
// Package readme generates the README.
package readme
package readme // import "go.opencensus.io/internal/readme"
import (
"context"
@@ -29,27 +29,18 @@ import (
func statsExamples() {
ctx := context.Background()
videoSize, err := stats.Int64("my.org/video_size", "processed video size", "MB")
if err != nil {
log.Fatal(err)
}
m := stats.FindMeasure("my.org/video_size")
if m == nil {
log.Fatalln("measure not found")
}
videoSize := stats.Int64("my.org/video_size", "processed video size", "MB")
// START aggs
distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
countAgg := view.Count()
sumAgg := view.Sum()
meanAgg := view.Mean()
// END aggs
_, _, _, _ = distAgg, countAgg, sumAgg, meanAgg
_, _, _ = distAgg, countAgg, sumAgg
// START view
if err = view.Subscribe(&view.View{
if err := view.Register(&view.View{
Name: "my.org/video_size_distribution",
Description: "distribution of processed video size over time",
Measure: videoSize,

View File

@@ -15,7 +15,7 @@
// Package tagencoding contains the tag encoding
// used interally by the stats collector.
package tagencoding
package tagencoding // import "go.opencensus.io/internal/tagencoding"
type Values struct {
Buffer []byte

View File

@@ -11,7 +11,7 @@ It has these top-level messages:
FooRequest
FooResponse
*/
package testpb
package testpb // import "go.opencensus.io/internal/testpb"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"

View File

@@ -0,0 +1,65 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocgrpc
import (
"testing"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func BenchmarkStatusCodeToString_OK(b *testing.B) {
st := status.New(codes.OK, "OK")
for i := 0; i < b.N; i++ {
s := statusCodeToString(st)
_ = s
}
}
func BenchmarkStatusCodeToString_Unauthenticated(b *testing.B) {
st := status.New(codes.Unauthenticated, "Unauthenticated")
for i := 0; i < b.N; i++ {
s := statusCodeToString(st)
_ = s
}
}
var codeToStringMap = map[codes.Code]string{
codes.OK: "OK",
codes.Canceled: "CANCELLED",
codes.Unknown: "UNKNOWN",
codes.InvalidArgument: "INVALID_ARGUMENT",
codes.DeadlineExceeded: "DEADLINE_EXCEEDED",
codes.NotFound: "NOT_FOUND",
codes.AlreadyExists: "ALREADY_EXISTS",
codes.PermissionDenied: "PERMISSION_DENIED",
codes.ResourceExhausted: "RESOURCE_EXHAUSTED",
codes.FailedPrecondition: "FAILED_PRECONDITION",
codes.Aborted: "ABORTED",
codes.OutOfRange: "OUT_OF_RANGE",
codes.Unimplemented: "UNIMPLEMENTED",
codes.Internal: "INTERNAL",
codes.Unavailable: "UNAVAILABLE",
codes.DataLoss: "DATA_LOSS",
codes.Unauthenticated: "UNAUTHENTICATED",
}
func BenchmarkMapAlternativeImpl_OK(b *testing.B) {
st := status.New(codes.OK, "OK")
for i := 0; i < b.N; i++ {
_ = codeToStringMap[st.Code()]
}
}

View File

@@ -44,7 +44,7 @@ func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) con
// HandleRPC implements per-RPC tracing and stats instrumentation.
func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
traceHandleRPC(ctx, rs)
c.statsHandleRPC(ctx, rs)
statsHandleRPC(ctx, rs)
}
// TagRPC implements per-RPC context management.

View File

@@ -23,14 +23,12 @@ import (
// The following variables are measures are recorded by ClientHandler:
var (
ClientErrorCount, _ = stats.Int64("grpc.io/client/error_count", "RPC Errors", stats.UnitNone)
ClientRequestBytes, _ = stats.Int64("grpc.io/client/request_bytes", "Request bytes", stats.UnitBytes)
ClientResponseBytes, _ = stats.Int64("grpc.io/client/response_bytes", "Response bytes", stats.UnitBytes)
ClientStartedCount, _ = stats.Int64("grpc.io/client/started_count", "Number of client RPCs (streams) started", stats.UnitNone)
ClientFinishedCount, _ = stats.Int64("grpc.io/client/finished_count", "Number of client RPCs (streams) finished", stats.UnitNone)
ClientRequestCount, _ = stats.Int64("grpc.io/client/request_count", "Number of client RPC request messages", stats.UnitNone)
ClientResponseCount, _ = stats.Int64("grpc.io/client/response_count", "Number of client RPC response messages", stats.UnitNone)
ClientRoundTripLatency, _ = stats.Float64("grpc.io/client/roundtrip_latency", "RPC roundtrip latency in msecs", stats.UnitMilliseconds)
ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes)
ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
)
// Predefined views may be subscribed to collect data for the above measures.
@@ -38,63 +36,78 @@ var (
// package. These are declared as a convenience only; none are subscribed by
// default.
var (
ClientErrorCountView = &view.View{
Name: "grpc.io/client/error_count",
Description: "RPC Errors",
TagKeys: []tag.Key{KeyStatus, KeyMethod},
Measure: ClientErrorCount,
Aggregation: view.Mean(),
ClientSentBytesPerRPCView = &view.View{
Measure: ClientSentBytesPerRPC,
Name: "grpc.io/client/sent_bytes_per_rpc",
Description: "Distribution of bytes sent per RPC, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultBytesDistribution,
}
ClientRoundTripLatencyView = &view.View{
ClientReceivedBytesPerRPCView = &view.View{
Measure: ClientReceivedBytesPerRPC,
Name: "grpc.io/client/received_bytes_per_rpc",
Description: "Distribution of bytes received per RPC, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultBytesDistribution,
}
ClientRoundtripLatencyView = &view.View{
Measure: ClientRoundtripLatency,
Name: "grpc.io/client/roundtrip_latency",
Description: "Latency in msecs",
TagKeys: []tag.Key{KeyMethod},
Measure: ClientRoundTripLatency,
Description: "Distribution of round-trip latency, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultMillisecondsDistribution,
}
ClientRequestBytesView = &view.View{
Name: "grpc.io/client/request_bytes",
Description: "Request bytes",
TagKeys: []tag.Key{KeyMethod},
Measure: ClientRequestBytes,
Aggregation: DefaultBytesDistribution,
ClientCompletedRPCsView = &view.View{
Measure: ClientRoundtripLatency,
Name: "grpc.io/client/completed_rpcs",
Description: "Count of RPCs by method and status.",
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
Aggregation: view.Count(),
}
ClientResponseBytesView = &view.View{
Name: "grpc.io/client/response_bytes",
Description: "Response bytes",
TagKeys: []tag.Key{KeyMethod},
Measure: ClientResponseBytes,
Aggregation: DefaultBytesDistribution,
ClientSentMessagesPerRPCView = &view.View{
Measure: ClientSentMessagesPerRPC,
Name: "grpc.io/client/sent_messages_per_rpc",
Description: "Distribution of sent messages count per RPC, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultMessageCountDistribution,
}
ClientReceivedMessagesPerRPCView = &view.View{
Measure: ClientReceivedMessagesPerRPC,
Name: "grpc.io/client/received_messages_per_rpc",
Description: "Distribution of received messages count per RPC, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultMessageCountDistribution,
}
ClientServerLatencyView = &view.View{
Measure: ClientServerLatency,
Name: "grpc.io/client/server_latency",
Description: "Distribution of server latency as viewed by client, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultMillisecondsDistribution,
}
// Deprecated: This view is going to be removed, if you need it please define it
// yourself.
ClientRequestCountView = &view.View{
Name: "grpc.io/client/request_count",
Description: "Count of request messages per client RPC",
TagKeys: []tag.Key{KeyMethod},
Measure: ClientRequestCount,
Aggregation: DefaultMessageCountDistribution,
}
ClientResponseCountView = &view.View{
Name: "grpc.io/client/response_count",
Description: "Count of response messages per client RPC",
TagKeys: []tag.Key{KeyMethod},
Measure: ClientResponseCount,
Aggregation: DefaultMessageCountDistribution,
Name: "Count of request messages per client RPC",
TagKeys: []tag.Key{KeyClientMethod},
Measure: ClientRoundtripLatency,
Aggregation: view.Count(),
}
)
// DefaultClientViews are the default client views provided by this package.
var DefaultClientViews = []*view.View{
ClientErrorCountView,
ClientRoundTripLatencyView,
ClientRequestBytesView,
ClientResponseBytesView,
ClientRequestCountView,
ClientResponseCountView,
ClientSentBytesPerRPCView,
ClientReceivedBytesPerRPCView,
ClientRoundtripLatencyView,
ClientCompletedRPCsView,
}
// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.

View File

@@ -1,71 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ocgrpc
import (
"reflect"
"runtime"
"testing"
"go.opencensus.io/stats/view"
)
// TestViewsAggregationsConform checks that the aggregation *type* attached to
// each predefined client view matches the aggregation used by the Java
// reference implementation. Only the dynamic type is compared (via reflect),
// not the aggregation's parameters (e.g. bucket boundaries).
func TestViewsAggregationsConform(t *testing.T) {
	// See Issue https://github.com/census-instrumentation/opencensus-go/issues/210.
	// This test ensures that the types of our Views match up with those
	// from the Java reference at
	// https://github.com/census-instrumentation/opencensus-java/blob/2b464864e3dd3f80e8e4c9dc72fccc225444a939/contrib/grpc_metrics/src/main/java/io/opencensus/contrib/grpc/metrics/RpcViewConstants.java#L113-L658
	// Add any other defined views to be type checked during tests to ensure we don't regress.
	assertTypeOf := func(v *view.View, wantSample *view.Aggregation) {
		aggregation := v.Aggregation
		gotValue := reflect.ValueOf(aggregation)
		wantValue := reflect.ValueOf(wantSample)
		if gotValue.Type() != wantValue.Type() {
			// Caller(1) is the assertTypeOf call site below, so the failure
			// message points at the specific assertion that mismatched.
			_, _, line, _ := runtime.Caller(1)
			t.Errorf("Item on line: %d got %T want %T", line, aggregation, wantSample)
		}
	}
	assertTypeOf(ClientErrorCountView, view.Mean())
	assertTypeOf(ClientRoundTripLatencyView, view.Distribution())
	assertTypeOf(ClientRequestBytesView, view.Distribution())
	assertTypeOf(ClientResponseBytesView, view.Distribution())
	assertTypeOf(ClientRequestCountView, view.Distribution())
	assertTypeOf(ClientResponseCountView, view.Distribution())
}
// TestStrictViewNames ensures every predefined client view carries exactly
// the expected name and that no two views share a name.
func TestStrictViewNames(t *testing.T) {
	// alreadySeen maps a view name to the test line that first asserted it,
	// so a duplicate can report both locations.
	alreadySeen := make(map[string]int)
	assertName := func(v *view.View, want string) {
		// Caller(1) is the assertName call site below.
		_, _, line, _ := runtime.Caller(1)
		if prevLine, ok := alreadySeen[v.Name]; ok {
			t.Errorf("Item's Name on line %d was already used on line %d", line, prevLine)
			return
		}
		if got := v.Name; got != want {
			t.Errorf("Item on line: %d got %q want %q", line, got, want)
		}
		alreadySeen[v.Name] = line
	}
	assertName(ClientErrorCountView, "grpc.io/client/error_count")
	assertName(ClientRoundTripLatencyView, "grpc.io/client/roundtrip_latency")
	assertName(ClientRequestBytesView, "grpc.io/client/request_bytes")
	assertName(ClientResponseBytesView, "grpc.io/client/response_bytes")
	assertName(ClientRequestCountView, "grpc.io/client/request_count")
	assertName(ClientResponseCountView, "grpc.io/client/response_count")
}

View File

@@ -0,0 +1,152 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ocgrpc
import (
"regexp"
"strings"
"testing"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
)
// colSep splits a markdown table row on its "|" separators, discarding the
// whitespace padding around each cell. Compiled once at package scope.
var colSep = regexp.MustCompile(`\s*\|\s*`)
// TestSpecClientMeasures checks the client-side measures declared in this
// package against the names, units and descriptions mandated by the
// OpenCensus gRPC stats specification, reproduced inline as a markdown table.
func TestSpecClientMeasures(t *testing.T) {
	spec := `
| Measure name                              | Unit | Description                                                                                   |
|-------------------------------------------|------|-----------------------------------------------------------------------------------------------|
| grpc.io/client/sent_messages_per_rpc      | 1    | Number of messages sent in the RPC (always 1 for non-streaming RPCs).                         |
| grpc.io/client/sent_bytes_per_rpc         | By   | Total bytes sent across all request messages per RPC.                                         |
| grpc.io/client/received_messages_per_rpc  | 1    | Number of response messages received per RPC (always 1 for non-streaming RPCs).               |
| grpc.io/client/received_bytes_per_rpc     | By   | Total bytes received across all response messages per RPC.                                    |
| grpc.io/client/roundtrip_latency          | ms   | Time between first byte of request sent to last byte of response received, or terminal error. |
| grpc.io/client/server_latency             | ms   | Propagated from the server and should have the same value as "grpc.io/server/latency".        |`

	// Skip the leading blank line, the header row and the separator row.
	lines := strings.Split(spec, "\n")[3:]

	type measureDef struct {
		name string
		unit string
		desc string
	}
	measureDefs := make([]measureDef, 0, len(lines))
	for _, line := range lines {
		// colSep consumes the surrounding pipes/whitespace; drop the empty
		// leading cell produced by the row's opening "|".
		cols := colSep.Split(line, -1)[1:]
		if len(cols) < 3 {
			t.Fatalf("Invalid config line %#v", cols)
		}
		measureDefs = append(measureDefs, measureDef{cols[0], cols[1], cols[2]})
	}

	// Must be listed in the same order as the spec rows above.
	gotMeasures := []stats.Measure{
		ClientSentMessagesPerRPC,
		ClientSentBytesPerRPC,
		ClientReceivedMessagesPerRPC,
		ClientReceivedBytesPerRPC,
		ClientRoundtripLatency,
		ClientServerLatency,
	}
	if got, want := len(gotMeasures), len(measureDefs); got != want {
		t.Fatalf("len(gotMeasures) = %d; want %d", got, want)
	}
	for i, m := range gotMeasures {
		defn := measureDefs[i]
		// Fix (consistency): the Name mismatch message previously omitted the
		// measure-name prefix that the Unit/Description messages carry.
		if got, want := m.Name(), defn.name; got != want {
			t.Errorf("%q: Name = %q; want %q", defn.name, got, want)
		}
		if got, want := m.Unit(), defn.unit; got != want {
			t.Errorf("%q: Unit = %q; want %q", defn.name, got, want)
		}
		if got, want := m.Description(), defn.desc; got != want {
			t.Errorf("%q: Description = %q; want %q", defn.name, got, want)
		}
	}
}
// TestSpecClientViews checks the client views declared in this package
// against the names, measures, aggregations and tag keys mandated by the
// OpenCensus gRPC stats specification, reproduced inline as markdown tables.
func TestSpecClientViews(t *testing.T) {
	defaultViewsSpec := `
| View name                              | Measure suffix         | Aggregation  | Tags                         |
|----------------------------------------|------------------------|--------------|------------------------------|
| grpc.io/client/sent_bytes_per_rpc      | sent_bytes_per_rpc     | distribution | client_method                |
| grpc.io/client/received_bytes_per_rpc  | received_bytes_per_rpc | distribution | client_method                |
| grpc.io/client/roundtrip_latency       | roundtrip_latency      | distribution | client_method                |
| grpc.io/client/completed_rpcs          | roundtrip_latency      | count        | client_method, client_status |`

	extraViewsSpec := `
| View name                                 | Measure suffix            | Aggregation  | Tags suffix   |
|-------------------------------------------|---------------------------|--------------|---------------|
| grpc.io/client/sent_messages_per_rpc      | sent_messages_per_rpc     | distribution | client_method |
| grpc.io/client/received_messages_per_rpc  | received_messages_per_rpc | distribution | client_method |
| grpc.io/client/server_latency             | server_latency            | distribution | client_method |`

	// Skip each table's leading blank line, header row and separator row.
	lines := strings.Split(defaultViewsSpec, "\n")[3:]
	lines = append(lines, strings.Split(extraViewsSpec, "\n")[3:]...)

	type viewDef struct {
		name          string
		measureSuffix string
		aggregation   string
		tags          string
	}
	viewDefs := make([]viewDef, 0, len(lines))
	for _, line := range lines {
		cols := colSep.Split(line, -1)[1:]
		if len(cols) < 4 {
			t.Fatalf("Invalid config line %#v", cols)
		}
		viewDefs = append(viewDefs, viewDef{cols[0], cols[1], cols[2], cols[3]})
	}

	// Fix: copy DefaultClientViews before appending so we never write into
	// the package-level slice's backing array.
	views := append([]*view.View(nil), DefaultClientViews...)
	views = append(views, ClientSentMessagesPerRPCView, ClientReceivedMessagesPerRPCView, ClientServerLatencyView)

	// Fix: the message previously said "len(gotMeasures)" (copy-paste from
	// the measures test).
	if got, want := len(views), len(viewDefs); got != want {
		t.Fatalf("len(views) = %d; want %d", got, want)
	}
	for i, v := range views {
		defn := viewDefs[i]
		if got, want := v.Name, defn.name; got != want {
			t.Errorf("Name = %q; want %q", got, want)
		}
		if got, want := v.Measure.Name(), "grpc.io/client/"+defn.measureSuffix; got != want {
			t.Errorf("%q: Measure.Name = %q; want %q", defn.name, got, want)
		}
		// Fix: the aggregation mismatch messages previously said
		// "Description" and reversed got/want.
		var gotAgg string
		switch v.Aggregation.Type {
		case view.AggTypeDistribution:
			gotAgg = "distribution"
		case view.AggTypeCount:
			gotAgg = "count"
		default:
			t.Errorf("%q: unexpected aggregation type %v", defn.name, v.Aggregation.Type)
		}
		if gotAgg != "" && gotAgg != defn.aggregation {
			t.Errorf("%q: Aggregation = %q; want %q", defn.name, gotAgg, defn.aggregation)
		}
		wantTags := strings.Split(defn.tags, ", ")
		if got, want := len(v.TagKeys), len(wantTags); got != want {
			t.Errorf("len(TagKeys) = %d; want %d", got, want)
			// Fix: previously fell through and could index past the end of
			// v.TagKeys in the loop below, panicking the test binary.
			continue
		}
		for j := range wantTags {
			if got, want := v.TagKeys[j].Name(), "grpc_"+wantTags[j]; got != want {
				t.Errorf("TagKeys[%d].Name() = %q; want %q", j, got, want)
			}
		}
	}
}

View File

@@ -16,15 +16,12 @@
package ocgrpc
import (
"sync/atomic"
"time"
ocstats "go.opencensus.io/stats"
"go.opencensus.io/tag"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
// statsTagRPC gets the tag.Map populated by the application code, serializes
@@ -48,81 +45,5 @@ func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo)
ctx = stats.SetTags(ctx, encoded)
}
// TODO(acetechnologist): should we be recording this later? What is the
// point of updating d.reqLen & d.reqCount if we update now?
record(ctx, d, "", ClientStartedCount.M(1))
return context.WithValue(ctx, grpcClientRPCKey, d)
}
// statsHandleRPC processes the RPC events.
func (h *ClientHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) {
switch st := s.(type) {
case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
// do nothing for client
case *stats.OutPayload:
h.handleRPCOutPayload(ctx, st)
case *stats.InPayload:
h.handleRPCInPayload(ctx, st)
case *stats.End:
h.handleRPCEnd(ctx, st)
default:
grpclog.Infof("unexpected stats: %T", st)
}
}
func (h *ClientHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
d, ok := ctx.Value(grpcClientRPCKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("clientHandler.handleRPCOutPayload failed to retrieve *rpcData from context")
}
return
}
record(ctx, d, "", ClientRequestBytes.M(int64(s.Length)))
atomic.AddInt64(&d.reqCount, 1)
}
func (h *ClientHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
d, ok := ctx.Value(grpcClientRPCKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("failed to retrieve *rpcData from context")
}
return
}
record(ctx, d, "", ClientResponseBytes.M(int64(s.Length)))
atomic.AddInt64(&d.respCount, 1)
}
func (h *ClientHandler) handleRPCEnd(ctx context.Context, s *stats.End) {
d, ok := ctx.Value(grpcClientRPCKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("failed to retrieve *rpcData from context")
}
return
}
elapsedTime := time.Since(d.startTime)
reqCount := atomic.LoadInt64(&d.reqCount)
respCount := atomic.LoadInt64(&d.respCount)
m := []ocstats.Measurement{
ClientRequestCount.M(reqCount),
ClientResponseCount.M(respCount),
ClientFinishedCount.M(1),
ClientRoundTripLatency.M(float64(elapsedTime) / float64(time.Millisecond)),
}
var st string
if s.Error != nil {
s, ok := status.FromError(s.Error)
if ok {
st = s.Code().String()
}
m = append(m, ClientErrorCount.M(1))
}
record(ctx, d, st, m...)
return context.WithValue(ctx, rpcDataKey, d)
}

View File

@@ -58,8 +58,8 @@ func TestClientDefaultCollections(t *testing.T) {
}
tcs := []testCase{
{
"1",
[]*rpc{
label: "1",
rpcs: []*rpc{
{
[]tagPair{{k1, "v1"}},
&stats.RPCTagInfo{FullMethodName: "/package.service/method"},
@@ -72,46 +72,46 @@ func TestClientDefaultCollections(t *testing.T) {
&stats.End{Error: nil},
},
},
[]*wantData{
wants: []*wantData{
{
func() *view.View { return ClientRequestCountView },
func() *view.View { return ClientSentMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
},
},
},
{
func() *view.View { return ClientResponseCountView },
func() *view.View { return ClientReceivedMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
},
},
},
{
func() *view.View { return ClientRequestBytesView },
func() *view.View { return ClientSentBytesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
},
},
},
{
func() *view.View { return ClientResponseBytesView },
func() *view.View { return ClientReceivedBytesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
},
@@ -120,8 +120,8 @@ func TestClientDefaultCollections(t *testing.T) {
},
},
{
"2",
[]*rpc{
label: "2",
rpcs: []*rpc{
{
[]tagPair{{k1, "v1"}},
&stats.RPCTagInfo{FullMethodName: "/package.service/method"},
@@ -149,36 +149,24 @@ func TestClientDefaultCollections(t *testing.T) {
&stats.End{Error: status.Error(codes.Canceled, "canceled")},
},
},
[]*wantData{
wants: []*wantData{
{
func() *view.View { return ClientErrorCountView },
func() *view.View { return ClientSentMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyStatus, Value: "Canceled"},
{Key: KeyMethod, Value: "package.service/method"},
},
Data: newMeanData(1, 1),
},
},
},
{
func() *view.View { return ClientRequestCountView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5),
},
},
},
{
func() *view.View { return ClientResponseCountView },
func() *view.View { return ClientReceivedMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5),
},
@@ -187,8 +175,8 @@ func TestClientDefaultCollections(t *testing.T) {
},
},
{
"3",
[]*rpc{
label: "3",
rpcs: []*rpc{
{
[]tagPair{{k1, "v1"}},
&stats.RPCTagInfo{FullMethodName: "/package.service/method"},
@@ -229,67 +217,48 @@ func TestClientDefaultCollections(t *testing.T) {
&stats.End{Error: status.Error(codes.Aborted, "aborted")},
},
},
[]*wantData{
wants: []*wantData{
{
func() *view.View { return ClientErrorCountView },
func() *view.View { return ClientSentMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyStatus, Value: "Canceled"},
{Key: KeyMethod, Value: "package.service/method"},
},
Data: newMeanData(1, 1),
},
{
Tags: []tag.Tag{
{Key: KeyStatus, Value: "Aborted"},
{Key: KeyMethod, Value: "package.service/method"},
},
Data: newMeanData(1, 1),
},
},
},
{
func() *view.View { return ClientRequestCountView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2),
},
},
},
{
func() *view.View { return ClientResponseCountView },
func() *view.View { return ClientReceivedMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2),
},
},
},
{
func() *view.View { return ClientRequestBytesView },
func() *view.View { return ClientSentBytesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 1, 1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 1, 65536, 13696.125, 481423542.982143*7),
Data: newDistributionData([]int64{0, 0, 0, 0, 0, 2 /*16384*/, 1 /*65536*/, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09),
},
},
},
{
func() *view.View { return ClientResponseBytesView },
func() *view.View { return ClientReceivedBytesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyClientMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 1, 16384, 4864.25, 59678208.25*3),
Data: newDistributionData([]int64{0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.666667, 2.1459558466666666e+08),
},
},
},
@@ -297,16 +266,25 @@ func TestClientDefaultCollections(t *testing.T) {
},
}
views := []*view.View{
ClientSentBytesPerRPCView,
ClientReceivedBytesPerRPCView,
ClientRoundtripLatencyView,
ClientCompletedRPCsView,
ClientSentMessagesPerRPCView,
ClientReceivedMessagesPerRPCView,
}
for _, tc := range tcs {
// Register views.
if err := view.Subscribe(DefaultClientViews...); err != nil {
if err := view.Register(views...); err != nil {
t.Error(err)
}
h := &ClientHandler{}
h.StartOptions.Sampler = trace.NeverSample()
for _, rpc := range tc.rpcs {
mods := []tag.Mutator{}
var mods []tag.Mutator
for _, t := range rpc.tags {
mods = append(mods, tag.Upsert(t.k, t.v))
}
@@ -318,11 +296,14 @@ func TestClientDefaultCollections(t *testing.T) {
ctx = stats.SetTags(context.Background(), encoded)
ctx = h.TagRPC(ctx, rpc.tagInfo)
for _, out := range rpc.outPayloads {
out.Client = true
h.HandleRPC(ctx, out)
}
for _, in := range rpc.inPayloads {
in.Client = true
h.HandleRPC(ctx, in)
}
rpc.end.Client = true
h.HandleRPC(ctx, rpc.end)
}
@@ -349,7 +330,7 @@ func TestClientDefaultCollections(t *testing.T) {
}
// Unregister views to cleanup.
view.Unsubscribe(DefaultClientViews...)
view.Unregister(views...)
}
}

View File

@@ -0,0 +1,239 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ocgrpc_test
import (
"context"
"io"
"reflect"
"testing"
"go.opencensus.io/internal/testpb"
"go.opencensus.io/plugin/ocgrpc"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
// keyAccountId is an application-level tag key used to verify that caller
// tags propagate alongside the plugin's own gRPC tags in the e2e tests.
var keyAccountId, _ = tag.NewKey("account_id")
// TestEndToEnd_Single exercises a unary RPC end to end and verifies that the
// default client/server views (plus the optional per-RPC message-count views)
// record the expected rows: one successful call tagged OK and one failing
// call tagged UNKNOWN.
func TestEndToEnd_Single(t *testing.T) {
	view.Register(ocgrpc.DefaultClientViews...)
	defer view.Unregister(ocgrpc.DefaultClientViews...)
	view.Register(ocgrpc.DefaultServerViews...)
	defer view.Unregister(ocgrpc.DefaultServerViews...)

	extraViews := []*view.View{
		ocgrpc.ServerReceivedMessagesPerRPCView,
		ocgrpc.ClientReceivedMessagesPerRPCView,
		ocgrpc.ServerSentMessagesPerRPCView,
		ocgrpc.ClientSentMessagesPerRPCView,
	}
	view.Register(extraViews...)
	defer view.Unregister(extraViews...)

	client, done := testpb.NewTestClient(t)
	defer done()

	ctx := context.Background()
	ctx, _ = tag.New(ctx, tag.Insert(keyAccountId, "abc123"))

	var (
		clientMethodTag   = tag.Tag{Key: ocgrpc.KeyClientMethod, Value: "testpb.Foo/Single"}
		serverMethodTag   = tag.Tag{Key: ocgrpc.KeyServerMethod, Value: "testpb.Foo/Single"}
		clientStatusOKTag = tag.Tag{Key: ocgrpc.KeyClientStatus, Value: "OK"}
		serverStatusOKTag = tag.Tag{Key: ocgrpc.KeyServerStatus, Value: "OK"}
		// Fix: these two variables were previously named the wrong way
		// around (the one called serverStatusUnknownTag held KeyClientStatus
		// and vice versa). Behavior is unchanged; only the names and their
		// uses below are swapped to match the keys they hold.
		clientStatusUnknownTag = tag.Tag{Key: ocgrpc.KeyClientStatus, Value: "UNKNOWN"}
		serverStatusUnknownTag = tag.Tag{Key: ocgrpc.KeyServerStatus, Value: "UNKNOWN"}
	)

	// One successful call: both completed-RPC views gain an OK row.
	_, err := client.Single(ctx, &testpb.FooRequest{})
	if err != nil {
		t.Fatal(err)
	}
	checkCount(t, ocgrpc.ClientCompletedRPCsView, 1, clientMethodTag, clientStatusOKTag)
	checkCount(t, ocgrpc.ServerCompletedRPCsView, 1, serverMethodTag, serverStatusOKTag)

	// One deliberately failing call: both views gain an UNKNOWN row.
	_, _ = client.Single(ctx, &testpb.FooRequest{Fail: true})
	checkCount(t, ocgrpc.ClientCompletedRPCsView, 1, clientMethodTag, clientStatusUnknownTag)
	checkCount(t, ocgrpc.ServerCompletedRPCsView, 1, serverMethodTag, serverStatusUnknownTag)

	// Distribution views: two RPCs total; the failing call produces no
	// response message, hence the 0.5 means on the receive-side views.
	tcs := []struct {
		v    *view.View
		tags []tag.Tag
		mean float64
	}{
		{ocgrpc.ClientSentMessagesPerRPCView, []tag.Tag{clientMethodTag}, 1.0},
		{ocgrpc.ServerReceivedMessagesPerRPCView, []tag.Tag{serverMethodTag}, 1.0},
		{ocgrpc.ClientReceivedMessagesPerRPCView, []tag.Tag{clientMethodTag}, 0.5},
		{ocgrpc.ServerSentMessagesPerRPCView, []tag.Tag{serverMethodTag}, 0.5},
		{ocgrpc.ClientSentBytesPerRPCView, []tag.Tag{clientMethodTag}, 1.0},
		{ocgrpc.ServerReceivedBytesPerRPCView, []tag.Tag{serverMethodTag}, 1.0},
		{ocgrpc.ClientReceivedBytesPerRPCView, []tag.Tag{clientMethodTag}, 0.0},
		{ocgrpc.ServerSentBytesPerRPCView, []tag.Tag{serverMethodTag}, 0.0},
	}
	for _, tt := range tcs {
		t.Run("view="+tt.v.Name, func(t *testing.T) {
			dist := getDistribution(t, tt.v, tt.tags...)
			if got, want := dist.Count, int64(2); got != want {
				t.Errorf("Count = %d; want %d", got, want)
			}
			if got, want := dist.Mean, tt.mean; got != want {
				t.Errorf("Mean = %v; want %v", got, want)
			}
		})
	}
}
// TestEndToEnd_Stream exercises a bidirectional streaming RPC end to end and
// verifies that the per-RPC message-count views record one row per RPC with a
// mean equal to the number of messages exchanged on the stream.
func TestEndToEnd_Stream(t *testing.T) {
	view.Register(ocgrpc.DefaultClientViews...)
	defer view.Unregister(ocgrpc.DefaultClientViews...)
	view.Register(ocgrpc.DefaultServerViews...)
	defer view.Unregister(ocgrpc.DefaultServerViews...)

	extraViews := []*view.View{
		ocgrpc.ServerReceivedMessagesPerRPCView,
		ocgrpc.ClientReceivedMessagesPerRPCView,
		ocgrpc.ServerSentMessagesPerRPCView,
		ocgrpc.ClientSentMessagesPerRPCView,
	}
	view.Register(extraViews...)
	defer view.Unregister(extraViews...)

	client, done := testpb.NewTestClient(t)
	defer done()

	ctx := context.Background()
	ctx, _ = tag.New(ctx, tag.Insert(keyAccountId, "abc123"))

	var (
		clientMethodTag   = tag.Tag{Key: ocgrpc.KeyClientMethod, Value: "testpb.Foo/Multiple"}
		serverMethodTag   = tag.Tag{Key: ocgrpc.KeyServerMethod, Value: "testpb.Foo/Multiple"}
		clientStatusOKTag = tag.Tag{Key: ocgrpc.KeyClientStatus, Value: "OK"}
		serverStatusOKTag = tag.Tag{Key: ocgrpc.KeyServerStatus, Value: "OK"}
	)

	const msgCount = 3
	stream, err := client.Multiple(ctx)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < msgCount; i++ {
		// Fix: the Send error was previously discarded.
		if err := stream.Send(&testpb.FooRequest{}); err != nil {
			t.Fatal(err)
		}
		if _, err := stream.Recv(); err != nil {
			t.Fatal(err)
		}
	}
	if err := stream.CloseSend(); err != nil {
		t.Fatal(err)
	}
	// A clean stream shutdown surfaces as io.EOF on the final Recv.
	if _, err = stream.Recv(); err != io.EOF {
		t.Fatal(err)
	}

	checkCount(t, ocgrpc.ClientCompletedRPCsView, 1, clientMethodTag, clientStatusOKTag)
	checkCount(t, ocgrpc.ServerCompletedRPCsView, 1, serverMethodTag, serverStatusOKTag)

	tcs := []struct {
		v   *view.View
		tag tag.Tag
	}{
		{ocgrpc.ClientSentMessagesPerRPCView, clientMethodTag},
		{ocgrpc.ServerReceivedMessagesPerRPCView, serverMethodTag},
		{ocgrpc.ServerSentMessagesPerRPCView, serverMethodTag},
		{ocgrpc.ClientReceivedMessagesPerRPCView, clientMethodTag},
	}
	for _, tt := range tcs {
		dist := getDistribution(t, tt.v, tt.tag)
		// Fix: the failure message previously always named
		// ServerSentMessagesPerRPCView and labelled the value "Count"
		// even though it compares the Mean of each view in turn.
		if got, want := dist.Mean, float64(msgCount); got != want {
			t.Errorf("%q.Mean = %v; want %v", tt.v.Name, got, want)
		}
	}
}
// checkCount asserts that the row of v identified by tags holds the count
// want. Invalid tag specifications and missing rows are reported by getCount
// itself, in which case no further error is emitted here.
func checkCount(t *testing.T, v *view.View, want int64, tags ...tag.Tag) {
	got, ok := getCount(t, v, tags...)
	if !ok || got == want {
		return
	}
	t.Errorf("View[name=%q].Row[tags=%v].Data = %d; want %d", v.Name, tags, got, want)
}
// getCount returns the CountData value of the row of v whose tags exactly
// match tags (in TagKeys order). The boolean result is false when the tag
// specification is invalid or no matching row exists; in either case the
// problem has already been reported on t via Errorf.
func getCount(t *testing.T, v *view.View, tags ...tag.Tag) (int64, bool) {
	valid := len(tags) == len(v.TagKeys)
	if valid {
		for i, key := range v.TagKeys {
			if tags[i].Key != key {
				valid = false
				break
			}
		}
	}
	if !valid {
		t.Errorf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags)
		return 0, false
	}
	rows, err := view.RetrieveData(v.Name)
	if err != nil {
		t.Fatal(err)
	}
	// Return directly from the first row whose tag set matches exactly.
	for _, row := range rows {
		if reflect.DeepEqual(row.Tags, tags) {
			return row.Data.(*view.CountData).Value, true
		}
	}
	var gotTags [][]tag.Tag
	for _, row := range rows {
		gotTags = append(gotTags, row.Tags)
	}
	t.Errorf("Failed to find row with keys %v among:\n%v", tags, gotTags)
	return 0, false
}
// getDistribution returns the DistributionData of the row of v whose tags
// exactly match tags (in TagKeys order). Unlike getCount, any failure —
// invalid tag specification or missing row — aborts the test via Fatalf.
func getDistribution(t *testing.T, v *view.View, tags ...tag.Tag) *view.DistributionData {
	if len(tags) != len(v.TagKeys) {
		t.Fatalf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags)
		return nil
	}
	for i, key := range v.TagKeys {
		if tags[i].Key != key {
			t.Fatalf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags)
			return nil
		}
	}
	rows, err := view.RetrieveData(v.Name)
	if err != nil {
		t.Fatal(err)
	}
	// Return directly from the first row whose tag set matches exactly.
	for _, row := range rows {
		if reflect.DeepEqual(row.Tags, tags) {
			return row.Data.(*view.DistributionData)
		}
	}
	var gotTags [][]tag.Tag
	for _, row := range rows {
		gotTags = append(gotTags, row.Tags)
	}
	t.Fatalf("Failed to find row with keys %v among:\n%v", tags, gotTags)
	return nil
}

View File

@@ -24,7 +24,7 @@ import (
func ExampleClientHandler() {
// Subscribe views to collect data.
if err := view.Subscribe(ocgrpc.DefaultClientViews...); err != nil {
if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
log.Fatal(err)
}
@@ -39,7 +39,7 @@ func ExampleClientHandler() {
func ExampleServerHandler() {
// Subscribe to views to collect data.
if err := view.Subscribe(ocgrpc.DefaultServerViews...); err != nil {
if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
log.Fatal(err)
}

View File

@@ -31,14 +31,12 @@ func TestClientHandler(t *testing.T) {
ctx := context.Background()
te := &traceExporter{}
trace.RegisterExporter(te)
if err := ClientRequestCountView.Subscribe(); err != nil {
if err := view.Register(ClientSentMessagesPerRPCView); err != nil {
t.Fatal(err)
}
defer view.Unregister(ClientSentMessagesPerRPCView)
span := trace.NewSpan("/foo", nil, trace.StartOptions{
Sampler: trace.AlwaysSample(),
})
ctx = trace.WithSpan(ctx, span)
ctx, _ = trace.StartSpan(ctx, "/foo", trace.WithSampler(trace.AlwaysSample()))
var handler ClientHandler
ctx = handler.TagRPC(ctx, &stats.RPCTagInfo{
@@ -53,7 +51,7 @@ func TestClientHandler(t *testing.T) {
EndTime: time.Now(),
})
stats, err := view.RetrieveData(ClientRequestCountView.Name)
stats, err := view.RetrieveData(ClientSentMessagesPerRPCView.Name)
if err != nil {
t.Fatal(err)
}
@@ -65,9 +63,6 @@ func TestClientHandler(t *testing.T) {
if got, want := len(traces), 1; got != want {
t.Errorf("Got %v traces; want %v", got, want)
}
// Cleanup.
view.Unsubscribe(ClientErrorCountView)
}
func TestServerHandler(t *testing.T) {
@@ -94,7 +89,7 @@ func TestServerHandler(t *testing.T) {
te := &traceExporter{}
trace.RegisterExporter(te)
if err := ServerRequestCountView.Subscribe(); err != nil {
if err := view.Register(ServerCompletedRPCsView); err != nil {
t.Fatal(err)
}
@@ -112,7 +107,7 @@ func TestServerHandler(t *testing.T) {
EndTime: time.Now(),
})
rows, err := view.RetrieveData(ServerRequestCountView.Name)
rows, err := view.RetrieveData(ServerCompletedRPCsView.Name)
if err != nil {
t.Fatal(err)
}
@@ -126,7 +121,7 @@ func TestServerHandler(t *testing.T) {
}
// Cleanup.
view.Unsubscribe(ServerRequestCountView)
view.Unregister(ServerCompletedRPCsView)
})
}
}

View File

@@ -69,7 +69,7 @@ func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) con
// HandleRPC implements per-RPC tracing and stats instrumentation.
func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
traceHandleRPC(ctx, rs)
s.statsHandleRPC(ctx, rs)
statsHandleRPC(ctx, rs)
}
// TagRPC implements per-RPC context management.

View File

@@ -23,14 +23,11 @@ import (
// The following variables are measures are recorded by ServerHandler:
var (
ServerErrorCount, _ = stats.Int64("grpc.io/server/error_count", "RPC Errors", stats.UnitNone)
ServerServerElapsedTime, _ = stats.Float64("grpc.io/server/server_elapsed_time", "Server elapsed time in msecs", stats.UnitMilliseconds)
ServerRequestBytes, _ = stats.Int64("grpc.io/server/request_bytes", "Request bytes", stats.UnitBytes)
ServerResponseBytes, _ = stats.Int64("grpc.io/server/response_bytes", "Response bytes", stats.UnitBytes)
ServerStartedCount, _ = stats.Int64("grpc.io/server/started_count", "Number of server RPCs (streams) started", stats.UnitNone)
ServerFinishedCount, _ = stats.Int64("grpc.io/server/finished_count", "Number of server RPCs (streams) finished", stats.UnitNone)
ServerRequestCount, _ = stats.Int64("grpc.io/server/request_count", "Number of server RPC request messages", stats.UnitNone)
ServerResponseCount, _ = stats.Int64("grpc.io/server/response_count", "Number of server RPC response messages", stats.UnitNone)
ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes)
ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
)
// TODO(acetechnologist): This is temporary and will need to be replaced by a
@@ -42,63 +39,59 @@ var (
// package. These are declared as a convenience only; none are subscribed by
// default.
var (
ServerErrorCountView = &view.View{
Name: "grpc.io/server/error_count",
Description: "RPC Errors",
TagKeys: []tag.Key{KeyMethod, KeyStatus},
Measure: ServerErrorCount,
Aggregation: view.Count(),
ServerReceivedBytesPerRPCView = &view.View{
Name: "grpc.io/server/received_bytes_per_rpc",
Description: "Distribution of received bytes per RPC, by method.",
Measure: ServerReceivedBytesPerRPC,
TagKeys: []tag.Key{KeyServerMethod},
Aggregation: DefaultBytesDistribution,
}
ServerServerElapsedTimeView = &view.View{
Name: "grpc.io/server/server_elapsed_time",
Description: "Server elapsed time in msecs",
TagKeys: []tag.Key{KeyMethod},
Measure: ServerServerElapsedTime,
ServerSentBytesPerRPCView = &view.View{
Name: "grpc.io/server/sent_bytes_per_rpc",
Description: "Distribution of total sent bytes per RPC, by method.",
Measure: ServerSentBytesPerRPC,
TagKeys: []tag.Key{KeyServerMethod},
Aggregation: DefaultBytesDistribution,
}
ServerLatencyView = &view.View{
Name: "grpc.io/server/server_latency",
Description: "Distribution of server latency in milliseconds, by method.",
TagKeys: []tag.Key{KeyServerMethod},
Measure: ServerLatency,
Aggregation: DefaultMillisecondsDistribution,
}
ServerRequestBytesView = &view.View{
Name: "grpc.io/server/request_bytes",
Description: "Request bytes",
TagKeys: []tag.Key{KeyMethod},
Measure: ServerRequestBytes,
Aggregation: DefaultBytesDistribution,
ServerCompletedRPCsView = &view.View{
Name: "grpc.io/server/completed_rpcs",
Description: "Count of RPCs by method and status.",
TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus},
Measure: ServerLatency,
Aggregation: view.Count(),
}
ServerResponseBytesView = &view.View{
Name: "grpc.io/server/response_bytes",
Description: "Response bytes",
TagKeys: []tag.Key{KeyMethod},
Measure: ServerResponseBytes,
Aggregation: DefaultBytesDistribution,
}
ServerRequestCountView = &view.View{
Name: "grpc.io/server/request_count",
Description: "Count of request messages per server RPC",
TagKeys: []tag.Key{KeyMethod},
Measure: ServerRequestCount,
ServerReceivedMessagesPerRPCView = &view.View{
Name: "grpc.io/server/received_messages_per_rpc",
Description: "Distribution of messages received count per RPC, by method.",
TagKeys: []tag.Key{KeyServerMethod},
Measure: ServerReceivedMessagesPerRPC,
Aggregation: DefaultMessageCountDistribution,
}
ServerResponseCountView = &view.View{
Name: "grpc.io/server/response_count",
Description: "Count of response messages per server RPC",
TagKeys: []tag.Key{KeyMethod},
Measure: ServerResponseCount,
ServerSentMessagesPerRPCView = &view.View{
Name: "grpc.io/server/sent_messages_per_rpc",
Description: "Distribution of messages sent count per RPC, by method.",
TagKeys: []tag.Key{KeyServerMethod},
Measure: ServerSentMessagesPerRPC,
Aggregation: DefaultMessageCountDistribution,
}
)
// DefaultServerViews are the default server views provided by this package.
var DefaultServerViews = []*view.View{
ServerErrorCountView,
ServerServerElapsedTimeView,
ServerRequestBytesView,
ServerResponseBytesView,
ServerRequestCountView,
ServerResponseCountView,
ServerReceivedBytesPerRPCView,
ServerSentBytesPerRPCView,
ServerLatencyView,
ServerCompletedRPCsView,
}
// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.

View File

@@ -0,0 +1,146 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ocgrpc
import (
"strings"
"testing"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
)
// TestSpecServerMeasures verifies that the server-side measures declared by
// this package match the canonical gRPC stats specification table: for each
// spec row, the measure in the same position must carry the listed name,
// unit and description.
func TestSpecServerMeasures(t *testing.T) {
	// Markdown table copied from the gRPC OpenCensus stats spec. The first
	// three lines (leading blank line, header, separator) are dropped by the
	// [3:] slice below.
	spec := `
| Measure name | Unit | Description |
|------------------------------------------|------|-----------------------------------------------------------------------------------------------|
| grpc.io/server/received_messages_per_rpc | 1 | Number of messages received in each RPC. Has value 1 for non-streaming RPCs. |
| grpc.io/server/received_bytes_per_rpc | By | Total bytes received across all messages per RPC. |
| grpc.io/server/sent_messages_per_rpc | 1 | Number of messages sent in each RPC. Has value 1 for non-streaming RPCs. |
| grpc.io/server/sent_bytes_per_rpc | By | Total bytes sent in across all response messages per RPC. |
| grpc.io/server/server_latency | ms | Time between first byte of request received to last byte of response sent, or terminal error. |`
	lines := strings.Split(spec, "\n")[3:]
	// measureDef is one parsed row of the spec table.
	type measureDef struct {
		name string // fully-qualified measure name
		unit string // unit string as used by the stats package ("1", "By", "ms")
		desc string // human-readable description
	}
	measureDefs := make([]measureDef, 0, len(lines))
	for _, line := range lines {
		// colSep (declared elsewhere in this package — presumably a regexp
		// splitting on "|"; TODO confirm) yields an empty leading cell,
		// dropped by [1:].
		cols := colSep.Split(line, -1)[1:]
		if len(cols) < 3 {
			t.Fatalf("Invalid config line %#v", cols)
		}
		measureDefs = append(measureDefs, measureDef{cols[0], cols[1], cols[2]})
	}
	// Must be listed in the same order as the spec rows above.
	gotMeasures := []stats.Measure{
		ServerReceivedMessagesPerRPC,
		ServerReceivedBytesPerRPC,
		ServerSentMessagesPerRPC,
		ServerSentBytesPerRPC,
		ServerLatency,
	}
	if got, want := len(gotMeasures), len(measureDefs); got != want {
		t.Fatalf("len(gotMeasures) = %d; want %d", got, want)
	}
	// Compare each measure's metadata against its spec row.
	for i, m := range gotMeasures {
		defn := measureDefs[i]
		if got, want := m.Name(), defn.name; got != want {
			t.Errorf("Name = %q; want %q", got, want)
		}
		if got, want := m.Unit(), defn.unit; got != want {
			t.Errorf("%q: Unit = %q; want %q", defn.name, got, want)
		}
		if got, want := m.Description(), defn.desc; got != want {
			t.Errorf("%q: Description = %q; want %q", defn.name, got, want)
		}
	}
}
// TestSpecServerViews verifies the server views against the gRPC stats
// specification: each view's name, underlying measure, aggregation kind and
// tag keys must match the spec row in the same position. DefaultServerViews
// covers the first table; the two per-RPC message-count views come from the
// second ("extra") table and are not registered by default.
func TestSpecServerViews(t *testing.T) {
	defaultViewsSpec := `
| View name | Measure suffix | Aggregation | Tags suffix |
|---------------------------------------|------------------------|--------------|------------------------------|
| grpc.io/server/received_bytes_per_rpc | received_bytes_per_rpc | distribution | server_method |
| grpc.io/server/sent_bytes_per_rpc | sent_bytes_per_rpc | distribution | server_method |
| grpc.io/server/server_latency | server_latency | distribution | server_method |
| grpc.io/server/completed_rpcs | server_latency | count | server_method, server_status |`
	extraViewsSpec := `
| View name | Measure suffix | Aggregation | Tags suffix |
|------------------------------------------|---------------------------|--------------|---------------|
| grpc.io/server/received_messages_per_rpc | received_messages_per_rpc | distribution | server_method |
| grpc.io/server/sent_messages_per_rpc | sent_messages_per_rpc | distribution | server_method |`
	// Drop the blank line, header and separator of each table.
	lines := strings.Split(defaultViewsSpec, "\n")[3:]
	lines = append(lines, strings.Split(extraViewsSpec, "\n")[3:]...)
	// viewDef is one parsed row of the spec tables.
	type viewDef struct {
		name          string // full view name
		measureSuffix string // measure name without the "grpc.io/server/" prefix
		aggregation   string // "distribution" or "count"
		tags          string // comma-separated tag-key suffixes (without "grpc_")
	}
	viewDefs := make([]viewDef, 0, len(lines))
	for _, line := range lines {
		// colSep (declared elsewhere in this package) splits a table row on
		// "|"; [1:] drops the empty cell before the leading "|".
		cols := colSep.Split(line, -1)[1:]
		if len(cols) < 4 {
			t.Fatalf("Invalid config line %#v", cols)
		}
		viewDefs = append(viewDefs, viewDef{cols[0], cols[1], cols[2], cols[3]})
	}
	// Copy before appending so the extra views cannot be written into
	// DefaultServerViews' backing array.
	views := append([]*view.View(nil), DefaultServerViews...)
	views = append(views, ServerReceivedMessagesPerRPCView, ServerSentMessagesPerRPCView)
	if got, want := len(views), len(viewDefs); got != want {
		// Was "len(gotMeasures)" — copy-paste from the measures test; this
		// check is about the views slice.
		t.Fatalf("len(views) = %d; want %d", got, want)
	}
	for i, v := range views {
		defn := viewDefs[i]
		if got, want := v.Name, defn.name; got != want {
			t.Errorf("Name = %q; want %q", got, want)
		}
		if got, want := v.Measure.Name(), "grpc.io/server/"+defn.measureSuffix; got != want {
			t.Errorf("%q: Measure.Name = %q; want %q", defn.name, got, want)
		}
		// The aggregation column only distinguishes distribution vs count.
		switch v.Aggregation.Type {
		case view.AggTypeDistribution:
			if got, want := "distribution", defn.aggregation; got != want {
				// Was mislabeled "Description"; this compares aggregations.
				t.Errorf("%q: Aggregation = %q; want %q", defn.name, got, want)
			}
		case view.AggTypeCount:
			if got, want := "count", defn.aggregation; got != want {
				t.Errorf("%q: Aggregation = %q; want %q", defn.name, got, want)
			}
		default:
			t.Errorf("Invalid aggregation type")
		}
		// Every spec tag suffix maps to a key named "grpc_<suffix>".
		wantTags := strings.Split(defn.tags, ", ")
		if got, want := len(v.TagKeys), len(wantTags); got != want {
			t.Errorf("len(TagKeys) = %d; want %d", got, want)
		}
		for j := range wantTags {
			if got, want := v.TagKeys[j].Name(), "grpc_"+wantTags[j]; got != want {
				t.Errorf("TagKeys[%d].Name() = %q; want %q", j, got, want)
			}
		}
	}
}

View File

@@ -16,17 +16,13 @@
package ocgrpc
import (
"fmt"
"sync/atomic"
"time"
"golang.org/x/net/context"
ocstats "go.opencensus.io/stats"
"go.opencensus.io/tag"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from
@@ -35,7 +31,7 @@ func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo)
startTime := time.Now()
if info == nil {
if grpclog.V(2) {
grpclog.Infof("serverHandler.TagRPC called with nil info.", info.FullMethodName)
grpclog.Infof("opencensus: TagRPC called with nil info.")
}
return ctx
}
@@ -43,95 +39,25 @@ func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo)
startTime: startTime,
method: info.FullMethodName,
}
ctx, _ = h.createTags(ctx)
record(ctx, d, "", ServerStartedCount.M(1))
return context.WithValue(ctx, grpcServerRPCKey, d)
propagated := h.extractPropagatedTags(ctx)
ctx = tag.NewContext(ctx, propagated)
ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
return context.WithValue(ctx, rpcDataKey, d)
}
// statsHandleRPC processes the RPC events.
func (h *ServerHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) {
switch st := s.(type) {
case *stats.Begin, *stats.InHeader, *stats.InTrailer, *stats.OutHeader, *stats.OutTrailer:
// Do nothing for server
case *stats.InPayload:
h.handleRPCInPayload(ctx, st)
case *stats.OutPayload:
// For stream it can be called multiple times per RPC.
h.handleRPCOutPayload(ctx, st)
case *stats.End:
h.handleRPCEnd(ctx, st)
default:
grpclog.Infof("unexpected stats: %T", st)
}
}
func (h *ServerHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
d, ok := ctx.Value(grpcServerRPCKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("handleRPCInPayload: failed to retrieve *rpcData from context")
}
return
}
record(ctx, d, "", ServerRequestBytes.M(int64(s.Length)))
atomic.AddInt64(&d.reqCount, 1)
}
func (h *ServerHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
d, ok := ctx.Value(grpcServerRPCKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("handleRPCOutPayload: failed to retrieve *rpcData from context")
}
return
}
record(ctx, d, "", ServerResponseBytes.M(int64(s.Length)))
atomic.AddInt64(&d.respCount, 1)
}
func (h *ServerHandler) handleRPCEnd(ctx context.Context, s *stats.End) {
d, ok := ctx.Value(grpcServerRPCKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("serverHandler.handleRPCEnd failed to retrieve *rpcData from context")
}
return
}
elapsedTime := time.Since(d.startTime)
reqCount := atomic.LoadInt64(&d.reqCount)
respCount := atomic.LoadInt64(&d.respCount)
m := []ocstats.Measurement{
ServerRequestCount.M(reqCount),
ServerResponseCount.M(respCount),
ServerFinishedCount.M(1),
ServerServerElapsedTime.M(float64(elapsedTime) / float64(time.Millisecond)),
}
var st string
if s.Error != nil {
s, ok := status.FromError(s.Error)
if ok {
st = s.Code().String()
}
m = append(m, ServerErrorCount.M(1))
}
record(ctx, d, st, m...)
}
// createTags creates a new tag map containing the tags extracted from the
// extractPropagatedTags creates a new tag map containing the tags extracted from the
// gRPC metadata.
func (h *ServerHandler) createTags(ctx context.Context) (context.Context, error) {
func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
buf := stats.Tags(ctx)
if buf == nil {
return ctx, nil
return nil
}
propagated, err := tag.Decode(buf)
if err != nil {
return nil, fmt.Errorf("serverHandler.createTags failed to decode: %v", err)
if grpclog.V(2) {
grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err)
}
return tag.NewContext(ctx, propagated), nil
return nil
}
return propagated
}

View File

@@ -74,44 +74,44 @@ func TestServerDefaultCollections(t *testing.T) {
},
[]*wantData{
{
func() *view.View { return ServerRequestCountView },
func() *view.View { return ServerReceivedMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
},
},
},
{
func() *view.View { return ServerResponseCountView },
func() *view.View { return ServerSentMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
},
},
},
{
func() *view.View { return ServerRequestBytesView },
func() *view.View { return ServerReceivedBytesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
},
},
},
{
func() *view.View { return ServerResponseBytesView },
func() *view.View { return ServerSentBytesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
},
@@ -151,34 +151,22 @@ func TestServerDefaultCollections(t *testing.T) {
},
[]*wantData{
{
func() *view.View { return ServerErrorCountView },
func() *view.View { return ServerReceivedMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyStatus, Value: "Canceled"},
{Key: KeyMethod, Value: "package.service/method"},
},
Data: newCountData(1),
},
},
},
{
func() *view.View { return ServerRequestCountView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5),
},
},
},
{
func() *view.View { return ServerResponseCountView },
func() *view.View { return ServerSentMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5),
},
@@ -231,65 +219,46 @@ func TestServerDefaultCollections(t *testing.T) {
},
[]*wantData{
{
func() *view.View { return ServerErrorCountView },
func() *view.View { return ServerReceivedMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyStatus, Value: "Canceled"},
{Key: KeyMethod, Value: "package.service/method"},
},
Data: newCountData(1),
},
{
Tags: []tag.Tag{
{Key: KeyStatus, Value: "Aborted"},
{Key: KeyMethod, Value: "package.service/method"},
},
Data: newCountData(1),
},
},
},
{
func() *view.View { return ServerRequestCountView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2),
},
},
},
{
func() *view.View { return ServerResponseCountView },
func() *view.View { return ServerSentMessagesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2),
},
},
},
{
func() *view.View { return ServerRequestBytesView },
func() *view.View { return ServerReceivedBytesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 1, 16384, 4864.25, 59678208.25*3),
Data: newDistributionData([]int64{0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.6666667, 2.1459558466666667e+08),
},
},
},
{
func() *view.View { return ServerResponseBytesView },
func() *view.View { return ServerSentBytesPerRPCView },
[]*view.Row{
{
Tags: []tag.Tag{
{Key: KeyMethod, Value: "package.service/method"},
{Key: KeyServerMethod, Value: "package.service/method"},
},
Data: newDistributionData([]int64{0, 1, 1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 1, 65536, 13696.125, 481423542.982143*7),
Data: newDistributionData([]int64{0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09),
},
},
},
@@ -297,8 +266,10 @@ func TestServerDefaultCollections(t *testing.T) {
},
}
views := append(DefaultServerViews[:], ServerReceivedMessagesPerRPCView, ServerSentMessagesPerRPCView)
for _, tc := range tcs {
if err := view.Subscribe(DefaultServerViews...); err != nil {
if err := view.Register(views...); err != nil {
t.Fatal(err)
}
@@ -349,18 +320,12 @@ func TestServerDefaultCollections(t *testing.T) {
}
// Unregister views to cleanup.
view.Unsubscribe(DefaultServerViews...)
view.Unregister(views...)
}
}
func newCountData(v int) *view.CountData {
cav := view.CountData(v)
return &cav
}
func newMeanData(count int64, mean float64) *view.MeanData {
mav := view.MeanData{Count: count, Mean: mean}
return &mav
return &view.CountData{Value: int64(v)}
}
func newDistributionData(countPerBucket []int64, count int64, min, max, mean, sumOfSquaredDev float64) *view.DistributionData {

View File

@@ -16,13 +16,19 @@
package ocgrpc
import (
"context"
"strconv"
"strings"
"sync/atomic"
"time"
ocstats "go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
type grpcInstrumentationKey string
@@ -33,7 +39,7 @@ type grpcInstrumentationKey string
type rpcData struct {
// reqCount and respCount has to be the first words
// in order to be 64-aligned on 32-bit architectures.
reqCount, respCount int64 // access atomically
sentCount, sentBytes, recvCount, recvBytes int64 // access atomically
// startTime represents the time at which TagRPC was invoked at the
// beginning of an RPC. It is an appoximation of the time when the
@@ -46,32 +52,148 @@ type rpcData struct {
// both the default GRPC client and GRPC server metrics.
var (
DefaultBytesDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
DefaultMillisecondsDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
DefaultMillisecondsDistribution = view.Distribution(0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
)
var (
KeyMethod, _ = tag.NewKey("method") // gRPC service and method name
KeyStatus, _ = tag.NewKey("canonical_status") // Canonical status code
KeyServerMethod, _ = tag.NewKey("grpc_server_method")
KeyClientMethod, _ = tag.NewKey("grpc_client_method")
KeyServerStatus, _ = tag.NewKey("grpc_server_status")
KeyClientStatus, _ = tag.NewKey("grpc_client_status")
)
var (
grpcServerConnKey = grpcInstrumentationKey("server-conn")
grpcServerRPCKey = grpcInstrumentationKey("server-rpc")
grpcClientRPCKey = grpcInstrumentationKey("client-rpc")
rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
)
// methodName strips every leading "/" from a full gRPC method name, e.g.
// "/package.service/method" becomes "package.service/method".
func methodName(fullname string) string {
	name := fullname
	for strings.HasPrefix(name, "/") {
		name = strings.TrimPrefix(name, "/")
	}
	return name
}
func record(ctx context.Context, data *rpcData, status string, m ...ocstats.Measurement) {
mods := []tag.Mutator{
tag.Upsert(KeyMethod, methodName(data.method)),
// statsHandleRPC processes the RPC events.
func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
switch st := s.(type) {
case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
// do nothing for client
case *stats.OutPayload:
handleRPCOutPayload(ctx, st)
case *stats.InPayload:
handleRPCInPayload(ctx, st)
case *stats.End:
handleRPCEnd(ctx, st)
default:
grpclog.Infof("unexpected stats: %T", st)
}
if status != "" {
mods = append(mods, tag.Upsert(KeyStatus, status))
}
ctx, _ = tag.New(ctx, mods...)
ocstats.Record(ctx, m...)
// handleRPCOutPayload accumulates the count and byte size of an outgoing
// payload message into the per-RPC data that TagRPC stored in the context.
func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
	data, ok := ctx.Value(rpcDataKey).(*rpcData)
	if !ok {
		// Without per-RPC data there is nothing to accumulate into.
		if grpclog.V(2) {
			grpclog.Infoln("Failed to retrieve *rpcData from context.")
		}
		return
	}
	atomic.AddInt64(&data.sentCount, 1)
	atomic.AddInt64(&data.sentBytes, int64(s.Length))
}
// handleRPCInPayload accumulates the count and byte size of an incoming
// payload message into the per-RPC data that TagRPC stored in the context.
func handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
	data, ok := ctx.Value(rpcDataKey).(*rpcData)
	if !ok {
		// Without per-RPC data there is nothing to accumulate into.
		if grpclog.V(2) {
			grpclog.Infoln("Failed to retrieve *rpcData from context.")
		}
		return
	}
	atomic.AddInt64(&data.recvCount, 1)
	atomic.AddInt64(&data.recvBytes, int64(s.Length))
}
// handleRPCEnd records all per-RPC measures when the RPC finishes: it reads
// the counters accumulated by the payload handlers, computes the elapsed time
// from the start time captured in TagRPC, maps the RPC outcome to a canonical
// status string, and records the client- or server-side measurements
// (depending on s.Client) under the appropriate method/status tags.
func handleRPCEnd(ctx context.Context, s *stats.End) {
	d, ok := ctx.Value(rpcDataKey).(*rpcData)
	if !ok {
		if grpclog.V(2) {
			grpclog.Infoln("Failed to retrieve *rpcData from context.")
		}
		return
	}
	elapsedTime := time.Since(d.startTime)

	// Map the outcome to a canonical status-code string. Note: if the error
	// cannot be converted to a gRPC status, st stays "" and an empty status
	// tag is recorded (pre-existing behavior).
	var st string
	if s.Error != nil {
		// Named grpcStatus to avoid shadowing the *stats.End parameter s,
		// which is still needed below for s.Client.
		grpcStatus, ok := status.FromError(s.Error)
		if ok {
			st = statusCodeToString(grpcStatus)
		}
	} else {
		st = "OK"
	}

	latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
	if s.Client {
		ctx, _ = tag.New(ctx,
			tag.Upsert(KeyClientMethod, methodName(d.method)),
			tag.Upsert(KeyClientStatus, st))
		ocstats.Record(ctx,
			ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
			ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
			ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
			ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
			ClientRoundtripLatency.M(latencyMillis))
	} else {
		// The server method tag was already upserted by statsTagRPC.
		ctx, _ = tag.New(ctx, tag.Upsert(KeyServerStatus, st))
		ocstats.Record(ctx,
			ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
			ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
			ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
			ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
			ServerLatency.M(latencyMillis))
	}
}
// statusCodeToString converts a gRPC status to the canonical
// SCREAMING_SNAKE_CASE name of its code; codes without a well-known name are
// rendered as "CODE_<n>".
// See https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
func statusCodeToString(s *status.Status) string {
	var name string
	switch c := s.Code(); c {
	case codes.OK:
		name = "OK"
	case codes.Canceled:
		name = "CANCELLED"
	case codes.Unknown:
		name = "UNKNOWN"
	case codes.InvalidArgument:
		name = "INVALID_ARGUMENT"
	case codes.DeadlineExceeded:
		name = "DEADLINE_EXCEEDED"
	case codes.NotFound:
		name = "NOT_FOUND"
	case codes.AlreadyExists:
		name = "ALREADY_EXISTS"
	case codes.PermissionDenied:
		name = "PERMISSION_DENIED"
	case codes.ResourceExhausted:
		name = "RESOURCE_EXHAUSTED"
	case codes.FailedPrecondition:
		name = "FAILED_PRECONDITION"
	case codes.Aborted:
		name = "ABORTED"
	case codes.OutOfRange:
		name = "OUT_OF_RANGE"
	case codes.Unimplemented:
		name = "UNIMPLEMENTED"
	case codes.Internal:
		name = "INTERNAL"
	case codes.Unavailable:
		name = "UNAVAILABLE"
	case codes.DataLoss:
		name = "DATA_LOSS"
	case codes.Unauthenticated:
		name = "UNAUTHENTICATED"
	default:
		// Unrecognized code: fall back to a numeric rendering.
		name = "CODE_" + strconv.FormatInt(int64(c), 10)
	}
	return name
}

View File

@@ -36,11 +36,9 @@ const traceContextKey = "grpc-trace-bin"
func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
name := strings.TrimPrefix(rti.FullMethodName, "/")
name = strings.Replace(name, "/", ".", -1)
span := trace.NewSpan(name, trace.FromContext(ctx), trace.StartOptions{
Sampler: c.StartOptions.Sampler,
SpanKind: trace.SpanKindClient,
}) // span is ended by traceHandleRPC
ctx = trace.WithSpan(ctx, span)
ctx, span := trace.StartSpan(ctx, name,
trace.WithSampler(c.StartOptions.Sampler),
trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC
traceContextBinary := propagation.Binary(span.SpanContext())
return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary))
}
@@ -52,11 +50,6 @@ func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo)
//
// It returns ctx, with the new trace span added.
func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
opts := trace.StartOptions{
Sampler: s.StartOptions.Sampler,
SpanKind: trace.SpanKindServer,
}
md, _ := metadata.FromIncomingContext(ctx)
name := strings.TrimPrefix(rti.FullMethodName, "/")
name = strings.Replace(name, "/", ".", -1)
@@ -72,15 +65,20 @@ func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo)
traceContextBinary := []byte(traceContext[0])
parent, haveParent = propagation.FromBinary(traceContextBinary)
if haveParent && !s.IsPublicEndpoint {
span := trace.NewSpanWithRemoteParent(name, parent, opts)
return trace.WithSpan(ctx, span)
ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent,
trace.WithSpanKind(trace.SpanKindServer),
trace.WithSampler(s.StartOptions.Sampler),
)
return ctx
}
}
span := trace.NewSpan(name, nil, opts)
ctx, span := trace.StartSpan(ctx, name,
trace.WithSpanKind(trace.SpanKindServer),
trace.WithSampler(s.StartOptions.Sampler))
if haveParent {
span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild})
}
return trace.WithSpan(ctx, span)
return ctx
}
func traceHandleRPC(ctx context.Context, rs stats.RPCStats) {

View File

@@ -33,7 +33,7 @@ func (t *testExporter) ExportSpan(s *trace.SpanData) {
}
func TestStreaming(t *testing.T) {
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
te := testExporter{make(chan *trace.SpanData)}
trace.RegisterExporter(&te)
defer trace.UnregisterExporter(&te)
@@ -76,7 +76,7 @@ func TestStreaming(t *testing.T) {
}
func TestStreamingFail(t *testing.T) {
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
te := testExporter{make(chan *trace.SpanData)}
trace.RegisterExporter(&te)
defer trace.UnregisterExporter(&te)
@@ -117,7 +117,7 @@ func TestStreamingFail(t *testing.T) {
}
func TestSingle(t *testing.T) {
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
te := testExporter{make(chan *trace.SpanData)}
trace.RegisterExporter(&te)
defer trace.UnregisterExporter(&te)
@@ -150,7 +150,7 @@ func TestServerSpanDuration(t *testing.T) {
trace.RegisterExporter(&te)
defer trace.UnregisterExporter(&te)
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
ctx := context.Background()
const sleep = 100 * time.Millisecond
@@ -174,7 +174,7 @@ loop:
}
func TestSingleFail(t *testing.T) {
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
te := testExporter{make(chan *trace.SpanData)}
trace.RegisterExporter(&te)
defer trace.UnregisterExporter(&te)

View File

@@ -36,8 +36,8 @@ func TestClient(t *testing.T) {
}))
defer server.Close()
for _, v := range ochttp.DefaultClientViews {
v.Subscribe()
if err := view.Register(ochttp.DefaultClientViews...); err != nil {
t.Fatalf("Failed to register ochttp.DefaultClientViews error: %v", err)
}
views := []string{
@@ -54,15 +54,14 @@ func TestClient(t *testing.T) {
}
}
var (
w sync.WaitGroup
tr ochttp.Transport
errs = make(chan error, reqCount)
)
w.Add(reqCount)
var wg sync.WaitGroup
var tr ochttp.Transport
errs := make(chan error, reqCount)
wg.Add(reqCount)
for i := 0; i < reqCount; i++ {
go func() {
defer w.Done()
defer wg.Done()
req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body"))
if err != nil {
errs <- fmt.Errorf("error creating request: %v", err)
@@ -81,7 +80,7 @@ func TestClient(t *testing.T) {
}
go func() {
w.Wait()
wg.Wait()
close(errs)
}()
@@ -110,7 +109,7 @@ func TestClient(t *testing.T) {
var count int64
switch data := data.(type) {
case *view.CountData:
count = *(*int64)(data)
count = data.Value
case *view.DistributionData:
count = data.Count
default:

View File

@@ -25,13 +25,13 @@ import (
)
func ExampleTransport() {
if err := view.Subscribe(
// Subscribe to a few default views.
if err := view.Register(
// Register to a few default views.
ochttp.ClientRequestCountByMethod,
ochttp.ClientResponseCountByStatusCode,
ochttp.ClientLatencyView,
// Subscribe to a custom view.
// Register to a custom view.
&view.View{
Name: "httpclient_latency_by_hostpath",
TagKeys: []tag.Key{ochttp.Host, ochttp.Path},

View File

@@ -15,7 +15,7 @@
// Package b3 contains a propagation.HTTPFormat implementation
// for B3 propagation. See https://github.com/openzipkin/b3-propagation
// for more details.
package b3
package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
import (
"encoding/hex"

View File

@@ -14,7 +14,7 @@
// Package tracecontext contains HTTP propagator for TraceContext standard.
// See https://github.com/w3c/distributed-tracing for more information.
package tracecontext
package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext"
import (
"encoding/hex"

View File

@@ -36,7 +36,7 @@ func TestRoundTripAllFormats(t *testing.T) {
}
ctx := context.Background()
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
ctx, span := trace.StartSpan(ctx, "test")
sc := span.SpanContext()
wantStr := fmt.Sprintf("trace_id=%x, span_id=%x, options=%d", sc.TraceID, sc.SpanID, sc.TraceOptions)

View File

@@ -15,7 +15,10 @@
package ochttp
import (
"bufio"
"context"
"errors"
"net"
"net/http"
"strconv"
"sync"
@@ -65,7 +68,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer traceEnd()
w, statsEnd = h.startStats(w, r)
defer statsEnd()
handler := h.Handler
if handler == nil {
handler = http.DefaultServeMux
@@ -74,20 +76,19 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
opts := trace.StartOptions{
Sampler: h.StartOptions.Sampler,
SpanKind: trace.SpanKindServer,
}
name := spanNameFromURL(r.URL)
ctx := r.Context()
var span *trace.Span
sc, ok := h.extractSpanContext(r)
if ok && !h.IsPublicEndpoint {
span = trace.NewSpanWithRemoteParent(name, sc, opts)
ctx = trace.WithSpan(ctx, span)
ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
trace.WithSampler(h.StartOptions.Sampler),
trace.WithSpanKind(trace.SpanKindServer))
} else {
span = trace.NewSpan(name, nil, opts)
ctx, span = trace.StartSpan(ctx, name,
trace.WithSampler(h.StartOptions.Sampler),
trace.WithSpanKind(trace.SpanKindServer),
)
if ok {
span.AddLink(trace.Link{
TraceID: sc.TraceID,
@@ -97,9 +98,8 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ
})
}
}
ctx = trace.WithSpan(ctx, span)
span.AddAttributes(requestAttrs(r)...)
return r.WithContext(trace.WithSpan(r.Context(), span)), span.End
return r.WithContext(ctx), span.End
}
func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) {
@@ -135,17 +135,33 @@ type trackingResponseWriter struct {
respSize int64
start time.Time
statusCode int
statusLine string
endOnce sync.Once
writer http.ResponseWriter
}
var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
var _ http.Hijacker = (*trackingResponseWriter)(nil)
var errHijackerUnimplemented = errors.New("ResponseWriter does not implement http.Hijacker")
func (t *trackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hj, ok := t.writer.(http.Hijacker)
if !ok {
return nil, nil, errHijackerUnimplemented
}
return hj.Hijack()
}
func (t *trackingResponseWriter) end() {
t.endOnce.Do(func() {
if t.statusCode == 0 {
t.statusCode = 200
}
span := trace.FromContext(t.ctx)
span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
m := []stats.Measurement{
ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
ServerResponseBytes.M(t.respSize),
@@ -171,6 +187,7 @@ func (t *trackingResponseWriter) Write(data []byte) (int, error) {
func (t *trackingResponseWriter) WriteHeader(statusCode int) {
t.writer.WriteHeader(statusCode)
t.statusCode = statusCode
t.statusLine = http.StatusText(t.statusCode)
}
func (t *trackingResponseWriter) Flush() {

View File

@@ -1,11 +1,22 @@
package ochttp
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"strings"
"sync"
"testing"
"golang.org/x/net/http2"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
)
@@ -26,8 +37,8 @@ func updateMean(mean float64, sample, count int) float64 {
}
func TestHandlerStatsCollection(t *testing.T) {
for _, v := range DefaultServerViews {
v.Subscribe()
if err := view.Register(DefaultServerViews...); err != nil {
t.Fatalf("Failed to register ochttp.DefaultServerViews error: %v", err)
}
views := []string{
@@ -90,7 +101,7 @@ func TestHandlerStatsCollection(t *testing.T) {
var sum float64
switch data := data.(type) {
case *view.CountData:
count = int(*data)
count = int(data.Value)
case *view.DistributionData:
count = int(data.Count)
sum = data.Sum()
@@ -116,3 +127,227 @@ func TestHandlerStatsCollection(t *testing.T) {
}
}
}
type testResponseWriterHijacker struct {
httptest.ResponseRecorder
}
func (trw *testResponseWriterHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return nil, nil, nil
}
func TestUnitTestHandlerProxiesHijack(t *testing.T) {
tests := []struct {
w http.ResponseWriter
wantErr string
}{
{httptest.NewRecorder(), "ResponseWriter does not implement http.Hijacker"},
{nil, "ResponseWriter does not implement http.Hijacker"},
{new(testResponseWriterHijacker), ""},
}
for i, tt := range tests {
tw := &trackingResponseWriter{writer: tt.w}
conn, buf, err := tw.Hijack()
if tt.wantErr != "" {
if err == nil || !strings.Contains(err.Error(), tt.wantErr) {
t.Errorf("#%d got error (%v) want error substring (%q)", i, err, tt.wantErr)
}
if conn != nil {
t.Errorf("#%d inconsistent state got non-nil conn (%v)", i, conn)
}
if buf != nil {
t.Errorf("#%d inconsistent state got non-nil buf (%v)", i, buf)
}
continue
}
if err != nil {
t.Errorf("#%d got unexpected error %v", i, err)
}
}
}
// Integration test with net/http to ensure that our Handler proxies to its
// response the call to (http.Hijack).Hijacker() and that that successfully
// passes with HTTP/1.1 connections. See Issue #642
func TestHandlerProxiesHijack_HTTP1(t *testing.T) {
cst := httptest.NewServer(&Handler{
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var writeMsg func(string)
defer func() {
err := recover()
writeMsg(fmt.Sprintf("Proto=%s\npanic=%v", r.Proto, err != nil))
}()
conn, _, _ := w.(http.Hijacker).Hijack()
writeMsg = func(msg string) {
fmt.Fprintf(conn, "%s 200\nContentLength: %d", r.Proto, len(msg))
fmt.Fprintf(conn, "\r\n\r\n%s", msg)
conn.Close()
}
}),
})
defer cst.Close()
testCases := []struct {
name string
tr *http.Transport
want string
}{
{
name: "http1-transport",
tr: new(http.Transport),
want: "Proto=HTTP/1.1\npanic=false",
},
{
name: "http2-transport",
tr: func() *http.Transport {
tr := new(http.Transport)
http2.ConfigureTransport(tr)
return tr
}(),
want: "Proto=HTTP/1.1\npanic=false",
},
}
for _, tc := range testCases {
c := &http.Client{Transport: &Transport{Base: tc.tr}}
res, err := c.Get(cst.URL)
if err != nil {
t.Errorf("(%s) unexpected error %v", tc.name, err)
continue
}
blob, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
if g, w := string(blob), tc.want; g != w {
t.Errorf("(%s) got = %q; want = %q", tc.name, g, w)
}
}
}
// Integration test with net/http, x/net/http2 to ensure that our Handler proxies
// to its response the call to (http.Hijack).Hijacker() and that that crashes
// since http.Hijacker and HTTP/2.0 connections are incompatible, but the
// detection is only at runtime and ensure that we can stream and flush to the
// connection even after invoking Hijack(). See Issue #642.
func TestHandlerProxiesHijack_HTTP2(t *testing.T) {
cst := httptest.NewUnstartedServer(&Handler{
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
conn, _, err := w.(http.Hijacker).Hijack()
if conn != nil {
data := fmt.Sprintf("Surprisingly got the Hijacker() Proto: %s", r.Proto)
fmt.Fprintf(conn, "%s 200\nContent-Length:%d\r\n\r\n%s", r.Proto, len(data), data)
conn.Close()
return
}
switch {
case err == nil:
fmt.Fprintf(w, "Unexpectedly did not encounter an error!")
default:
fmt.Fprintf(w, "Unexpected error: %v", err)
case strings.Contains(err.(error).Error(), "Hijack"):
// Confirmed HTTP/2.0, let's stream to it
for i := 0; i < 5; i++ {
fmt.Fprintf(w, "%d\n", i)
w.(http.Flusher).Flush()
}
}
}),
})
cst.TLS = &tls.Config{NextProtos: []string{"h2"}}
cst.StartTLS()
defer cst.Close()
if wantPrefix := "https://"; !strings.HasPrefix(cst.URL, wantPrefix) {
t.Fatalf("URL got = %q wantPrefix = %q", cst.URL, wantPrefix)
}
tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
http2.ConfigureTransport(tr)
c := &http.Client{Transport: tr}
res, err := c.Get(cst.URL)
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
blob, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
if g, w := string(blob), "0\n1\n2\n3\n4\n"; g != w {
t.Errorf("got = %q; want = %q", g, w)
}
}
func TestEnsureTrackingResponseWriterSetsStatusCode(t *testing.T) {
// Ensure that the trackingResponseWriter always sets the spanStatus on ending the span.
// Because we can only examine the Status after exporting, this test roundtrips a
// couple of requests and then later examines the exported spans.
// See Issue #700.
exporter := &spanExporter{cur: make(chan *trace.SpanData, 1)}
trace.RegisterExporter(exporter)
defer trace.UnregisterExporter(exporter)
tests := []struct {
res *http.Response
want trace.Status
}{
{res: &http.Response{StatusCode: 200}, want: trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}},
{res: &http.Response{StatusCode: 500}, want: trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}},
{res: &http.Response{StatusCode: 403}, want: trace.Status{Code: trace.StatusCodePermissionDenied, Message: `"PERMISSION_DENIED"`}},
{res: &http.Response{StatusCode: 401}, want: trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `"UNAUTHENTICATED"`}},
{res: &http.Response{StatusCode: 429}, want: trace.Status{Code: trace.StatusCodeResourceExhausted, Message: `"RESOURCE_EXHAUSTED"`}},
}
for _, tt := range tests {
t.Run(tt.want.Message, func(t *testing.T) {
span := trace.NewSpan("testing", nil, trace.StartOptions{Sampler: trace.AlwaysSample()})
ctx := trace.WithSpan(context.Background(), span)
prc, pwc := io.Pipe()
go func() {
pwc.Write([]byte("Foo"))
pwc.Close()
}()
inRes := tt.res
inRes.Body = prc
tr := &traceTransport{base: &testResponseTransport{res: inRes}}
req, err := http.NewRequest("POST", "https://example.org", bytes.NewReader([]byte("testing")))
if err != nil {
t.Fatalf("NewRequest error: %v", err)
}
req = req.WithContext(ctx)
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatalf("RoundTrip error: %v", err)
}
_, _ = ioutil.ReadAll(res.Body)
res.Body.Close()
cur := <-exporter.cur
if got, want := cur.Status, tt.want; got != want {
t.Fatalf("SpanData:\ngot = (%#v)\nwant = (%#v)", got, want)
}
})
}
}
type spanExporter struct {
sync.Mutex
cur chan *trace.SpanData
}
var _ trace.Exporter = (*spanExporter)(nil)
func (se *spanExporter) ExportSpan(sd *trace.SpanData) {
se.Lock()
se.cur <- sd
se.Unlock()
}
type testResponseTransport struct {
res *http.Response
}
var _ http.RoundTripper = (*testResponseTransport)(nil)
func (rb *testResponseTransport) RoundTrip(*http.Request) (*http.Response, error) {
return rb.res, nil
}

View File

@@ -22,18 +22,18 @@ import (
// The following client HTTP measures are supported for use in custom views.
var (
ClientRequestCount, _ = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitNone)
ClientRequestBytes, _ = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
ClientResponseBytes, _ = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
ClientLatency, _ = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds)
ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds)
)
// The following server HTTP measures are supported for use in custom views:
var (
ServerRequestCount, _ = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitNone)
ServerRequestBytes, _ = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
ServerResponseBytes, _ = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
ServerLatency, _ = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds)
ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds)
)
// The following tags are applied to stats recorded by this package. Host, Path

View File

@@ -53,10 +53,11 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
name := spanNameFromURL(req.URL)
// TODO(jbd): Discuss whether we want to prefix
// outgoing requests with Sent.
parent := trace.FromContext(req.Context())
span := trace.NewSpan(name, parent, t.startOptions)
req = req.WithContext(trace.WithSpan(req.Context(), span))
_, span := trace.StartSpan(req.Context(), name,
trace.WithSampler(t.startOptions.Sampler),
trace.WithSpanKind(trace.SpanKindClient))
req = req.WithContext(trace.WithSpan(req.Context(), span))
if t.format != nil {
t.format.SpanContextToRequest(span.SpanContext(), req)
}
@@ -64,13 +65,13 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
span.AddAttributes(requestAttrs(req)...)
resp, err := t.base.RoundTrip(req)
if err != nil {
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
span.End()
return resp, err
}
span.AddAttributes(responseAttrs(resp)...)
span.SetStatus(status(resp.StatusCode))
span.SetStatus(TraceStatus(resp.StatusCode, resp.Status))
// span.End() will be invoked after
// a read from resp.Body returns io.EOF or when
@@ -145,71 +146,54 @@ func responseAttrs(resp *http.Response) []trace.Attribute {
}
}
func status(statusCode int) trace.Status {
// HTTPStatusToTraceStatus converts the HTTP status code to a trace.Status that
// represents the outcome as closely as possible.
func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
var code int32
if statusCode < 200 || statusCode >= 400 {
code = codeUnknown
if httpStatusCode < 200 || httpStatusCode >= 400 {
code = trace.StatusCodeUnknown
}
switch statusCode {
switch httpStatusCode {
case 499:
code = codeCancelled
code = trace.StatusCodeCancelled
case http.StatusBadRequest:
code = codeInvalidArgument
code = trace.StatusCodeInvalidArgument
case http.StatusGatewayTimeout:
code = codeDeadlineExceeded
code = trace.StatusCodeDeadlineExceeded
case http.StatusNotFound:
code = codeNotFound
code = trace.StatusCodeNotFound
case http.StatusForbidden:
code = codePermissionDenied
code = trace.StatusCodePermissionDenied
case http.StatusUnauthorized: // 401 is actually unauthenticated.
code = codeUnathenticated
code = trace.StatusCodeUnauthenticated
case http.StatusTooManyRequests:
code = codeResourceExhausted
code = trace.StatusCodeResourceExhausted
case http.StatusNotImplemented:
code = codeUnimplemented
code = trace.StatusCodeUnimplemented
case http.StatusServiceUnavailable:
code = codeUnavailable
code = trace.StatusCodeUnavailable
case http.StatusOK:
code = trace.StatusCodeOK
}
return trace.Status{Code: code, Message: codeToStr[code]}
}
// TODO(jbd): Provide status codes from trace package.
const (
codeOK = 0
codeCancelled = 1
codeUnknown = 2
codeInvalidArgument = 3
codeDeadlineExceeded = 4
codeNotFound = 5
codeAlreadyExists = 6
codePermissionDenied = 7
codeResourceExhausted = 8
codeFailedPrecondition = 9
codeAborted = 10
codeOutOfRange = 11
codeUnimplemented = 12
codeInternal = 13
codeUnavailable = 14
codeDataLoss = 15
codeUnathenticated = 16
)
var codeToStr = map[int32]string{
codeOK: `"OK"`,
codeCancelled: `"CANCELLED"`,
codeUnknown: `"UNKNOWN"`,
codeInvalidArgument: `"INVALID_ARGUMENT"`,
codeDeadlineExceeded: `"DEADLINE_EXCEEDED"`,
codeNotFound: `"NOT_FOUND"`,
codeAlreadyExists: `"ALREADY_EXISTS"`,
codePermissionDenied: `"PERMISSION_DENIED"`,
codeResourceExhausted: `"RESOURCE_EXHAUSTED"`,
codeFailedPrecondition: `"FAILED_PRECONDITION"`,
codeAborted: `"ABORTED"`,
codeOutOfRange: `"OUT_OF_RANGE"`,
codeUnimplemented: `"UNIMPLEMENTED"`,
codeInternal: `"INTERNAL"`,
codeUnavailable: `"UNAVAILABLE"`,
codeDataLoss: `"DATA_LOSS"`,
codeUnathenticated: `"UNAUTHENTICATED"`,
trace.StatusCodeOK: `"OK"`,
trace.StatusCodeCancelled: `"CANCELLED"`,
trace.StatusCodeUnknown: `"UNKNOWN"`,
trace.StatusCodeInvalidArgument: `"INVALID_ARGUMENT"`,
trace.StatusCodeDeadlineExceeded: `"DEADLINE_EXCEEDED"`,
trace.StatusCodeNotFound: `"NOT_FOUND"`,
trace.StatusCodeAlreadyExists: `"ALREADY_EXISTS"`,
trace.StatusCodePermissionDenied: `"PERMISSION_DENIED"`,
trace.StatusCodeResourceExhausted: `"RESOURCE_EXHAUSTED"`,
trace.StatusCodeFailedPrecondition: `"FAILED_PRECONDITION"`,
trace.StatusCodeAborted: `"ABORTED"`,
trace.StatusCodeOutOfRange: `"OUT_OF_RANGE"`,
trace.StatusCodeUnimplemented: `"UNIMPLEMENTED"`,
trace.StatusCodeInternal: `"INTERNAL"`,
trace.StatusCodeUnavailable: `"UNAVAILABLE"`,
trace.StatusCodeDataLoss: `"DATA_LOSS"`,
trace.StatusCodeUnauthenticated: `"UNAUTHENTICATED"`,
}

View File

@@ -73,7 +73,8 @@ func (t testPropagator) SpanContextToRequest(sc trace.SpanContext, req *http.Req
}
func TestTransport_RoundTrip(t *testing.T) {
parent := trace.NewSpan("parent", nil, trace.StartOptions{})
ctx := context.Background()
ctx, parent := trace.StartSpan(ctx, "parent")
tests := []struct {
name string
parent *trace.Span
@@ -172,7 +173,7 @@ func (c *collector) ExportSpan(s *trace.SpanData) {
}
func TestEndToEnd(t *testing.T) {
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
tc := []struct {
name string
@@ -221,12 +222,9 @@ func TestEndToEnd(t *testing.T) {
url := serveHTTP(tt.handler, serverDone, serverReturn)
// Start a root Span in the client.
root := trace.NewSpan(
"top-level",
nil,
trace.StartOptions{})
ctx := trace.WithSpan(context.Background(), root)
ctx, root := trace.StartSpan(
context.Background(),
"top-level")
// Make the request.
req, err := http.NewRequest(
http.MethodPost,
@@ -278,7 +276,7 @@ func TestEndToEnd(t *testing.T) {
t.Errorf("Span name: %q; want %q", got, want)
}
default:
t.Fatalf("server or client span missing")
t.Fatalf("server or client span missing; kind = %v", sp.SpanKind)
}
}
@@ -439,19 +437,20 @@ func TestStatusUnitTest(t *testing.T) {
in int
want trace.Status
}{
{200, trace.Status{Code: 0, Message: `"OK"`}},
{100, trace.Status{Code: 2, Message: `"UNKNOWN"`}},
{500, trace.Status{Code: 2, Message: `"UNKNOWN"`}},
{404, trace.Status{Code: 5, Message: `"NOT_FOUND"`}},
{600, trace.Status{Code: 2, Message: `"UNKNOWN"`}},
{401, trace.Status{Code: 16, Message: `"UNAUTHENTICATED"`}},
{403, trace.Status{Code: 7, Message: `"PERMISSION_DENIED"`}},
{301, trace.Status{Code: 0, Message: `"OK"`}},
{501, trace.Status{Code: 12, Message: `"UNIMPLEMENTED"`}},
{200, trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}},
{204, trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}},
{100, trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}},
{500, trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}},
{404, trace.Status{Code: trace.StatusCodeNotFound, Message: `"NOT_FOUND"`}},
{600, trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}},
{401, trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `"UNAUTHENTICATED"`}},
{403, trace.Status{Code: trace.StatusCodePermissionDenied, Message: `"PERMISSION_DENIED"`}},
{301, trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}},
{501, trace.Status{Code: trace.StatusCodeUnimplemented, Message: `"UNIMPLEMENTED"`}},
}
for _, tt := range tests {
got, want := status(tt.in), tt.want
got, want := TraceStatus(tt.in, ""), tt.want
if got != want {
t.Errorf("status(%d) got = (%#v) want = (%#v)", tt.in, got, want)
}

View File

@@ -16,7 +16,6 @@ package stats_test
import (
"context"
"log"
"testing"
"go.opencensus.io/stats"
@@ -93,9 +92,5 @@ func BenchmarkRecord8_8Tags(b *testing.B) {
}
func makeMeasure() *stats.Int64Measure {
m, err := stats.Int64("m", "test measure", "")
if err != nil {
log.Fatal(err)
}
return m
return stats.Int64("m", "test measure", "")
}

View File

@@ -16,16 +16,16 @@ package stats_test
import (
"context"
"log"
"go.opencensus.io/stats"
)
func ExampleRecord() {
ctx := context.Background()
openConns, err := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitNone)
if err != nil {
log.Fatal(err)
}
// Measures are usually declared as package-private global variables.
openConns := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitDimensionless)
// Instrumented packages call stats.Record() to record measuremens.
stats.Record(ctx, openConns.M(124)) // Record 124 open connections.
}

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
package internal // import "go.opencensus.io/stats/internal"
const (
MaxNameLength = 255

View File

@@ -16,12 +16,8 @@
package stats
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"go.opencensus.io/stats/internal"
)
// Measure represents a type of metric to be tracked and recorded.
@@ -38,12 +34,13 @@ type Measure interface {
Name() string
Description() string
Unit() string
subscribe()
subscribed() bool
}
type measure struct {
// measureDescriptor is the untyped descriptor associated with each measure.
// Int64Measure and Float64Measure wrap measureDescriptor to provide typed
// recording APIs.
// Two Measures with the same name will have the same measureDescriptor.
type measureDescriptor struct {
subs int32 // access atomically
name string
@@ -51,56 +48,33 @@ type measure struct {
unit string
}
func (m *measure) subscribe() {
func (m *measureDescriptor) subscribe() {
atomic.StoreInt32(&m.subs, 1)
}
func (m *measure) subscribed() bool {
func (m *measureDescriptor) subscribed() bool {
return atomic.LoadInt32(&m.subs) == 1
}
// Name returns the name of the measure.
func (m *measure) Name() string {
return m.name
}
// Description returns the description of the measure.
func (m *measure) Description() string {
return m.description
}
// Unit returns the unit of the measure.
func (m *measure) Unit() string {
return m.unit
}
var (
mu sync.RWMutex
measures = make(map[string]Measure)
measures = make(map[string]*measureDescriptor)
)
var (
errDuplicate = errors.New("duplicate measure name")
errMeasureNameTooLong = fmt.Errorf("measure name cannot be longer than %v", internal.MaxNameLength)
)
// FindMeasure finds the Measure instance, if any, associated with the given name.
func FindMeasure(name string) Measure {
mu.RLock()
m := measures[name]
mu.RUnlock()
return m
}
func register(m Measure) (Measure, error) {
key := m.Name()
func registerMeasureHandle(name, desc, unit string) *measureDescriptor {
mu.Lock()
defer mu.Unlock()
if stored, ok := measures[key]; ok {
return stored, errDuplicate
if stored, ok := measures[name]; ok {
return stored
}
measures[key] = m
return m, nil
m := &measureDescriptor{
name: name,
description: desc,
unit: unit,
}
measures[name] = m
return m
}
// Measurement is the numeric value measured when recording stats. Each measure
@@ -120,13 +94,3 @@ func (m Measurement) Value() float64 {
func (m Measurement) Measure() Measure {
return m.m
}
func checkName(name string) error {
if len(name) > internal.MaxNameLength {
return errMeasureNameTooLong
}
if !internal.IsPrintable(name) {
return errors.New("measure name needs to be an ASCII string")
}
return nil
}

View File

@@ -17,41 +17,36 @@ package stats
// Float64Measure is a measure of type float64.
type Float64Measure struct {
measure
md *measureDescriptor
}
func (m *Float64Measure) subscribe() {
m.measure.subscribe()
// Name returns the name of the measure.
func (m *Float64Measure) Name() string {
return m.md.name
}
func (m *Float64Measure) subscribed() bool {
return m.measure.subscribed()
// Description returns the description of the measure.
func (m *Float64Measure) Description() string {
return m.md.description
}
// Unit returns the unit of the measure.
func (m *Float64Measure) Unit() string {
return m.md.unit
}
// M creates a new float64 measurement.
// Use Record to record measurements.
func (m *Float64Measure) M(v float64) Measurement {
if !m.subscribed() {
if !m.md.subscribed() {
return Measurement{}
}
return Measurement{m: m, v: v}
}
// Float64 creates a new measure of type Float64Measure. It returns
// an error if a measure with the same name already exists.
func Float64(name, description, unit string) (*Float64Measure, error) {
if err := checkName(name); err != nil {
return nil, err
}
m := &Float64Measure{
measure: measure{
name: name,
description: description,
unit: unit,
},
}
if _, err := register(m); err != nil {
return nil, err
}
return m, nil
// Float64 creates a new measure of type Float64Measure.
// It never returns an error.
func Float64(name, description, unit string) *Float64Measure {
mi := registerMeasureHandle(name, description, unit)
return &Float64Measure{mi}
}

View File

@@ -17,41 +17,36 @@ package stats
// Int64Measure is a measure of type int64.
type Int64Measure struct {
measure
md *measureDescriptor
}
func (m *Int64Measure) subscribe() {
m.measure.subscribe()
// Name returns the name of the measure.
func (m *Int64Measure) Name() string {
return m.md.name
}
func (m *Int64Measure) subscribed() bool {
return m.measure.subscribed()
// Description returns the description of the measure.
func (m *Int64Measure) Description() string {
return m.md.description
}
// Unit returns the unit of the measure.
func (m *Int64Measure) Unit() string {
return m.md.unit
}
// M creates a new int64 measurement.
// Use Record to record measurements.
func (m *Int64Measure) M(v int64) Measurement {
if !m.subscribed() {
if !m.md.subscribed() {
return Measurement{}
}
return Measurement{m: m, v: float64(v)}
}
// Int64 creates a new measure of type Int64Measure. It returns an
// error if a measure with the same name already exists.
func Int64(name, description, unit string) (*Int64Measure, error) {
if err := checkName(name); err != nil {
return nil, err
}
m := &Int64Measure{
measure: measure{
name: name,
description: description,
unit: unit,
},
}
if _, err := register(m); err != nil {
return nil, err
}
return m, nil
// Int64 creates a new measure of type Int64Measure.
// It never returns an error.
func Int64(name, description, unit string) *Int64Measure {
mi := registerMeasureHandle(name, description, unit)
return &Int64Measure{mi}
}

View File

@@ -1,108 +0,0 @@
package stats
import (
"strings"
"testing"
)
func TestCheckMeasureName(t *testing.T) {
tests := []struct {
name string
view string
wantErr bool
}{
{
name: "valid measure name",
view: "my.org/measures/response_size",
wantErr: false,
},
{
name: "long name",
view: strings.Repeat("a", 256),
wantErr: true,
},
{
name: "name with non-ASCII",
view: "my.org/measures/\007",
wantErr: true,
},
{
name: "no emoji for you!",
view: "💩",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := checkName(tt.view); (err != nil) != tt.wantErr {
t.Errorf("checkName() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_FindMeasure(t *testing.T) {
mf1, err := Float64("MF1", "desc MF1", "unit")
if err != nil {
t.Errorf("stats.Float64(\"MF1\", \"desc MF1\") got error %v, want no error", err)
}
mf2, err := Float64("MF2", "desc MF2", "unit")
if err != nil {
t.Errorf("stats.Float64(\"MF2\", \"desc MF2\") got error %v, want no error", err)
}
mi1, err := Int64("MI1", "desc MI1", "unit")
if err != nil {
t.Errorf("stats.Int64(\"MI1\", \"desc MI1\") got error %v, want no error", err)
}
type testCase struct {
label string
name string
m Measure
}
tcs := []testCase{
{
"0",
mf1.Name(),
mf1,
},
{
"1",
"MF1",
mf1,
},
{
"2",
mf2.Name(),
mf2,
},
{
"3",
"MF2",
mf2,
},
{
"4",
mi1.Name(),
mi1,
},
{
"5",
"MI1",
mi1,
},
{
"6",
"other",
nil,
},
}
for _, tc := range tcs {
m := FindMeasure(tc.name)
if m != tc.m {
t.Errorf("FindMeasure(%q) got measure %v; want %v", tc.label, m, tc.m)
}
}
}

View File

@@ -18,7 +18,8 @@ package stats
// Units are encoded according to the case-sensitive abbreviations from the
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
const (
UnitNone = "1"
UnitNone = "1" // Deprecated: Use UnitDimensionless.
UnitDimensionless = "1"
UnitBytes = "By"
UnitMilliseconds = "ms"
)

View File

@@ -15,21 +15,32 @@
package view
//go:generate stringer -type AggType
// AggType represents the type of aggregation function used on a View.
type AggType int
// All available aggregation types.
const (
AggTypeNone AggType = iota // no aggregation; reserved for future use.
AggTypeCount // the count aggregation, see Count.
AggTypeSum // the sum aggregation, see Sum.
AggTypeMean // the mean aggregation, see Mean.
AggTypeDistribution // the distribution aggregation, see Distribution.
AggTypeLastValue // the last value aggregation, see LastValue.
)
func (t AggType) String() string {
return aggTypeName[t]
}
var aggTypeName = map[AggType]string{
AggTypeNone: "None",
AggTypeCount: "Count",
AggTypeSum: "Sum",
AggTypeDistribution: "Distribution",
AggTypeLastValue: "LastValue",
}
// Aggregation represents a data aggregation method. Use one of the functions:
// Count, Sum, Mean, or Distribution to construct an Aggregation.
// Count, Sum, or Distribution to construct an Aggregation.
type Aggregation struct {
Type AggType // Type is the AggType of this Aggregation.
Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution.
@@ -41,19 +52,13 @@ var (
aggCount = &Aggregation{
Type: AggTypeCount,
newData: func() AggregationData {
return newCountData(0)
return &CountData{}
},
}
aggSum = &Aggregation{
Type: AggTypeSum,
newData: func() AggregationData {
return newSumData(0)
},
}
aggMean = &Aggregation{
Type: AggTypeMean,
newData: func() AggregationData {
return newMeanData(0, 0)
return &SumData{}
},
}
)
@@ -74,14 +79,6 @@ func Sum() *Aggregation {
return aggSum
}
// Mean indicates that collect and aggregate data and maintain
// the mean value.
// For example, average latency in milliseconds can be aggregated by using
// Mean, although in most cases it is preferable to use a Distribution.
func Mean() *Aggregation {
return aggMean
}
// Distribution indicates that the desired aggregation is
// a histogram distribution.
//
@@ -110,3 +107,14 @@ func Distribution(bounds ...float64) *Aggregation {
},
}
}
// LastValue only reports the last value recorded using this
// aggregation. All other measurements will be dropped.
func LastValue() *Aggregation {
return &Aggregation{
Type: AggTypeLastValue,
newData: func() AggregationData {
return &LastValueData{}
},
}
}

View File

@@ -35,21 +35,18 @@ const epsilon = 1e-9
// A count aggregation processes data and counts the recordings.
//
// Most users won't directly access count data.
type CountData int64
func newCountData(v int64) *CountData {
tmp := CountData(v)
return &tmp
type CountData struct {
Value int64
}
func (a *CountData) isAggregationData() bool { return true }
func (a *CountData) addSample(_ float64) {
*a = *a + 1
func (a *CountData) addSample(v float64) {
a.Value = a.Value + 1
}
func (a *CountData) clone() AggregationData {
return newCountData(int64(*a))
return &CountData{Value: a.Value}
}
func (a *CountData) equal(other AggregationData) bool {
@@ -58,28 +55,25 @@ func (a *CountData) equal(other AggregationData) bool {
return false
}
return int64(*a) == int64(*a2)
return a.Value == a2.Value
}
// SumData is the aggregated data for the Sum aggregation.
// A sum aggregation processes data and sums up the recordings.
//
// Most users won't directly access sum data.
type SumData float64
func newSumData(v float64) *SumData {
tmp := SumData(v)
return &tmp
type SumData struct {
Value float64
}
func (a *SumData) isAggregationData() bool { return true }
func (a *SumData) addSample(f float64) {
*a += SumData(f)
a.Value += f
}
func (a *SumData) clone() AggregationData {
return newSumData(float64(*a))
return &SumData{Value: a.Value}
}
func (a *SumData) equal(other AggregationData) bool {
@@ -87,49 +81,7 @@ func (a *SumData) equal(other AggregationData) bool {
if !ok {
return false
}
return math.Pow(float64(*a)-float64(*a2), 2) < epsilon
}
// MeanData is the aggregated data for the Mean aggregation.
// A mean aggregation processes data and maintains the mean value.
//
// Most users won't directly access mean data.
type MeanData struct {
Count int64 // number of data points aggregated
Mean float64 // mean of all data points
}
func newMeanData(mean float64, count int64) *MeanData {
return &MeanData{
Mean: mean,
Count: count,
}
}
// Sum returns the sum of all samples collected.
func (a *MeanData) Sum() float64 { return a.Mean * float64(a.Count) }
func (a *MeanData) isAggregationData() bool { return true }
func (a *MeanData) addSample(f float64) {
a.Count++
if a.Count == 1 {
a.Mean = f
return
}
a.Mean = a.Mean + (f-a.Mean)/float64(a.Count)
}
func (a *MeanData) clone() AggregationData {
return newMeanData(a.Mean, a.Count)
}
func (a *MeanData) equal(other AggregationData) bool {
a2, ok := other.(*MeanData)
if !ok {
return false
}
return a.Count == a2.Count && math.Pow(a.Mean-a2.Mean, 2) < epsilon
return math.Pow(a.Value-a2.Value, 2) < epsilon
}
// DistributionData is the aggregated data for the
@@ -228,3 +180,28 @@ func (a *DistributionData) equal(other AggregationData) bool {
}
return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
}
// LastValueData returns the last value recorded for LastValue aggregation.
type LastValueData struct {
Value float64
}
func (l *LastValueData) isAggregationData() bool {
return true
}
func (l *LastValueData) addSample(v float64) {
l.Value = v
}
func (l *LastValueData) clone() AggregationData {
return &LastValueData{l.Value}
}
func (l *LastValueData) equal(other AggregationData) bool {
a2, ok := other.(*LastValueData)
if !ok {
return false
}
return l.Value == a2.Value
}

View File

@@ -35,19 +35,15 @@ func TestDataClone(t *testing.T) {
}{
{
name: "count data",
src: newCountData(5),
src: &CountData{Value: 5},
},
{
name: "distribution data",
src: dist,
},
{
name: "mean data",
src: newMeanData(11.0, 5),
},
{
name: "sum data",
src: newSumData(65.7),
src: &SumData{Value: 65.7},
},
}
for _, tt := range tests {

View File

@@ -1,16 +0,0 @@
// Code generated by "stringer -type AggType"; DO NOT EDIT.
package view
import "strconv"
const _AggType_name = "AggTypeNoneAggTypeCountAggTypeSumAggTypeMeanAggTypeDistribution"
var _AggType_index = [...]uint8{0, 11, 23, 33, 44, 63}
func (i AggType) String() string {
if i < 0 || i >= AggType(len(_AggType_index)-1) {
return "AggType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _AggType_name[_AggType_index[i]:_AggType_index[i+1]]
}

View File

@@ -25,7 +25,7 @@ import (
)
var (
m, _ = stats.Float64("m", "", "")
m = stats.Float64("m", "", "")
k1, _ = tag.NewKey("k1")
k2, _ = tag.NewKey("k2")
k3, _ = tag.NewKey("k3")
@@ -46,9 +46,9 @@ var (
func BenchmarkRecordReqCommand(b *testing.B) {
w := newWorker()
subscribe := &subscribeToViewReq{views: []*View{view}, err: make(chan error, 1)}
subscribe.handleCommand(w)
if err := <-subscribe.err; err != nil {
register := &registerViewReq{views: []*View{view}, err: make(chan error, 1)}
register.handleCommand(w)
if err := <-register.err; err != nil {
b.Fatal(err)
}

View File

@@ -21,11 +21,10 @@ A view allows recorded measurements to be filtered and aggregated over a time wi
All recorded measurements can be filtered by a list of tags.
OpenCensus provides several aggregation methods: count, distribution, sum and mean.
OpenCensus provides several aggregation methods: count, distribution and sum.
Count aggregation only counts the number of measurement points. Distribution
aggregation provides statistical summary of the aggregated data. Sum distribution
sums up the measurement points. Mean provides the mean of the recorded measurements.
Aggregations can either happen cumulatively or over an interval.
sums up the measurement points. Aggregations are cumulative.
Users can dynamically create and delete views.

View File

@@ -22,9 +22,11 @@ import (
)
func Example() {
m, _ := stats.Int64("my.org/measure/openconns", "open connections", "")
// Measures are usually declared and used by instrumented packages.
m := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitDimensionless)
if err := view.Subscribe(&view.View{
// Views are usually subscribed in your application main function.
if err := view.Register(&view.View{
Name: "my.org/views/openconns",
Description: "open connections",
Measure: m,

View File

@@ -46,20 +46,6 @@ type View struct {
Aggregation *Aggregation
}
// Deprecated: Use &View{}.
func New(name, description string, keys []tag.Key, measure stats.Measure, agg *Aggregation) (*View, error) {
if measure == nil {
panic("measure may not be nil")
}
return &View{
Name: name,
Description: description,
TagKeys: keys,
Measure: measure,
Aggregation: agg,
}, nil
}
// WithName returns a copy of the View with a new name. This is useful for
// renaming views to cope with limitations placed on metric names by various
// backends.
@@ -176,7 +162,7 @@ func (r *Row) String() string {
return buffer.String()
}
// same returns true if both Rows are equal. Tags are expected to be ordered
// Equal returns true if both rows are equal. Tags are expected to be ordered
// by the key name. Even both rows have the same tags but the tags appear in
// different orders it will return false.
func (r *Row) Equal(other *Row) bool {

View File

@@ -0,0 +1,50 @@
package view
import (
"context"
"testing"
"go.opencensus.io/stats"
)
func TestMeasureFloat64AndInt64(t *testing.T) {
// Recording through both a Float64Measure and Int64Measure with the
// same name should work.
im := stats.Int64("TestMeasureFloat64AndInt64", "", stats.UnitDimensionless)
fm := stats.Float64("TestMeasureFloat64AndInt64", "", stats.UnitDimensionless)
if im == nil || fm == nil {
t.Fatal("Error creating Measures")
}
v1 := &View{
Name: "TestMeasureFloat64AndInt64/v1",
Measure: im,
Aggregation: Sum(),
}
v2 := &View{
Name: "TestMeasureFloat64AndInt64/v2",
Measure: fm,
Aggregation: Sum(),
}
Register(v1, v2)
stats.Record(context.Background(), im.M(5))
stats.Record(context.Background(), fm.M(2.2))
d1, _ := RetrieveData(v1.Name)
d2, _ := RetrieveData(v2.Name)
sum1 := d1[0].Data.(*SumData)
sum2 := d2[0].Data.(*SumData)
// We expect both views to return 7.2, as though we recorded on a single measure.
if got, want := sum1.Value, 7.2; got != want {
t.Errorf("sum1 = %v; want %v", got, want)
}
if got, want := sum2.Value, 7.2; got != want {
t.Errorf("sum2 = %v; want %v", got, want)
}
}

View File

@@ -28,7 +28,7 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) {
k2, _ := tag.NewKey("k2")
k3, _ := tag.NewKey("k3")
agg1 := Distribution(2)
m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationDistribution/m1", "", stats.UnitNone)
m := stats.Int64("Test_View_MeasureFloat64_AggregationDistribution/m1", "", stats.UnitDimensionless)
view1 := &View{
TagKeys: []tag.Key{k1, k2},
Measure: m,
@@ -197,7 +197,7 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
k1, _ := tag.NewKey("k1")
k2, _ := tag.NewKey("k2")
k3, _ := tag.NewKey("k3")
m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationSum/m1", "", stats.UnitNone)
m := stats.Int64("Test_View_MeasureFloat64_AggregationSum/m1", "", stats.UnitDimensionless)
view, err := newViewInternal(&View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Sum()})
if err != nil {
t.Fatal(err)
@@ -226,7 +226,7 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
[]*Row{
{
[]tag.Tag{{Key: k1, Value: "v1"}},
newSumData(6),
&SumData{Value: 6},
},
},
},
@@ -239,11 +239,11 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
[]*Row{
{
[]tag.Tag{{Key: k1, Value: "v1"}},
newSumData(1),
&SumData{Value: 1},
},
{
[]tag.Tag{{Key: k2, Value: "v2"}},
newSumData(5),
&SumData{Value: 5},
},
},
},
@@ -259,19 +259,19 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
[]*Row{
{
[]tag.Tag{{Key: k1, Value: "v1"}},
newSumData(6),
&SumData{Value: 6},
},
{
[]tag.Tag{{Key: k1, Value: "v1 other"}},
newSumData(1),
&SumData{Value: 1},
},
{
[]tag.Tag{{Key: k2, Value: "v2"}},
newSumData(5),
&SumData{Value: 5},
},
{
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
newSumData(5),
&SumData{Value: 5},
},
},
},
@@ -312,8 +312,8 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
func TestCanonicalize(t *testing.T) {
k1, _ := tag.NewKey("k1")
k2, _ := tag.NewKey("k2")
m, _ := stats.Int64("TestCanonicalize/m1", "desc desc", stats.UnitNone)
v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: Mean()}
m := stats.Int64("TestCanonicalize/m1", "desc desc", stats.UnitDimensionless)
v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: Sum()}
err := v.canonicalize()
if err != nil {
t.Fatal(err)
@@ -332,138 +332,19 @@ func TestCanonicalize(t *testing.T) {
}
}
func Test_View_MeasureFloat64_AggregationMean(t *testing.T) {
k1, _ := tag.NewKey("k1")
k2, _ := tag.NewKey("k2")
k3, _ := tag.NewKey("k3")
m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationMean/m1", "", stats.UnitNone)
viewDesc := &View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Mean()}
view, err := newViewInternal(viewDesc)
if err != nil {
t.Fatal(err)
}
type tagString struct {
k tag.Key
v string
}
type record struct {
f float64
tags []tagString
}
tcs := []struct {
label string
records []record
wantRows []*Row
}{
{
"1",
[]record{
{1, []tagString{{k1, "v1"}}},
{5, []tagString{{k1, "v1"}}},
},
[]*Row{
{
[]tag.Tag{{Key: k1, Value: "v1"}},
newMeanData(3, 2),
},
},
},
{
"2",
[]record{
{1, []tagString{{k1, "v1"}}},
{5, []tagString{{k2, "v2"}}},
{-0.5, []tagString{{k2, "v2"}}},
},
[]*Row{
{
[]tag.Tag{{Key: k1, Value: "v1"}},
newMeanData(1, 1),
},
{
[]tag.Tag{{Key: k2, Value: "v2"}},
newMeanData(2.25, 2),
},
},
},
{
"3",
[]record{
{1, []tagString{{k1, "v1"}}},
{5, []tagString{{k1, "v1"}, {k3, "v3"}}},
{1, []tagString{{k1, "v1 other"}}},
{5, []tagString{{k2, "v2"}}},
{5, []tagString{{k1, "v1"}, {k2, "v2"}}},
{-4, []tagString{{k1, "v1"}, {k2, "v2"}}},
},
[]*Row{
{
[]tag.Tag{{Key: k1, Value: "v1"}},
newMeanData(3, 2),
},
{
[]tag.Tag{{Key: k1, Value: "v1 other"}},
newMeanData(1, 1),
},
{
[]tag.Tag{{Key: k2, Value: "v2"}},
newMeanData(5, 1),
},
{
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
newMeanData(0.5, 2),
},
},
},
}
for _, tt := range tcs {
view.clearRows()
view.subscribe()
for _, r := range tt.records {
mods := []tag.Mutator{}
for _, t := range r.tags {
mods = append(mods, tag.Insert(t.k, t.v))
}
ctx, err := tag.New(context.Background(), mods...)
if err != nil {
t.Errorf("%v: New = %v", tt.label, err)
}
view.addSample(tag.FromContext(ctx), r.f)
}
gotRows := view.collectedRows()
for i, got := range gotRows {
if !containsRow(tt.wantRows, got) {
t.Errorf("%v-%d: got row %v; want none", tt.label, i, got)
break
}
}
for i, want := range tt.wantRows {
if !containsRow(gotRows, want) {
t.Errorf("%v-%d: got none; want row %v", tt.label, i, want)
break
}
}
}
}
func TestViewSortedKeys(t *testing.T) {
k1, _ := tag.NewKey("a")
k2, _ := tag.NewKey("b")
k3, _ := tag.NewKey("c")
ks := []tag.Key{k1, k3, k2}
m, _ := stats.Int64("TestViewSortedKeys/m1", "", stats.UnitNone)
Subscribe(&View{
m := stats.Int64("TestViewSortedKeys/m1", "", stats.UnitDimensionless)
Register(&View{
Name: "sort_keys",
Description: "desc sort_keys",
TagKeys: ks,
Measure: m,
Aggregation: Mean(),
Aggregation: Sum(),
})
// Subscribe normalizes the view by sorting the tag keys, retrieve the normalized view
v := Find("sort_keys")
@@ -490,3 +371,31 @@ func containsRow(rows []*Row, r *Row) bool {
}
return false
}
func TestRegisterUnregisterParity(t *testing.T) {
measures := []stats.Measure{
stats.Int64("ifoo", "iFOO", "iBar"),
stats.Float64("ffoo", "fFOO", "fBar"),
}
aggregations := []*Aggregation{
Count(),
Sum(),
Distribution(1, 2.0, 4.0, 8.0, 16.0),
}
for i := 0; i < 10; i++ {
for _, m := range measures {
for _, agg := range aggregations {
v := &View{
Aggregation: agg,
Name: "Lookup here",
Measure: m,
}
if err := Register(v); err != nil {
t.Errorf("Iteration #%d:\nMeasure: (%#v)\nAggregation (%#v)\nError: %v", i, m, agg, err)
}
Unregister(v)
}
}
}
}

View File

@@ -61,30 +61,15 @@ func Find(name string) (v *View) {
return resp.v
}
// Deprecated: Registering is a no-op. Use the Subscribe function.
func Register(_ *View) error {
return nil
}
// Deprecated: Unregistering is a no-op, see: Unsubscribe.
func Unregister(_ *View) error {
return nil
}
// Deprecated: Use the Subscribe function.
func (v *View) Subscribe() error {
return Subscribe(v)
}
// Subscribe begins collecting data for the given views.
// Register begins collecting data for the given views.
// Once a view is subscribed, it reports data to the registered exporters.
func Subscribe(views ...*View) error {
func Register(views ...*View) error {
for _, v := range views {
if err := v.canonicalize(); err != nil {
return err
}
}
req := &subscribeToViewReq{
req := &registerViewReq{
views: views,
err: make(chan error),
}
@@ -92,16 +77,16 @@ func Subscribe(views ...*View) error {
return <-req.err
}
// Unsubscribe the given views. Data will not longer be exported for these views
// after Unsubscribe returns.
// It is not necessary to unsubscribe from views you expect to collect for the
// Unregister the given views. Data will not longer be exported for these views
// after Unregister returns.
// It is not necessary to unregister from views you expect to collect for the
// duration of your program execution.
func Unsubscribe(views ...*View) {
func Unregister(views ...*View) {
names := make([]string, len(views))
for i := range views {
names[i] = views[i].Name
}
req := &unsubscribeFromViewReq{
req := &unregisterFromViewReq{
views: names,
done: make(chan struct{}),
}
@@ -109,15 +94,6 @@ func Unsubscribe(views ...*View) {
<-req.done
}
// Deprecated: Use the Unsubscribe function instead.
func (v *View) Unsubscribe() error {
if v == nil {
return nil
}
Unsubscribe(v)
return nil
}
func RetrieveData(viewName string) ([]*Row, error) {
req := &retrieveDataReq{
now: time.Now(),

View File

@@ -41,16 +41,21 @@ type getViewByNameResp struct {
}
func (cmd *getViewByNameReq) handleCommand(w *worker) {
cmd.c <- &getViewByNameResp{w.views[cmd.name].view}
v := w.views[cmd.name]
if v == nil {
cmd.c <- &getViewByNameResp{nil}
return
}
cmd.c <- &getViewByNameResp{v.view}
}
// subscribeToViewReq is the command to subscribe to a view.
type subscribeToViewReq struct {
// registerViewReq is the command to register a view.
type registerViewReq struct {
views []*View
err chan error
}
func (cmd *subscribeToViewReq) handleCommand(w *worker) {
func (cmd *registerViewReq) handleCommand(w *worker) {
var errstr []string
for _, view := range cmd.views {
vi, err := w.tryRegisterView(view)
@@ -68,15 +73,15 @@ func (cmd *subscribeToViewReq) handleCommand(w *worker) {
}
}
// unsubscribeFromViewReq is the command to unsubscribe to a view. Has no
// unregisterFromViewReq is the command to unsubscribe to a view. Has no
// impact on the data collection for client that are pulling data from the
// library.
type unsubscribeFromViewReq struct {
type unregisterFromViewReq struct {
views []string
done chan struct{}
}
func (cmd *unsubscribeFromViewReq) handleCommand(w *worker) {
func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
for _, name := range cmd.views {
vi, ok := w.views[name]
if !ok {
@@ -89,6 +94,7 @@ func (cmd *unsubscribeFromViewReq) handleCommand(w *worker) {
// The collected data can be cleared.
vi.clearRows()
}
delete(w.views, name)
}
cmd.done <- struct{}{}
}

View File

@@ -26,56 +26,24 @@ import (
"go.opencensus.io/tag"
)
func Test_Worker_MeasureCreation(t *testing.T) {
restart()
if _, err := stats.Float64("MF1", "desc MF1", "unit"); err != nil {
t.Errorf("stats.Float64(\"MF1\", \"desc MF1\") got error %v, want no error", err)
}
if _, err := stats.Float64("MF1", "Duplicate measure with same name as MF1.", "unit"); err == nil {
t.Error("stats.Float64(\"MF1\", \"Duplicate Float64Measure with same name as MF1.\") got no error, want no error")
}
if _, err := stats.Int64("MF1", "Duplicate measure with same name as MF1.", "unit"); err == nil {
t.Error("stats.Int64(\"MF1\", \"Duplicate Int64Measure with same name as MF1.\") got no error, want no error")
}
if _, err := stats.Float64("MF2", "desc MF2", "unit"); err != nil {
t.Errorf("stats.Float64(\"MF2\", \"desc MF2\") got error %v, want no error", err)
}
if _, err := stats.Int64("MI1", "desc MI1", "unit"); err != nil {
t.Errorf("stats.Int64(\"MI1\", \"desc MI1\") got error %v, want no error", err)
}
if _, err := stats.Int64("MI1", "Duplicate measure with same name as MI1.", "unit"); err == nil {
t.Error("stats.Int64(\"MI1\", \"Duplicate Int64 with same name as MI1.\") got no error, want no error")
}
if _, err := stats.Float64("MI1", "Duplicate measure with same name as MI1.", "unit"); err == nil {
t.Error("stats.Float64(\"MI1\", \"Duplicate Float64 with same name as MI1.\") got no error, want no error")
}
}
func Test_Worker_ViewSubscription(t *testing.T) {
func Test_Worker_ViewRegistration(t *testing.T) {
someError := errors.New("some error")
sc1 := make(chan *Data)
type subscription struct {
type registration struct {
c chan *Data
vID string
err error
}
type testCase struct {
label string
subscriptions []subscription
registrations []registration
}
tcs := []testCase{
{
"register and subscribe to v1ID",
[]subscription{
[]registration{
{
sc1,
"v1ID",
@@ -85,7 +53,7 @@ func Test_Worker_ViewSubscription(t *testing.T) {
},
{
"register v1ID+v2ID, susbsribe to v1ID",
[]subscription{
[]registration{
{
sc1,
"v1ID",
@@ -95,7 +63,7 @@ func Test_Worker_ViewSubscription(t *testing.T) {
},
{
"register to v1ID; subscribe to v1ID and view with same ID",
[]subscription{
[]registration{
{
sc1,
"v1ID",
@@ -110,8 +78,8 @@ func Test_Worker_ViewSubscription(t *testing.T) {
},
}
mf1, _ := stats.Float64("MF1/Test_Worker_ViewSubscription", "desc MF1", "unit")
mf2, _ := stats.Float64("MF2/Test_Worker_ViewSubscription", "desc MF2", "unit")
mf1 := stats.Float64("MF1/Test_Worker_ViewSubscription", "desc MF1", "unit")
mf2 := stats.Float64("MF2/Test_Worker_ViewSubscription", "desc MF2", "unit")
for _, tc := range tcs {
t.Run(tc.label, func(t *testing.T) {
@@ -137,11 +105,11 @@ func Test_Worker_ViewSubscription(t *testing.T) {
"vNilID": nil,
}
for _, s := range tc.subscriptions {
v := views[s.vID]
err := Subscribe(v)
if (err != nil) != (s.err != nil) {
t.Errorf("%v: Subscribe() = %v, want %v", tc.label, err, s.err)
for _, r := range tc.registrations {
v := views[r.vID]
err := Register(v)
if (err != nil) != (r.err != nil) {
t.Errorf("%v: Register() = %v, want %v", tc.label, err, r.err)
}
}
})
@@ -152,10 +120,7 @@ func Test_Worker_RecordFloat64(t *testing.T) {
restart()
someError := errors.New("some error")
m, err := stats.Float64("Test_Worker_RecordFloat64/MF1", "desc MF1", "unit")
if err != nil {
t.Errorf("stats.Float64(\"MF1\", \"desc MF1\") got error '%v', want no error", err)
}
m := stats.Float64("Test_Worker_RecordFloat64/MF1", "desc MF1", "unit")
k1, _ := tag.NewKey("k1")
k2, _ := tag.NewKey("k2")
@@ -178,31 +143,28 @@ func Test_Worker_RecordFloat64(t *testing.T) {
type testCase struct {
label string
registrations []*View
subscriptions []*View
records []float64
wants []want
}
tcs := []testCase{
{
"0",
[]*View{v1, v2},
[]*View{},
[]float64{1, 1},
[]want{{v1, nil, someError}, {v2, nil, someError}},
label: "0",
registrations: []*View{},
records: []float64{1, 1},
wants: []want{{v1, nil, someError}, {v2, nil, someError}},
},
{
"1",
[]*View{v1, v2},
[]*View{v1},
[]float64{1, 1},
[]want{
label: "1",
registrations: []*View{v1},
records: []float64{1, 1},
wants: []want{
{
v1,
[]*Row{
{
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
newCountData(2),
&CountData{Value: 2},
},
},
nil,
@@ -211,17 +173,16 @@ func Test_Worker_RecordFloat64(t *testing.T) {
},
},
{
"2",
[]*View{v1, v2},
[]*View{v1, v2},
[]float64{1, 1},
[]want{
label: "2",
registrations: []*View{v1, v2},
records: []float64{1, 1},
wants: []want{
{
v1,
[]*Row{
{
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
newCountData(2),
&CountData{Value: 2},
},
},
nil,
@@ -231,7 +192,7 @@ func Test_Worker_RecordFloat64(t *testing.T) {
[]*Row{
{
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
newCountData(2),
&CountData{Value: 2},
},
},
nil,
@@ -247,12 +208,6 @@ func Test_Worker_RecordFloat64(t *testing.T) {
}
}
for _, v := range tc.subscriptions {
if err := v.Subscribe(); err != nil {
t.Fatalf("%v: Subscribe(%v) = %v; want no errors", tc.label, v.Name, err)
}
}
for _, value := range tc.records {
stats.Record(ctx, m.M(value))
}
@@ -260,44 +215,31 @@ func Test_Worker_RecordFloat64(t *testing.T) {
for _, w := range tc.wants {
gotRows, err := RetrieveData(w.v.Name)
if (err != nil) != (w.err != nil) {
t.Fatalf("%v: RetrieveData(%v) = %v; want no errors", tc.label, w.v.Name, err)
t.Fatalf("%s: RetrieveData(%v) = %v; want error = %v", tc.label, w.v.Name, err, w.err)
}
for _, got := range gotRows {
if !containsRow(w.rows, got) {
t.Errorf("%v: got row %v; want none", tc.label, got)
t.Errorf("%s: got row %#v; want none", tc.label, got)
break
}
}
for _, want := range w.rows {
if !containsRow(gotRows, want) {
t.Errorf("%v: got none; want %v'", tc.label, want)
t.Errorf("%s: got none; want %#v'", tc.label, want)
break
}
}
}
// cleaning up
for _, v := range tc.subscriptions {
if err := v.Unsubscribe(); err != nil {
t.Fatalf("%v: Unsubscribing from view %v errored with %v; want no error", tc.label, v.Name, err)
}
}
for _, v := range tc.registrations {
if err := Unregister(v); err != nil {
t.Fatalf("%v: Unregistering view %v errrored with %v; want no error", tc.label, v.Name, err)
}
}
// Cleaning up.
Unregister(tc.registrations...)
}
}
func TestReportUsage(t *testing.T) {
ctx := context.Background()
m, err := stats.Int64("measure", "desc", "unit")
if err != nil {
t.Fatalf("stats.Int64() = %v", err)
}
m := stats.Int64("measure", "desc", "unit")
tests := []struct {
name string
@@ -320,8 +262,7 @@ func TestReportUsage(t *testing.T) {
restart()
SetReportingPeriod(25 * time.Millisecond)
err = Subscribe(tt.view)
if err != nil {
if err := Register(tt.view); err != nil {
t.Fatalf("%v: cannot subscribe: %v", tt.name, err)
}
@@ -374,15 +315,16 @@ func TestWorkerStarttime(t *testing.T) {
restart()
ctx := context.Background()
m, err := stats.Int64("measure/TestWorkerStarttime", "desc", "unit")
if err != nil {
t.Fatalf("stats.Int64() = %v", err)
m := stats.Int64("measure/TestWorkerStarttime", "desc", "unit")
v := &View{
Name: "testview",
Measure: m,
Aggregation: Count(),
}
v, _ := New("testview", "", nil, m, Count())
SetReportingPeriod(25 * time.Millisecond)
if err := v.Subscribe(); err != nil {
t.Fatalf("cannot subscribe to %v: %v", v.Name, err)
if err := Register(v); err != nil {
t.Fatalf("cannot register to %v: %v", v.Name, err)
}
e := &vdExporter{}
@@ -433,7 +375,7 @@ func (e *countExporter) ExportView(vd *Data) {
e.Lock()
defer e.Unlock()
e.count = int64(*d)
e.count = d.Value
}
type vdExporter struct {

View File

@@ -22,6 +22,7 @@ import (
type (
// TraceID is a 16-byte identifier for a set of spans.
TraceID [16]byte
// SpanID is an 8-byte identifier for a single span.
SpanID [8]byte
)

View File

@@ -94,12 +94,12 @@ func BenchmarkSpanID_DotString(b *testing.B) {
func traceBenchmark(b *testing.B, fn func(*testing.B)) {
b.Run("AlwaysSample", func(b *testing.B) {
b.ReportAllocs()
SetDefaultSampler(AlwaysSample())
ApplyConfig(Config{DefaultSampler: AlwaysSample()})
fn(b)
})
b.Run("NeverSample", func(b *testing.B) {
b.ReportAllocs()
SetDefaultSampler(NeverSample())
ApplyConfig(Config{DefaultSampler: NeverSample()})
fn(b)
})
}

40
vendor/go.opencensus.io/trace/config.go generated vendored Normal file
View File

@@ -0,0 +1,40 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import "go.opencensus.io/trace/internal"
// Config represents the global tracing configuration.
type Config struct {
// DefaultSampler is the default sampler used when creating new spans.
DefaultSampler Sampler
// IDGenerator is for internal use only.
IDGenerator internal.IDGenerator
}
// ApplyConfig applies changes to the global tracing configuration.
//
// Fields not provided in the given config are going to be preserved.
func ApplyConfig(cfg Config) {
c := config.Load().(*Config)
if cfg.DefaultSampler != nil {
c.DefaultSampler = cfg.DefaultSampler
}
if cfg.IDGenerator != nil {
c.IDGenerator = cfg.IDGenerator
}
config.Store(c)
}

33
vendor/go.opencensus.io/trace/config_test.go generated vendored Normal file
View File

@@ -0,0 +1,33 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"reflect"
"testing"
)
func TestApplyZeroConfig(t *testing.T) {
cfg := config.Load().(*Config)
ApplyConfig(Config{})
currentCfg := config.Load().(*Config)
if got, want := reflect.ValueOf(currentCfg.DefaultSampler).Pointer(), reflect.ValueOf(cfg.DefaultSampler).Pointer(); got != want {
t.Fatalf("config.DefaultSampler = %#v; want %#v", got, want)
}
if got, want := currentCfg.IDGenerator, cfg.IDGenerator; got != want {
t.Fatalf("config.IDGenerator = %#v; want %#v", got, want)
}
}

20
vendor/go.opencensus.io/trace/doc.go generated vendored
View File

@@ -13,25 +13,24 @@
// limitations under the License.
/*
Package trace contains types for representing trace information, and
functions for global configuration of tracing.
Package trace contains support for OpenCensus distributed tracing.
The following assumes a basic familiarity with OpenCensus concepts.
See http://opencensus.io.
See http://opencensus.io
Enabling Tracing for a Program
Exporting Traces
To use OpenCensus tracing, register at least one Exporter. You can use
To export collected tracing data, register at least one exporter. You can use
one of the provided exporters or write your own.
trace.RegisterExporter(anExporter)
trace.RegisterExporter(exporter)
By default, traces will be sampled relatively rarely. To change the sampling
frequency for your entire program, call SetDefaultSampler. Use a ProbabilitySampler
frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
trace.SetDefaultSampler(trace.AlwaysSample())
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
Adding Spans to a Trace
@@ -43,13 +42,10 @@ It is common to want to capture all the activity of a function call in a span. F
this to work, the function must take a context.Context as a parameter. Add these two
lines to the top of the function:
ctx, span := trace.StartSpan(ctx, "your choice of name")
ctx, span := trace.StartSpan(ctx, "my.org/Run")
defer span.End()
StartSpan will create a new top-level span if the context
doesn't contain another span, otherwise it will create a child span.
As a suggestion, use the fully-qualified function name as the span name, e.g.
"github.com/me/mypackage.Run".
*/
package trace // import "go.opencensus.io/trace"

21
vendor/go.opencensus.io/trace/internal/internal.go generated vendored Normal file
View File

@@ -0,0 +1,21 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal provides trace internals.
package internal
type IDGenerator interface {
NewTraceID() [16]byte
NewSpanID() [8]byte
}

Some files were not shown because too many files have changed in this diff Show More