mirror of
https://github.com/fnproject/fn.git
synced 2022-10-28 21:29:17 +03:00
The opencensus API changes between 0.6.0 and 0.9.0 (#980)
We get some useful features in later versions; update so as to not pin downstream consumers (extensions) to an older version.
This commit is contained in:
8
Gopkg.lock
generated
8
Gopkg.lock
generated
@@ -5,6 +5,7 @@
|
|||||||
name = "git.apache.org/thrift.git"
|
name = "git.apache.org/thrift.git"
|
||||||
packages = ["lib/go/thrift"]
|
packages = ["lib/go/thrift"]
|
||||||
revision = "272470790ad6db791bd6f9db399b2cd2d5879f74"
|
revision = "272470790ad6db791bd6f9db399b2cd2d5879f74"
|
||||||
|
source = "github.com/apache/thrift"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
branch = "master"
|
branch = "master"
|
||||||
@@ -529,10 +530,11 @@
|
|||||||
"stats/view",
|
"stats/view",
|
||||||
"tag",
|
"tag",
|
||||||
"trace",
|
"trace",
|
||||||
|
"trace/internal",
|
||||||
"trace/propagation"
|
"trace/propagation"
|
||||||
]
|
]
|
||||||
revision = "6e3f034057826b530038d93267906ec3c012183f"
|
revision = "10cec2c05ea2cfb8b0d856711daedc49d8a45c56"
|
||||||
version = "v0.6.0"
|
version = "v0.9.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
branch = "master"
|
branch = "master"
|
||||||
@@ -664,6 +666,6 @@
|
|||||||
[solve-meta]
|
[solve-meta]
|
||||||
analyzer-name = "dep"
|
analyzer-name = "dep"
|
||||||
analyzer-version = 1
|
analyzer-version = 1
|
||||||
inputs-digest = "321ea984c523241adc23f36302d387cebbcc05a56812fc3555d82c9c5928274c"
|
inputs-digest = "5ff01d4a02d97ec5447f99d45f47e593bb94c4581f07baefad209f25d0b88785"
|
||||||
solver-name = "gps-cdcl"
|
solver-name = "gps-cdcl"
|
||||||
solver-version = 1
|
solver-version = 1
|
||||||
|
|||||||
@@ -72,7 +72,7 @@ ignored = ["github.com/fnproject/fn/cli"]
|
|||||||
|
|
||||||
[[constraint]]
|
[[constraint]]
|
||||||
name = "go.opencensus.io"
|
name = "go.opencensus.io"
|
||||||
version = "0.6.0"
|
version = "0.9.0"
|
||||||
|
|
||||||
[[override]]
|
[[override]]
|
||||||
name = "git.apache.org/thrift.git"
|
name = "git.apache.org/thrift.git"
|
||||||
|
|||||||
@@ -19,7 +19,6 @@ import (
|
|||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"go.opencensus.io/stats"
|
"go.opencensus.io/stats"
|
||||||
"go.opencensus.io/stats/view"
|
"go.opencensus.io/stats/view"
|
||||||
"go.opencensus.io/tag"
|
|
||||||
"go.opencensus.io/trace"
|
"go.opencensus.io/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1013,7 +1012,9 @@ func (c *container) FsSize() uint64 { return c.fsSize }
|
|||||||
// WriteStat publishes each metric in the specified Stats structure as a histogram metric
|
// WriteStat publishes each metric in the specified Stats structure as a histogram metric
|
||||||
func (c *container) WriteStat(ctx context.Context, stat drivers.Stat) {
|
func (c *container) WriteStat(ctx context.Context, stat drivers.Stat) {
|
||||||
for key, value := range stat.Metrics {
|
for key, value := range stat.Metrics {
|
||||||
stats.Record(ctx, stats.FindMeasure("docker_stats_"+key).(*stats.Int64Measure).M(int64(value)))
|
if m, ok := measures[key]; ok {
|
||||||
|
stats.Record(ctx, m.M(int64(value)))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
c.statsMu.Lock()
|
c.statsMu.Lock()
|
||||||
@@ -1023,42 +1024,19 @@ func (c *container) WriteStat(ctx context.Context, stat drivers.Stat) {
|
|||||||
c.statsMu.Unlock()
|
c.statsMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var measures map[string]*stats.Int64Measure
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// TODO this is nasty figure out how to use opencensus to not have to declare these
|
// TODO this is nasty figure out how to use opencensus to not have to declare these
|
||||||
keys := []string{"net_rx", "net_tx", "mem_limit", "mem_usage", "disk_read", "disk_write", "cpu_user", "cpu_total", "cpu_kernel"}
|
keys := []string{"net_rx", "net_tx", "mem_limit", "mem_usage", "disk_read", "disk_write", "cpu_user", "cpu_total", "cpu_kernel"}
|
||||||
|
|
||||||
// TODO necessary?
|
measures = make(map[string]*stats.Int64Measure)
|
||||||
appKey, err := tag.NewKey("fn_appname")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
pathKey, err := tag.NewKey("fn_path")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
units := "bytes"
|
units := "bytes"
|
||||||
if strings.Contains(key, "cpu") {
|
if strings.Contains(key, "cpu") {
|
||||||
units = "cpu"
|
units = "cpu"
|
||||||
}
|
}
|
||||||
dockerStatsDist, err := stats.Int64("docker_stats_"+key, "docker container stats for "+key, units)
|
measures[key] = makeMeasure("docker_stats_"+key, "docker container stats for "+key, units, view.Distribution())
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
"docker_stats_"+key,
|
|
||||||
"docker container stats for "+key,
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
dockerStatsDist,
|
|
||||||
view.Distribution(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -102,101 +102,10 @@ type dockerWrap struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// TODO doing this at each call site seems not the intention of the library since measurements
|
dockerRetriesMeasure = makeMeasure("docker_api_retries", "docker api retries", "", view.Sum())
|
||||||
// need to be created and views registered. doing this up front seems painful but maybe there
|
dockerTimeoutMeasure = makeMeasure("docker_api_timeout", "docker api timeouts", "", view.Count())
|
||||||
// are benefits?
|
dockerErrorMeasure = makeMeasure("docker_api_error", "docker api errors", "", view.Count())
|
||||||
|
dockerOOMMeasure = makeMeasure("docker_oom", "docker oom", "", view.Count())
|
||||||
// TODO do we have to do this? the measurements will be tagged on the context, will they be propagated
|
|
||||||
// or we have to white list them in the view for them to show up? test...
|
|
||||||
var err error
|
|
||||||
appKey, err := tag.NewKey("fn_appname")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
pathKey, err := tag.NewKey("fn_path")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
dockerRetriesMeasure, err = stats.Int64("docker_api_retries", "docker api retries", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
"docker_api_retries",
|
|
||||||
"number of times we've retried docker API upon failure",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
dockerRetriesMeasure,
|
|
||||||
view.Sum(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
dockerTimeoutMeasure, err = stats.Int64("docker_api_timeout", "docker api timeouts", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
"docker_api_timeout_count",
|
|
||||||
"number of times we've timed out calling docker API",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
dockerTimeoutMeasure,
|
|
||||||
view.Count(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
dockerErrorMeasure, err = stats.Int64("docker_api_error", "docker api errors", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
"docker_api_error_count",
|
|
||||||
"number of unrecoverable errors from docker API",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
dockerErrorMeasure,
|
|
||||||
view.Count(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
dockerOOMMeasure, err = stats.Int64("docker_oom", "docker oom", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
"docker_oom_count",
|
|
||||||
"number of docker container oom",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
dockerOOMMeasure,
|
|
||||||
view.Count(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -447,3 +356,29 @@ func (d *dockerWrap) Stats(opts docker.StatsOptions) (err error) {
|
|||||||
//})
|
//})
|
||||||
//return err
|
//return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func makeMeasure(name string, desc string, unit string, agg *view.Aggregation) *stats.Int64Measure {
|
||||||
|
appKey, err := tag.NewKey("fn_appname")
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatal(err)
|
||||||
|
}
|
||||||
|
pathKey, err := tag.NewKey("fn_path")
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
measure := stats.Int64(name, desc, unit)
|
||||||
|
err = view.Register(
|
||||||
|
&view.View{
|
||||||
|
Name: name,
|
||||||
|
Description: desc,
|
||||||
|
TagKeys: []tag.Key{appKey, pathKey},
|
||||||
|
Measure: measure,
|
||||||
|
Aggregation: agg,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
logrus.WithError(err).Fatal("cannot create view")
|
||||||
|
}
|
||||||
|
return measure
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,10 +5,8 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"go.opencensus.io/stats"
|
"go.opencensus.io/stats"
|
||||||
"go.opencensus.io/stats/view"
|
"go.opencensus.io/stats/view"
|
||||||
"go.opencensus.io/tag"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type RequestStateType int
|
type RequestStateType int
|
||||||
@@ -140,76 +138,44 @@ func (c *containerState) UpdateState(ctx context.Context, newState ContainerStat
|
|||||||
// update old state stats
|
// update old state stats
|
||||||
gaugeKey := containerGaugeKeys[oldState]
|
gaugeKey := containerGaugeKeys[oldState]
|
||||||
if gaugeKey != "" {
|
if gaugeKey != "" {
|
||||||
stats.Record(ctx, stats.FindMeasure(gaugeKey).(*stats.Int64Measure).M(-1))
|
stats.Record(ctx, containerGaugeMeasures[oldState].M(-1))
|
||||||
}
|
}
|
||||||
|
|
||||||
timeKey := containerTimeKeys[oldState]
|
timeKey := containerTimeKeys[oldState]
|
||||||
if timeKey != "" {
|
if timeKey != "" {
|
||||||
stats.Record(ctx, stats.FindMeasure(timeKey).(*stats.Int64Measure).M(int64(now.Sub(before).Round(time.Millisecond))))
|
stats.Record(ctx, containerTimeMeasures[oldState].M(int64(now.Sub(before).Round(time.Millisecond))))
|
||||||
}
|
}
|
||||||
|
|
||||||
// update new state stats
|
// update new state stats
|
||||||
gaugeKey = containerGaugeKeys[newState]
|
gaugeKey = containerGaugeKeys[newState]
|
||||||
if gaugeKey != "" {
|
if gaugeKey != "" {
|
||||||
stats.Record(ctx, stats.FindMeasure(gaugeKey).(*stats.Int64Measure).M(1))
|
stats.Record(ctx, containerGaugeMeasures[newState].M(1))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
containerGaugeMeasures []*stats.Int64Measure
|
||||||
|
containerTimeMeasures []*stats.Int64Measure
|
||||||
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// TODO(reed): do we have to do this? the measurements will be tagged on the context, will they be propagated
|
// TODO(reed): do we have to do this? the measurements will be tagged on the context, will they be propagated
|
||||||
// or we have to white list them in the view for them to show up? test...
|
// or we have to white list them in the view for them to show up? test...
|
||||||
appKey, err := tag.NewKey("fn_appname")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
pathKey, err := tag.NewKey("fn_path")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, key := range containerGaugeKeys {
|
containerGaugeMeasures = make([]*stats.Int64Measure, len(containerGaugeKeys))
|
||||||
|
for i, key := range containerGaugeKeys {
|
||||||
if key == "" { // leave nil intentionally, let it panic
|
if key == "" { // leave nil intentionally, let it panic
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
measure, err := stats.Int64(key, "containers in state "+key, "")
|
containerGaugeMeasures[i] = makeMeasure(key, "containers in state "+key, "", view.Count())
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
key,
|
|
||||||
"containers in state "+key,
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
measure,
|
|
||||||
view.Count(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, key := range containerTimeKeys {
|
containerTimeMeasures = make([]*stats.Int64Measure, len(containerTimeKeys))
|
||||||
|
|
||||||
|
for i, key := range containerTimeKeys {
|
||||||
if key == "" {
|
if key == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
measure, err := stats.Int64(key, "time spent in container state "+key, "ms")
|
containerTimeMeasures[i] = makeMeasure(key, "time spent in container state "+key, "ms", view.Distribution())
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
key,
|
|
||||||
"time spent in container state "+key,
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
measure,
|
|
||||||
view.Distribution(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -79,13 +79,17 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// TODO(reed): doing this at each call site seems not the intention of the library since measurements
|
queuedMeasure = makeMeasure(queuedMetricName, "calls currently queued against agent", "", view.Sum())
|
||||||
// need to be created and views registered. doing this up front seems painful but maybe there
|
callsMeasure = makeMeasure(callsMetricName, "calls created in agent", "", view.Sum())
|
||||||
// are benefits?
|
runningMeasure = makeMeasure(runningMetricName, "calls currently running in agent", "", view.Sum())
|
||||||
|
completedMeasure = makeMeasure(completedMetricName, "calls completed in agent", "", view.Sum())
|
||||||
|
failedMeasure = makeMeasure(failedMetricName, "calls failed in agent", "", view.Sum())
|
||||||
|
timedoutMeasure = makeMeasure(timedoutMetricName, "calls timed out in agent", "", view.Sum())
|
||||||
|
errorsMeasure = makeMeasure(errorsMetricName, "calls errored in agent", "", view.Sum())
|
||||||
|
serverBusyMeasure = makeMeasure(serverBusyMetricName, "calls where server was too busy in agent", "", view.Sum())
|
||||||
|
}
|
||||||
|
|
||||||
// TODO(reed): do we have to do this? the measurements will be tagged on the context, will they be propagated
|
func makeMeasure(name string, desc string, unit string, agg *view.Aggregation) *stats.Int64Measure {
|
||||||
// or we have to white list them in the view for them to show up? test...
|
|
||||||
var err error
|
|
||||||
appKey, err := tag.NewKey("fn_appname")
|
appKey, err := tag.NewKey("fn_appname")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Fatal(err)
|
logrus.Fatal(err)
|
||||||
@@ -95,163 +99,18 @@ func init() {
|
|||||||
logrus.Fatal(err)
|
logrus.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
measure := stats.Int64(name, desc, unit)
|
||||||
queuedMeasure, err = stats.Int64(queuedMetricName, "calls currently queued against agent", "")
|
err = view.Register(
|
||||||
if err != nil {
|
&view.View{
|
||||||
logrus.Fatal(err)
|
Name: name,
|
||||||
}
|
Description: desc,
|
||||||
v, err := view.New(
|
TagKeys: []tag.Key{appKey, pathKey},
|
||||||
queuedMetricName,
|
Measure: measure,
|
||||||
"calls currently queued to agent",
|
Aggregation: agg,
|
||||||
[]tag.Key{appKey, pathKey},
|
},
|
||||||
queuedMeasure,
|
)
|
||||||
view.Sum(),
|
if err != nil {
|
||||||
)
|
logrus.WithError(err).Fatal("cannot create view")
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
callsMeasure, err = stats.Int64(callsMetricName, "calls created in agent", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
callsMetricName,
|
|
||||||
"calls created in agent",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
callsMeasure,
|
|
||||||
view.Sum(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
runningMeasure, err = stats.Int64(runningMetricName, "calls currently running in agent", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
runningMetricName,
|
|
||||||
"calls currently running in agent",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
runningMeasure,
|
|
||||||
view.Sum(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
completedMeasure, err = stats.Int64(completedMetricName, "calls completed in agent", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
completedMetricName,
|
|
||||||
"calls completed in agent",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
completedMeasure,
|
|
||||||
view.Sum(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
failedMeasure, err = stats.Int64(failedMetricName, "calls failed in agent", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
failedMetricName,
|
|
||||||
"calls failed in agent",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
failedMeasure,
|
|
||||||
view.Sum(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
timedoutMeasure, err = stats.Int64(timedoutMetricName, "calls timed out in agent", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
timedoutMetricName,
|
|
||||||
"calls timed out in agent",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
timedoutMeasure,
|
|
||||||
view.Sum(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
errorsMeasure, err = stats.Int64(errorsMetricName, "calls errored in agent", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
errorsMetricName,
|
|
||||||
"calls errored in agent",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
errorsMeasure,
|
|
||||||
view.Sum(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
serverBusyMeasure, err = stats.Int64(serverBusyMetricName, "calls where server was too busy in agent", "")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
v, err := view.New(
|
|
||||||
serverBusyMetricName,
|
|
||||||
"calls where server was too busy in agent",
|
|
||||||
[]tag.Key{appKey, pathKey},
|
|
||||||
serverBusyMeasure,
|
|
||||||
view.Sum(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
return measure
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -429,42 +429,34 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
uploadSizeMeasure, err = stats.Int64("s3_log_upload_size", "uploaded log size", "byte")
|
uploadSizeMeasure = stats.Int64("s3_log_upload_size", "uploaded log size", "byte")
|
||||||
if err != nil {
|
err = view.Register(
|
||||||
logrus.Fatal(err)
|
&view.View{
|
||||||
}
|
Name: "s3_log_upload_size",
|
||||||
v, err := view.New(
|
Description: "uploaded log size",
|
||||||
"s3_log_upload_size",
|
TagKeys: []tag.Key{appKey, pathKey},
|
||||||
"uploaded log size",
|
Measure: uploadSizeMeasure,
|
||||||
[]tag.Key{appKey, pathKey},
|
Aggregation: view.Distribution(),
|
||||||
uploadSizeMeasure,
|
},
|
||||||
view.Distribution(),
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
logrus.WithError(err).Fatal("cannot create view")
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
downloadSizeMeasure, err = stats.Int64("s3_log_download_size", "downloaded log size", "byte")
|
downloadSizeMeasure = stats.Int64("s3_log_download_size", "downloaded log size", "byte")
|
||||||
if err != nil {
|
err = view.Register(
|
||||||
logrus.Fatal(err)
|
&view.View{
|
||||||
}
|
Name: "s3_log_download_size",
|
||||||
v, err := view.New(
|
Description: "downloaded log size",
|
||||||
"s3_log_download_size",
|
TagKeys: []tag.Key{appKey, pathKey},
|
||||||
"downloaded log size",
|
Measure: uploadSizeMeasure,
|
||||||
[]tag.Key{appKey, pathKey},
|
Aggregation: view.Distribution(),
|
||||||
downloadSizeMeasure,
|
},
|
||||||
view.Distribution(),
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Fatalf("cannot create view: %v", err)
|
logrus.WithError(err).Fatal("cannot create view")
|
||||||
}
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
logrus.Fatal(err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -576,7 +576,7 @@ func WithJaeger(jaegerURL string) ServerOption {
|
|||||||
logrus.WithFields(logrus.Fields{"url": jaegerURL}).Info("exporting spans to jaeger")
|
logrus.WithFields(logrus.Fields{"url": jaegerURL}).Info("exporting spans to jaeger")
|
||||||
|
|
||||||
// TODO don't do this. testing parity.
|
// TODO don't do this. testing parity.
|
||||||
trace.SetDefaultSampler(trace.AlwaysSample())
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -595,7 +595,7 @@ func WithZipkin(zipkinURL string) ServerOption {
|
|||||||
logrus.WithFields(logrus.Fields{"url": zipkinURL}).Info("exporting spans to zipkin")
|
logrus.WithFields(logrus.Fields{"url": zipkinURL}).Info("exporting spans to zipkin")
|
||||||
|
|
||||||
// TODO don't do this. testing parity.
|
// TODO don't do this. testing parity.
|
||||||
trace.SetDefaultSampler(trace.AlwaysSample())
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
1
vendor/go.opencensus.io/.gitignore
generated
vendored
1
vendor/go.opencensus.io/.gitignore
generated
vendored
@@ -2,4 +2,3 @@
|
|||||||
|
|
||||||
# go.opencensus.io/exporter/aws
|
# go.opencensus.io/exporter/aws
|
||||||
/exporter/aws/
|
/exporter/aws/
|
||||||
|
|
||||||
|
|||||||
7
vendor/go.opencensus.io/.travis.yml
generated
vendored
7
vendor/go.opencensus.io/.travis.yml
generated
vendored
@@ -13,9 +13,14 @@ notifications:
|
|||||||
before_script:
|
before_script:
|
||||||
- GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
|
- GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
|
||||||
- PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
|
- PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
|
||||||
|
- curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh # Install latest dep release
|
||||||
|
- go get github.com/rakyll/embedmd
|
||||||
|
|
||||||
script:
|
script:
|
||||||
- if [ -n "$(gofmt -s -l .)" ]; then echo "gofmt the following files:"; gofmt -s -l .; exit 1; fi
|
- embedmd -d README.md # Ensure embedded code is up-to-date
|
||||||
|
- dep ensure -v
|
||||||
|
- go build ./... # Ensure dependency updates don't break build
|
||||||
|
- if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
|
||||||
- go vet ./...
|
- go vet ./...
|
||||||
- go test -v -race $PKGS # Run all the tests with the race detector enabled
|
- go test -v -race $PKGS # Run all the tests with the race detector enabled
|
||||||
- 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
|
- 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
|
||||||
|
|||||||
247
vendor/go.opencensus.io/Gopkg.lock
generated
vendored
Normal file
247
vendor/go.opencensus.io/Gopkg.lock
generated
vendored
Normal file
@@ -0,0 +1,247 @@
|
|||||||
|
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||||
|
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "cloud.google.com/go"
|
||||||
|
packages = [
|
||||||
|
"compute/metadata",
|
||||||
|
"internal/version",
|
||||||
|
"monitoring/apiv3",
|
||||||
|
"trace/apiv2"
|
||||||
|
]
|
||||||
|
revision = "29f476ffa9c4cd4fd14336b6043090ac1ad76733"
|
||||||
|
version = "v0.21.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "git.apache.org/thrift.git"
|
||||||
|
packages = ["lib/go/thrift"]
|
||||||
|
revision = "606f1ef31447526b908244933d5b716397a6bad8"
|
||||||
|
source = "github.com/apache/thrift"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/beorn7/perks"
|
||||||
|
packages = ["quantile"]
|
||||||
|
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/golang/protobuf"
|
||||||
|
packages = [
|
||||||
|
"proto",
|
||||||
|
"protoc-gen-go/descriptor",
|
||||||
|
"ptypes",
|
||||||
|
"ptypes/any",
|
||||||
|
"ptypes/duration",
|
||||||
|
"ptypes/empty",
|
||||||
|
"ptypes/timestamp",
|
||||||
|
"ptypes/wrappers"
|
||||||
|
]
|
||||||
|
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/googleapis/gax-go"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
|
||||||
|
version = "v2.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||||
|
packages = ["pbutil"]
|
||||||
|
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/openzipkin/zipkin-go"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"idgenerator",
|
||||||
|
"model",
|
||||||
|
"propagation",
|
||||||
|
"reporter",
|
||||||
|
"reporter/http"
|
||||||
|
]
|
||||||
|
revision = "f197ec29e729f226d23370ea60f0e49b8f44ccf4"
|
||||||
|
version = "v0.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/prometheus/client_golang"
|
||||||
|
packages = [
|
||||||
|
"prometheus",
|
||||||
|
"prometheus/promhttp"
|
||||||
|
]
|
||||||
|
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||||
|
version = "v0.8.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/prometheus/client_model"
|
||||||
|
packages = ["go"]
|
||||||
|
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/prometheus/common"
|
||||||
|
packages = [
|
||||||
|
"expfmt",
|
||||||
|
"internal/bitbucket.org/ww/goautoneg",
|
||||||
|
"model"
|
||||||
|
]
|
||||||
|
revision = "d0f7cd64bda49e08b22ae8a730aa57aa0db125d6"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/prometheus/procfs"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"internal/util",
|
||||||
|
"nfs",
|
||||||
|
"xfs"
|
||||||
|
]
|
||||||
|
revision = "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/net"
|
||||||
|
packages = [
|
||||||
|
"context",
|
||||||
|
"context/ctxhttp",
|
||||||
|
"http2",
|
||||||
|
"http2/hpack",
|
||||||
|
"idna",
|
||||||
|
"internal/timeseries",
|
||||||
|
"lex/httplex",
|
||||||
|
"trace"
|
||||||
|
]
|
||||||
|
revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/oauth2"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"google",
|
||||||
|
"internal",
|
||||||
|
"jws",
|
||||||
|
"jwt"
|
||||||
|
]
|
||||||
|
revision = "921ae394b9430ed4fb549668d7b087601bd60a81"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/sync"
|
||||||
|
packages = ["semaphore"]
|
||||||
|
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "golang.org/x/text"
|
||||||
|
packages = [
|
||||||
|
"collate",
|
||||||
|
"collate/build",
|
||||||
|
"internal/colltab",
|
||||||
|
"internal/gen",
|
||||||
|
"internal/tag",
|
||||||
|
"internal/triegen",
|
||||||
|
"internal/ucd",
|
||||||
|
"language",
|
||||||
|
"secure/bidirule",
|
||||||
|
"transform",
|
||||||
|
"unicode/bidi",
|
||||||
|
"unicode/cldr",
|
||||||
|
"unicode/norm",
|
||||||
|
"unicode/rangetable"
|
||||||
|
]
|
||||||
|
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||||
|
version = "v0.3.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "google.golang.org/api"
|
||||||
|
packages = [
|
||||||
|
"googleapi/transport",
|
||||||
|
"internal",
|
||||||
|
"iterator",
|
||||||
|
"option",
|
||||||
|
"support/bundler",
|
||||||
|
"transport",
|
||||||
|
"transport/grpc",
|
||||||
|
"transport/http"
|
||||||
|
]
|
||||||
|
revision = "fca24fcb41126b846105a93fb9e30f416bdd55ce"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "google.golang.org/appengine"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"internal",
|
||||||
|
"internal/app_identity",
|
||||||
|
"internal/base",
|
||||||
|
"internal/datastore",
|
||||||
|
"internal/log",
|
||||||
|
"internal/modules",
|
||||||
|
"internal/remote_api",
|
||||||
|
"internal/socket",
|
||||||
|
"internal/urlfetch",
|
||||||
|
"socket",
|
||||||
|
"urlfetch"
|
||||||
|
]
|
||||||
|
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "google.golang.org/genproto"
|
||||||
|
packages = [
|
||||||
|
"googleapis/api/annotations",
|
||||||
|
"googleapis/api/distribution",
|
||||||
|
"googleapis/api/label",
|
||||||
|
"googleapis/api/metric",
|
||||||
|
"googleapis/api/monitoredres",
|
||||||
|
"googleapis/devtools/cloudtrace/v2",
|
||||||
|
"googleapis/monitoring/v3",
|
||||||
|
"googleapis/rpc/code",
|
||||||
|
"googleapis/rpc/status",
|
||||||
|
"protobuf/field_mask"
|
||||||
|
]
|
||||||
|
revision = "51d0944304c3cbce4afe9e5247e21100037bff78"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "google.golang.org/grpc"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"balancer",
|
||||||
|
"balancer/base",
|
||||||
|
"balancer/roundrobin",
|
||||||
|
"codes",
|
||||||
|
"connectivity",
|
||||||
|
"credentials",
|
||||||
|
"credentials/oauth",
|
||||||
|
"encoding",
|
||||||
|
"encoding/proto",
|
||||||
|
"grpclb/grpc_lb_v1/messages",
|
||||||
|
"grpclog",
|
||||||
|
"internal",
|
||||||
|
"keepalive",
|
||||||
|
"metadata",
|
||||||
|
"naming",
|
||||||
|
"peer",
|
||||||
|
"reflection",
|
||||||
|
"reflection/grpc_reflection_v1alpha",
|
||||||
|
"resolver",
|
||||||
|
"resolver/dns",
|
||||||
|
"resolver/passthrough",
|
||||||
|
"stats",
|
||||||
|
"status",
|
||||||
|
"tap",
|
||||||
|
"transport"
|
||||||
|
]
|
||||||
|
revision = "d11072e7ca9811b1100b80ca0269ac831f06d024"
|
||||||
|
version = "v1.11.3"
|
||||||
|
|
||||||
|
[solve-meta]
|
||||||
|
analyzer-name = "dep"
|
||||||
|
analyzer-version = 1
|
||||||
|
inputs-digest = "1be7e5255452682d433fe616bb0987e00cb73c1172fe797b9b7a6fd2c1f53d37"
|
||||||
|
solver-name = "gps-cdcl"
|
||||||
|
solver-version = 1
|
||||||
44
vendor/go.opencensus.io/Gopkg.toml
generated
vendored
Normal file
44
vendor/go.opencensus.io/Gopkg.toml
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
[[constraint]]
|
||||||
|
name = "cloud.google.com/go"
|
||||||
|
version = "0.21.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "git.apache.org/thrift.git"
|
||||||
|
source = "github.com/apache/thrift"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/golang/protobuf"
|
||||||
|
version = "1.0.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/openzipkin/zipkin-go"
|
||||||
|
version = "0.1.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/prometheus/client_golang"
|
||||||
|
version = "0.8.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/net"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/oauth2"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "google.golang.org/api"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "google.golang.org/genproto"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "google.golang.org/grpc"
|
||||||
|
version = "1.11.3"
|
||||||
|
|
||||||
|
[prune]
|
||||||
|
go-tests = true
|
||||||
|
unused-packages = true
|
||||||
49
vendor/go.opencensus.io/README.md
generated
vendored
49
vendor/go.opencensus.io/README.md
generated
vendored
@@ -9,16 +9,15 @@ OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
|
|||||||
collecting application performance and behavior monitoring data.
|
collecting application performance and behavior monitoring data.
|
||||||
Currently it consists of three major components: tags, stats, and tracing.
|
Currently it consists of three major components: tags, stats, and tracing.
|
||||||
|
|
||||||
This project is still at a very early stage of development. The API is changing
|
|
||||||
rapidly, vendoring is recommended.
|
|
||||||
|
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
```
|
```
|
||||||
$ go get -u go.opencensus.io
|
$ go get -u go.opencensus.io
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy).
|
||||||
|
The use of vendoring or a dependency management tool is recommended.
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
OpenCensus Go libraries require Go 1.8 or later.
|
OpenCensus Go libraries require Go 1.8 or later.
|
||||||
@@ -53,17 +52,14 @@ then add additional custom instrumentation if needed.
|
|||||||
|
|
||||||
## Tags
|
## Tags
|
||||||
|
|
||||||
Tags represent propagated key-value pairs. They are propagated using context.Context
|
Tags represent propagated key-value pairs. They are propagated using `context.Context`
|
||||||
in the same process or can be encoded to be transmitted on the wire and decoded back
|
in the same process or can be encoded to be transmitted on the wire. Usually, this will
|
||||||
to a tag.Map at the destination.
|
be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
|
||||||
|
for gRPC.
|
||||||
|
|
||||||
Package tag provides a builder to create tag maps and put it
|
Package tag allows adding or modifying tags in the current context.
|
||||||
into the current context.
|
|
||||||
To propagate a tag map to downstream methods and RPCs, New
|
|
||||||
will add the produced tag map to the current context.
|
|
||||||
If there is already a tag map in the current context, it will be replaced.
|
|
||||||
|
|
||||||
[embedmd]:# (tags.go new)
|
[embedmd]:# (internal/readme/tags.go new)
|
||||||
```go
|
```go
|
||||||
ctx, err = tag.New(ctx,
|
ctx, err = tag.New(ctx,
|
||||||
tag.Insert(osKey, "macOS-10.12.5"),
|
tag.Insert(osKey, "macOS-10.12.5"),
|
||||||
@@ -91,7 +87,7 @@ Measurements are data points associated with a measure.
|
|||||||
Recording implicitly tags the set of Measurements with the tags from the
|
Recording implicitly tags the set of Measurements with the tags from the
|
||||||
provided context:
|
provided context:
|
||||||
|
|
||||||
[embedmd]:# (stats.go record)
|
[embedmd]:# (internal/readme/stats.go record)
|
||||||
```go
|
```go
|
||||||
stats.Record(ctx, videoSize.M(102478))
|
stats.Record(ctx, videoSize.M(102478))
|
||||||
```
|
```
|
||||||
@@ -103,25 +99,23 @@ set of recorded data points (measurements).
|
|||||||
|
|
||||||
Views have two parts: the tags to group by and the aggregation type used.
|
Views have two parts: the tags to group by and the aggregation type used.
|
||||||
|
|
||||||
Currently four types of aggregations are supported:
|
Currently three types of aggregations are supported:
|
||||||
* CountAggregation is used to count the number of times a sample was recorded.
|
* CountAggregation is used to count the number of times a sample was recorded.
|
||||||
* DistributionAggregation is used to provide a histogram of the values of the samples.
|
* DistributionAggregation is used to provide a histogram of the values of the samples.
|
||||||
* SumAggregation is used to sum up all sample values.
|
* SumAggregation is used to sum up all sample values.
|
||||||
* MeanAggregation is used to calculate the mean of sample values.
|
|
||||||
|
|
||||||
[embedmd]:# (stats.go aggs)
|
[embedmd]:# (internal/readme/stats.go aggs)
|
||||||
```go
|
```go
|
||||||
distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
|
distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
|
||||||
countAgg := view.Count()
|
countAgg := view.Count()
|
||||||
sumAgg := view.Sum()
|
sumAgg := view.Sum()
|
||||||
meanAgg := view.Mean()
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Here we create a view with the DistributionAggregation over our measure.
|
Here we create a view with the DistributionAggregation over our measure.
|
||||||
|
|
||||||
[embedmd]:# (stats.go view)
|
[embedmd]:# (internal/readme/stats.go view)
|
||||||
```go
|
```go
|
||||||
if err = view.Subscribe(&view.View{
|
if err := view.Register(&view.View{
|
||||||
Name: "my.org/video_size_distribution",
|
Name: "my.org/video_size_distribution",
|
||||||
Description: "distribution of processed video size over time",
|
Description: "distribution of processed video size over time",
|
||||||
Measure: videoSize,
|
Measure: videoSize,
|
||||||
@@ -136,7 +130,7 @@ exported via the registered exporters.
|
|||||||
|
|
||||||
## Traces
|
## Traces
|
||||||
|
|
||||||
[embedmd]:# (trace.go startend)
|
[embedmd]:# (internal/readme/trace.go startend)
|
||||||
```go
|
```go
|
||||||
ctx, span := trace.StartSpan(ctx, "your choice of name")
|
ctx, span := trace.StartSpan(ctx, "your choice of name")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
@@ -147,7 +141,7 @@ defer span.End()
|
|||||||
OpenCensus tags can be applied as profiler labels
|
OpenCensus tags can be applied as profiler labels
|
||||||
for users who are on Go 1.9 and above.
|
for users who are on Go 1.9 and above.
|
||||||
|
|
||||||
[embedmd]:# (tags.go profiler)
|
[embedmd]:# (internal/readme/tags.go profiler)
|
||||||
```go
|
```go
|
||||||
ctx, err = tag.New(ctx,
|
ctx, err = tag.New(ctx,
|
||||||
tag.Insert(osKey, "macOS-10.12.5"),
|
tag.Insert(osKey, "macOS-10.12.5"),
|
||||||
@@ -167,6 +161,15 @@ A screenshot of the CPU profile from the program above:
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
## Deprecation Policy
|
||||||
|
|
||||||
|
Before version 1.0.0, the following deprecation policy will be observed:
|
||||||
|
|
||||||
|
No backwards-incompatible changes will be made except for the removal of symbols that have
|
||||||
|
been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
|
||||||
|
removing the *Deprecated* functionality will be made no sooner than 28 days after the first
|
||||||
|
release in which the functionality was marked *Deprecated*.
|
||||||
|
|
||||||
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
|
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
|
||||||
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
|
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
|
||||||
[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
|
[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
|
||||||
@@ -181,7 +184,7 @@ A screenshot of the CPU profile from the program above:
|
|||||||
[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
|
[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
|
||||||
|
|
||||||
[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus
|
[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus
|
||||||
[exporter-stackdriver]: https://godoc.org/go.opencensus.io/exporter/stackdriver
|
[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
|
||||||
[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
|
[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
|
||||||
[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
|
[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
|
||||||
[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws
|
[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws
|
||||||
|
|||||||
2
vendor/go.opencensus.io/examples/exporter/exporter.go
generated
vendored
2
vendor/go.opencensus.io/examples/exporter/exporter.go
generated
vendored
@@ -12,7 +12,7 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
package exporter
|
package exporter // import "go.opencensus.io/examples/exporter"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
"log"
|
||||||
|
|||||||
2
vendor/go.opencensus.io/examples/grpc/README.md
generated
vendored
2
vendor/go.opencensus.io/examples/grpc/README.md
generated
vendored
@@ -7,7 +7,7 @@ This example uses:
|
|||||||
* Debugging exporters to print stats and traces to stdout.
|
* Debugging exporters to print stats and traces to stdout.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ go get go.opencensus.io/examples/grpc
|
$ go get go.opencensus.io/examples/grpc/...
|
||||||
```
|
```
|
||||||
|
|
||||||
First, run the server:
|
First, run the server:
|
||||||
|
|||||||
4
vendor/go.opencensus.io/examples/grpc/helloworld_client/main.go
generated
vendored
4
vendor/go.opencensus.io/examples/grpc/helloworld_client/main.go
generated
vendored
@@ -37,8 +37,8 @@ func main() {
|
|||||||
// the collected data.
|
// the collected data.
|
||||||
view.RegisterExporter(&exporter.PrintExporter{})
|
view.RegisterExporter(&exporter.PrintExporter{})
|
||||||
|
|
||||||
// Subscribe to collect client request count.
|
// Register the view to collect gRPC client stats.
|
||||||
if err := ocgrpc.ClientErrorCountView.Subscribe(); err != nil {
|
if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
8
vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go
generated
vendored
8
vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go
generated
vendored
@@ -31,7 +31,6 @@ import (
|
|||||||
"go.opencensus.io/zpages"
|
"go.opencensus.io/zpages"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/reflection"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const port = ":50051"
|
const port = ":50051"
|
||||||
@@ -56,8 +55,8 @@ func main() {
|
|||||||
// the collected data.
|
// the collected data.
|
||||||
view.RegisterExporter(&exporter.PrintExporter{})
|
view.RegisterExporter(&exporter.PrintExporter{})
|
||||||
|
|
||||||
// Subscribe to collect server request count.
|
// Register the views to collect server request count.
|
||||||
if err := view.Subscribe(ocgrpc.DefaultServerViews...); err != nil {
|
if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -70,8 +69,7 @@ func main() {
|
|||||||
// stats handler to enable stats and tracing.
|
// stats handler to enable stats and tracing.
|
||||||
s := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
|
s := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
|
||||||
pb.RegisterGreeterServer(s, &server{})
|
pb.RegisterGreeterServer(s, &server{})
|
||||||
// Register reflection service on gRPC server.
|
|
||||||
reflection.Register(s)
|
|
||||||
if err := s.Serve(lis); err != nil {
|
if err := s.Serve(lis); err != nil {
|
||||||
log.Fatalf("Failed to serve: %v", err)
|
log.Fatalf("Failed to serve: %v", err)
|
||||||
}
|
}
|
||||||
|
|||||||
2
vendor/go.opencensus.io/examples/grpc/proto/helloworld.pb.go
generated
vendored
2
vendor/go.opencensus.io/examples/grpc/proto/helloworld.pb.go
generated
vendored
@@ -11,7 +11,7 @@ It has these top-level messages:
|
|||||||
HelloRequest
|
HelloRequest
|
||||||
HelloReply
|
HelloReply
|
||||||
*/
|
*/
|
||||||
package helloworld
|
package helloworld // import "go.opencensus.io/examples/grpc/proto"
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
import proto "github.com/golang/protobuf/proto"
|
||||||
import fmt "fmt"
|
import fmt "fmt"
|
||||||
|
|||||||
9
vendor/go.opencensus.io/examples/helloworld/main.go
generated
vendored
9
vendor/go.opencensus.io/examples/helloworld/main.go
generated
vendored
@@ -53,15 +53,12 @@ func main() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
videoSize, err = stats.Int64("my.org/measure/video_size", "size of processed videos", "MBy")
|
videoSize = stats.Int64("my.org/measure/video_size", "size of processed videos", stats.UnitBytes)
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Video size measure not created: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create view to see the processed video size
|
// Create view to see the processed video size
|
||||||
// distribution broken down by frontend.
|
// distribution broken down by frontend.
|
||||||
// Subscribe will allow view data to be exported.
|
// Register will allow view data to be exported.
|
||||||
if err := view.Subscribe(&view.View{
|
if err := view.Register(&view.View{
|
||||||
Name: "my.org/views/video_size",
|
Name: "my.org/views/video_size",
|
||||||
Description: "processed video size over time",
|
Description: "processed video size over time",
|
||||||
TagKeys: []tag.Key{frontendKey},
|
TagKeys: []tag.Key{frontendKey},
|
||||||
|
|||||||
2
vendor/go.opencensus.io/examples/http/README.md
generated
vendored
2
vendor/go.opencensus.io/examples/http/README.md
generated
vendored
@@ -7,7 +7,7 @@ This example uses:
|
|||||||
* Debugging exporters to print stats and traces to stdout.
|
* Debugging exporters to print stats and traces to stdout.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ go get go.opencensus.io/examples/http
|
$ go get go.opencensus.io/examples/http/...
|
||||||
```
|
```
|
||||||
|
|
||||||
First, run the server:
|
First, run the server:
|
||||||
|
|||||||
2
vendor/go.opencensus.io/examples/http/helloworld_client/main.go
generated
vendored
2
vendor/go.opencensus.io/examples/http/helloworld_client/main.go
generated
vendored
@@ -35,7 +35,7 @@ func main() {
|
|||||||
trace.RegisterExporter(exporter)
|
trace.RegisterExporter(exporter)
|
||||||
|
|
||||||
// Always trace for this demo.
|
// Always trace for this demo.
|
||||||
trace.SetDefaultSampler(trace.AlwaysSample())
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||||
|
|
||||||
// Report stats at every second.
|
// Report stats at every second.
|
||||||
view.SetReportingPeriod(1 * time.Second)
|
view.SetReportingPeriod(1 * time.Second)
|
||||||
|
|||||||
8
vendor/go.opencensus.io/examples/http/helloworld_server/main.go
generated
vendored
8
vendor/go.opencensus.io/examples/http/helloworld_server/main.go
generated
vendored
@@ -37,7 +37,7 @@ func main() {
|
|||||||
trace.RegisterExporter(exporter)
|
trace.RegisterExporter(exporter)
|
||||||
|
|
||||||
// Always trace for this demo.
|
// Always trace for this demo.
|
||||||
trace.SetDefaultSampler(trace.AlwaysSample())
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||||
|
|
||||||
// Report stats at every second.
|
// Report stats at every second.
|
||||||
view.SetReportingPeriod(1 * time.Second)
|
view.SetReportingPeriod(1 * time.Second)
|
||||||
@@ -50,12 +50,14 @@ func main() {
|
|||||||
r, _ := http.NewRequest("GET", "https://example.com", nil)
|
r, _ := http.NewRequest("GET", "https://example.com", nil)
|
||||||
|
|
||||||
// Propagate the trace header info in the outgoing requests.
|
// Propagate the trace header info in the outgoing requests.
|
||||||
r = req.WithContext(req.Context())
|
r = r.WithContext(req.Context())
|
||||||
resp, err := client.Do(r)
|
resp, err := client.Do(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Println(err)
|
log.Println(err)
|
||||||
|
} else {
|
||||||
|
// TODO: handle response
|
||||||
|
resp.Body.Close()
|
||||||
}
|
}
|
||||||
_ = resp // handle response
|
|
||||||
})
|
})
|
||||||
log.Fatal(http.ListenAndServe(":50030", &ochttp.Handler{}))
|
log.Fatal(http.ListenAndServe(":50030", &ochttp.Handler{}))
|
||||||
}
|
}
|
||||||
|
|||||||
2
vendor/go.opencensus.io/exporter/jaeger/example/main.go
generated
vendored
2
vendor/go.opencensus.io/exporter/jaeger/example/main.go
generated
vendored
@@ -39,7 +39,7 @@ func main() {
|
|||||||
trace.RegisterExporter(exporter)
|
trace.RegisterExporter(exporter)
|
||||||
|
|
||||||
// For demoing purposes, always sample.
|
// For demoing purposes, always sample.
|
||||||
trace.SetDefaultSampler(trace.AlwaysSample())
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||||
|
|
||||||
ctx, span := trace.StartSpan(ctx, "/foo")
|
ctx, span := trace.StartSpan(ctx, "/foo")
|
||||||
bar(ctx)
|
bar(ctx)
|
||||||
|
|||||||
2
vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/jaeger.go
generated
vendored
2
vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/jaeger.go
generated
vendored
@@ -1,7 +1,7 @@
|
|||||||
// Autogenerated by Thrift Compiler (0.11.0)
|
// Autogenerated by Thrift Compiler (0.11.0)
|
||||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
||||||
|
|
||||||
package jaeger
|
package jaeger // import "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
|||||||
2
vendor/go.opencensus.io/exporter/jaeger/jaeger_test.go
generated
vendored
2
vendor/go.opencensus.io/exporter/jaeger/jaeger_test.go
generated
vendored
@@ -98,7 +98,7 @@ func Test_spanDataToThrift(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Status: trace.Status{Code: 2, Message: "error"},
|
Status: trace.Status{Code: trace.StatusCodeUnknown, Message: "error"},
|
||||||
},
|
},
|
||||||
want: &gen.Span{
|
want: &gen.Span{
|
||||||
TraceIdLow: 651345242494996240,
|
TraceIdLow: 651345242494996240,
|
||||||
|
|||||||
20
vendor/go.opencensus.io/exporter/prometheus/example/main.go
generated
vendored
20
vendor/go.opencensus.io/exporter/prometheus/example/main.go
generated
vendored
@@ -28,6 +28,13 @@ import (
|
|||||||
"go.opencensus.io/stats/view"
|
"go.opencensus.io/stats/view"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Create measures. The program will record measures for the size of
|
||||||
|
// processed videos and the number of videos marked as spam.
|
||||||
|
var (
|
||||||
|
videoCount = stats.Int64("my.org/measures/video_count", "number of processed videos", stats.UnitDimensionless)
|
||||||
|
videoSize = stats.Int64("my.org/measures/video_size", "size of processed video", stats.UnitBytes)
|
||||||
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
@@ -37,22 +44,11 @@ func main() {
|
|||||||
}
|
}
|
||||||
view.RegisterExporter(exporter)
|
view.RegisterExporter(exporter)
|
||||||
|
|
||||||
// Create measures. The program will record measures for the size of
|
|
||||||
// processed videos and the number of videos marked as spam.
|
|
||||||
videoCount, err := stats.Int64("my.org/measures/video_count", "number of processed videos", "")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Video count measure not created: %v", err)
|
|
||||||
}
|
|
||||||
videoSize, err := stats.Int64("my.org/measures/video_size", "size of processed video", "MBy")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Video size measure not created: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create view to see the number of processed videos cumulatively.
|
// Create view to see the number of processed videos cumulatively.
|
||||||
// Create view to see the amount of video processed
|
// Create view to see the amount of video processed
|
||||||
// Subscribe will allow view data to be exported.
|
// Subscribe will allow view data to be exported.
|
||||||
// Once no longer needed, you can unsubscribe from the view.
|
// Once no longer needed, you can unsubscribe from the view.
|
||||||
if err = view.Subscribe(
|
if err = view.Register(
|
||||||
&view.View{
|
&view.View{
|
||||||
Name: "video_count",
|
Name: "video_count",
|
||||||
Description: "number of videos processed over time",
|
Description: "number of videos processed over time",
|
||||||
|
|||||||
52
vendor/go.opencensus.io/exporter/prometheus/prometheus.go
generated
vendored
52
vendor/go.opencensus.io/exporter/prometheus/prometheus.go
generated
vendored
@@ -23,6 +23,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"go.opencensus.io/internal"
|
"go.opencensus.io/internal"
|
||||||
@@ -33,10 +34,6 @@ import (
|
|||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
defaultNamespace = "opencensus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Exporter exports stats to Prometheus, users need
|
// Exporter exports stats to Prometheus, users need
|
||||||
// to register the exporter as an http.Handler to be
|
// to register the exporter as an http.Handler to be
|
||||||
// able to export.
|
// able to export.
|
||||||
@@ -71,9 +68,6 @@ func NewExporter(o Options) (*Exporter, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func newExporter(o Options) (*Exporter, error) {
|
func newExporter(o Options) (*Exporter, error) {
|
||||||
if o.Namespace == "" {
|
|
||||||
o.Namespace = defaultNamespace
|
|
||||||
}
|
|
||||||
if o.Registry == nil {
|
if o.Registry == nil {
|
||||||
o.Registry = prometheus.NewRegistry()
|
o.Registry = prometheus.NewRegistry()
|
||||||
}
|
}
|
||||||
@@ -144,10 +138,8 @@ func (o *Options) onError(err error) {
|
|||||||
// ExportView exports to the Prometheus if view data has one or more rows.
|
// ExportView exports to the Prometheus if view data has one or more rows.
|
||||||
// Each OpenCensus AggregationData will be converted to
|
// Each OpenCensus AggregationData will be converted to
|
||||||
// corresponding Prometheus Metric: SumData will be converted
|
// corresponding Prometheus Metric: SumData will be converted
|
||||||
// to Untyped Metric, CountData will be Counter Metric,
|
// to Untyped Metric, CountData will be a Counter Metric,
|
||||||
// DistributionData will be Histogram Metric, and MeanData
|
// DistributionData will be a Histogram Metric.
|
||||||
// will be Summary Metric. Please note the Summary Metric from
|
|
||||||
// MeanData does not have any quantiles.
|
|
||||||
func (e *Exporter) ExportView(vd *view.Data) {
|
func (e *Exporter) ExportView(vd *view.Data) {
|
||||||
if len(vd.Rows) == 0 {
|
if len(vd.Rows) == 0 {
|
||||||
return
|
return
|
||||||
@@ -232,20 +224,40 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
|||||||
func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) {
|
func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) {
|
||||||
switch data := row.Data.(type) {
|
switch data := row.Data.(type) {
|
||||||
case *view.CountData:
|
case *view.CountData:
|
||||||
return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(*data), tagValues(row.Tags)...)
|
return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags)...)
|
||||||
|
|
||||||
case *view.DistributionData:
|
case *view.DistributionData:
|
||||||
points := make(map[float64]uint64)
|
points := make(map[float64]uint64)
|
||||||
|
// Histograms are cumulative in Prometheus.
|
||||||
|
// 1. Sort buckets in ascending order but retain
|
||||||
|
// their indices for reverse lookup later on.
|
||||||
|
// TODO: If there is a guarantee that distribution elements
|
||||||
|
// are always sorted, then skip the sorting.
|
||||||
|
indicesMap := make(map[float64]int)
|
||||||
|
buckets := make([]float64, 0, len(v.Aggregation.Buckets))
|
||||||
for i, b := range v.Aggregation.Buckets {
|
for i, b := range v.Aggregation.Buckets {
|
||||||
points[b] = uint64(data.CountPerBucket[i])
|
if _, ok := indicesMap[b]; !ok {
|
||||||
|
indicesMap[b] = i
|
||||||
|
buckets = append(buckets, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Float64s(buckets)
|
||||||
|
|
||||||
|
// 2. Now that the buckets are sorted by magnitude
|
||||||
|
// we can compute cumulative counts, mapping each bound back to its original bucket index
|
||||||
|
cumCount := uint64(0)
|
||||||
|
for _, b := range buckets {
|
||||||
|
i := indicesMap[b]
|
||||||
|
cumCount += uint64(data.CountPerBucket[i])
|
||||||
|
points[b] = cumCount
|
||||||
}
|
}
|
||||||
return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...)
|
return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...)
|
||||||
|
|
||||||
case *view.MeanData:
|
|
||||||
return prometheus.NewConstSummary(desc, uint64(data.Count), data.Sum(), make(map[float64]float64), tagValues(row.Tags)...)
|
|
||||||
|
|
||||||
case *view.SumData:
|
case *view.SumData:
|
||||||
return prometheus.NewConstMetric(desc, prometheus.UntypedValue, float64(*data), tagValues(row.Tags)...)
|
return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...)
|
||||||
|
|
||||||
|
case *view.LastValueData:
|
||||||
|
return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation)
|
return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation)
|
||||||
@@ -285,7 +297,11 @@ func tagValues(t []tag.Tag) []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func viewName(namespace string, v *view.View) string {
|
func viewName(namespace string, v *view.View) string {
|
||||||
return namespace + "_" + internal.Sanitize(v.Name)
|
var name string
|
||||||
|
if namespace != "" {
|
||||||
|
name = namespace + "_"
|
||||||
|
}
|
||||||
|
return name + internal.Sanitize(v.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func viewSignature(namespace string, v *view.View) string {
|
func viewSignature(namespace string, v *view.View) string {
|
||||||
|
|||||||
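The DistributionData branch added above exists because Prometheus histogram buckets are cumulative, while OpenCensus reports per-bucket counts. A standalone sketch of the same accumulation, with made-up bounds and counts:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        bounds := []float64{1, 5, 10}         // view.Distribution bounds (illustrative)
        countPerBucket := []int64{2, 0, 3, 1} // last entry is the +Inf overflow bucket

        // Deduplicate bounds but remember each bound's original index.
        indices := make(map[float64]int)
        buckets := make([]float64, 0, len(bounds))
        for i, b := range bounds {
            if _, ok := indices[b]; !ok {
                indices[b] = i
                buckets = append(buckets, b)
            }
        }
        sort.Float64s(buckets)

        // Each Prometheus upper bound carries the running total of everything at or below it.
        points := make(map[float64]uint64)
        var cum uint64
        for _, b := range buckets {
            cum += uint64(countPerBucket[indices[b]])
            points[b] = cum
        }
        fmt.Println(points) // map[1:2 5:2 10:5]
    }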
167
vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go
generated
vendored
167
vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go
generated
vendored
@@ -18,7 +18,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -34,10 +33,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func newView(measureName string, agg *view.Aggregation) *view.View {
|
func newView(measureName string, agg *view.Aggregation) *view.View {
|
||||||
m, err := stats.Int64(measureName, "bytes", stats.UnitBytes)
|
m := stats.Int64(measureName, "bytes", stats.UnitBytes)
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
return &view.View{
|
return &view.View{
|
||||||
Name: "foo",
|
Name: "foo",
|
||||||
Description: "bar",
|
Description: "bar",
|
||||||
@@ -48,11 +44,7 @@ func newView(measureName string, agg *view.Aggregation) *view.View {
|
|||||||
|
|
||||||
func TestOnlyCumulativeWindowSupported(t *testing.T) {
|
func TestOnlyCumulativeWindowSupported(t *testing.T) {
|
||||||
// See Issue https://github.com/census-instrumentation/opencensus-go/issues/214.
|
// See Issue https://github.com/census-instrumentation/opencensus-go/issues/214.
|
||||||
count1 := view.CountData(1)
|
count1 := &view.CountData{Value: 1}
|
||||||
mean1 := view.MeanData{
|
|
||||||
Mean: 4.5,
|
|
||||||
Count: 5,
|
|
||||||
}
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
vds *view.Data
|
vds *view.Data
|
||||||
want int
|
want int
|
||||||
@@ -67,16 +59,7 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) {
|
|||||||
vds: &view.Data{
|
vds: &view.Data{
|
||||||
View: newView("TestOnlyCumulativeWindowSupported/m2", view.Count()),
|
View: newView("TestOnlyCumulativeWindowSupported/m2", view.Count()),
|
||||||
Rows: []*view.Row{
|
Rows: []*view.Row{
|
||||||
{Data: &count1},
|
{Data: count1},
|
||||||
},
|
|
||||||
},
|
|
||||||
want: 1,
|
|
||||||
},
|
|
||||||
2: {
|
|
||||||
vds: &view.Data{
|
|
||||||
View: newView("TestOnlyCumulativeWindowSupported/m3", view.Mean()),
|
|
||||||
Rows: []*view.Row{
|
|
||||||
{Data: &mean1},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
want: 1,
|
want: 1,
|
||||||
@@ -143,11 +126,9 @@ func TestCollectNonRacy(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
for i := 0; i < 1e3; i++ {
|
for i := 0; i < 1e3; i++ {
|
||||||
count1 := view.CountData(1)
|
count1 := &view.CountData{Value: 1}
|
||||||
mean1 := &view.MeanData{Mean: 4.5, Count: 5}
|
|
||||||
vds := []*view.Data{
|
vds := []*view.Data{
|
||||||
{View: newView(fmt.Sprintf("TestCollectNonRacy/m1-%d", i), view.Mean()), Rows: []*view.Row{{Data: mean1}}},
|
{View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.Count()), Rows: []*view.Row{{Data: count1}}},
|
||||||
{View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.Count()), Rows: []*view.Row{{Data: &count1}}},
|
|
||||||
}
|
}
|
||||||
for _, v := range vds {
|
for _, v := range vds {
|
||||||
exp.ExportView(v)
|
exp.ExportView(v)
|
||||||
@@ -190,28 +171,24 @@ func TestCollectNonRacy(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
type mCreator struct {
|
|
||||||
m *stats.Int64Measure
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
type mSlice []*stats.Int64Measure
|
type mSlice []*stats.Int64Measure
|
||||||
|
|
||||||
func (mc *mCreator) createAndAppend(measures *mSlice, name, desc, unit string) {
|
func (measures *mSlice) createAndAppend(name, desc, unit string) {
|
||||||
mc.m, mc.err = stats.Int64(name, desc, unit)
|
m := stats.Int64(name, desc, unit)
|
||||||
*measures = append(*measures, mc.m)
|
*measures = append(*measures, m)
|
||||||
}
|
}
|
||||||
|
|
||||||
type vCreator struct {
|
type vCreator []*view.View
|
||||||
v *view.View
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vc *vCreator) createAndSubscribe(name, description string, keys []tag.Key, measure stats.Measure, agg *view.Aggregation) {
|
func (vc *vCreator) createAndAppend(name, description string, keys []tag.Key, measure stats.Measure, agg *view.Aggregation) {
|
||||||
vc.v, vc.err = view.New(name, description, keys, measure, agg)
|
v := &view.View{
|
||||||
if err := vc.v.Subscribe(); err != nil {
|
Name: name,
|
||||||
vc.err = err
|
Description: description,
|
||||||
|
TagKeys: keys,
|
||||||
|
Measure: measure,
|
||||||
|
Aggregation: agg,
|
||||||
}
|
}
|
||||||
|
*vc = append(*vc, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMetricsEndpointOutput(t *testing.T) {
|
func TestMetricsEndpointOutput(t *testing.T) {
|
||||||
@@ -223,22 +200,21 @@ func TestMetricsEndpointOutput(t *testing.T) {
|
|||||||
|
|
||||||
names := []string{"foo", "bar", "baz"}
|
names := []string{"foo", "bar", "baz"}
|
||||||
|
|
||||||
measures := make(mSlice, 0)
|
var measures mSlice
|
||||||
mc := &mCreator{}
|
|
||||||
for _, name := range names {
|
for _, name := range names {
|
||||||
mc.createAndAppend(&measures, "tests/"+name, name, "")
|
measures.createAndAppend("tests/"+name, name, "")
|
||||||
}
|
|
||||||
if mc.err != nil {
|
|
||||||
t.Errorf("failed to create measures: %v", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
vc := &vCreator{}
|
var vc vCreator
|
||||||
for _, m := range measures {
|
for _, m := range measures {
|
||||||
vc.createAndSubscribe(m.Name(), m.Description(), nil, m, view.Count())
|
vc.createAndAppend(m.Name(), m.Description(), nil, m, view.Count())
|
||||||
}
|
}
|
||||||
if vc.err != nil {
|
|
||||||
|
if err := view.Register(vc...); err != nil {
|
||||||
t.Fatalf("failed to create views: %v", err)
|
t.Fatalf("failed to create views: %v", err)
|
||||||
}
|
}
|
||||||
|
defer view.Unregister(vc...)
|
||||||
|
|
||||||
view.SetReportingPeriod(time.Millisecond)
|
view.SetReportingPeriod(time.Millisecond)
|
||||||
|
|
||||||
for _, m := range measures {
|
for _, m := range measures {
|
||||||
@@ -251,7 +227,8 @@ func TestMetricsEndpointOutput(t *testing.T) {
|
|||||||
var i int
|
var i int
|
||||||
var output string
|
var output string
|
||||||
for {
|
for {
|
||||||
if i == 10000 {
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
if i == 1000 {
|
||||||
t.Fatal("no output at /metrics (10s wait)")
|
t.Fatal("no output at /metrics (10s wait)")
|
||||||
}
|
}
|
||||||
i++
|
i++
|
||||||
@@ -271,7 +248,6 @@ func TestMetricsEndpointOutput(t *testing.T) {
|
|||||||
if output != "" {
|
if output != "" {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
time.Sleep(time.Millisecond)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if strings.Contains(output, "collected before with the same name and label values") {
|
if strings.Contains(output, "collected before with the same name and label values") {
|
||||||
@@ -283,8 +259,95 @@ func TestMetricsEndpointOutput(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, name := range names {
|
for _, name := range names {
|
||||||
if !strings.Contains(output, "opencensus_tests_"+name+" 1") {
|
if !strings.Contains(output, "tests_"+name+" 1") {
|
||||||
t.Fatalf("measurement missing in output: %v", name)
|
t.Fatalf("measurement missing in output: %v", name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCumulativenessFromHistograms(t *testing.T) {
|
||||||
|
exporter, err := newExporter(Options{})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create prometheus exporter: %v", err)
|
||||||
|
}
|
||||||
|
view.RegisterExporter(exporter)
|
||||||
|
reportPeriod := time.Millisecond
|
||||||
|
view.SetReportingPeriod(reportPeriod)
|
||||||
|
|
||||||
|
m := stats.Float64("tests/bills", "payments by denomination", stats.UnitDimensionless)
|
||||||
|
v := &view.View{
|
||||||
|
Name: "cash/register",
|
||||||
|
Description: "this is a test",
|
||||||
|
Measure: m,
|
||||||
|
|
||||||
|
// Intentionally used repeated elements in the ascending distribution
|
||||||
|
// to ensure duplicate distribution items are handled.
|
||||||
|
Aggregation: view.Distribution(1, 5, 5, 5, 5, 10, 20, 50, 100, 250),
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := view.Register(v); err != nil {
|
||||||
|
t.Fatalf("Register error: %v", err)
|
||||||
|
}
|
||||||
|
defer view.Unregister(v)
|
||||||
|
|
||||||
|
// Give the reporter ample time to process registration
|
||||||
|
<-time.After(10 * reportPeriod)
|
||||||
|
|
||||||
|
values := []float64{0.25, 245.67, 12, 1.45, 199.9, 7.69, 187.12}
|
||||||
|
// We want the results that look like this:
|
||||||
|
// 1: [0.25] | 1 + prev(i) = 1 + 0 = 1
|
||||||
|
// 5: [1.45] | 1 + prev(i) = 1 + 1 = 2
|
||||||
|
// 10: [7.69] | 1 + prev(i) = 1 + 2 = 3
|
||||||
|
// 20: [12] | 1 + prev(i) = 1 + 3 = 4
|
||||||
|
// 50: [] | 0 + prev(i) = 0 + 4 = 4
|
||||||
|
// 100: [] | 0 + prev(i) = 0 + 4 = 4
|
||||||
|
// 250: [187.12, 199.9, 245.67] | 3 + prev(i) = 3 + 4 = 7
|
||||||
|
wantLines := []string{
|
||||||
|
`cash_register_bucket{le="1"} 1`,
|
||||||
|
`cash_register_bucket{le="5"} 2`,
|
||||||
|
`cash_register_bucket{le="10"} 3`,
|
||||||
|
`cash_register_bucket{le="20"} 4`,
|
||||||
|
`cash_register_bucket{le="50"} 4`,
|
||||||
|
`cash_register_bucket{le="100"} 4`,
|
||||||
|
`cash_register_bucket{le="250"} 7`,
|
||||||
|
`cash_register_bucket{le="+Inf"} 7`,
|
||||||
|
`cash_register_sum 654.0799999999999`, // Summation of the input values
|
||||||
|
`cash_register_count 7`,
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ms := make([]stats.Measurement, 0, len(values))
|
||||||
|
for _, value := range values {
|
||||||
|
mx := m.M(value)
|
||||||
|
ms = append(ms, mx)
|
||||||
|
}
|
||||||
|
stats.Record(ctx, ms...)
|
||||||
|
|
||||||
|
// Give the recorder ample time to process recording
|
||||||
|
<-time.After(10 * reportPeriod)
|
||||||
|
|
||||||
|
cst := httptest.NewServer(exporter)
|
||||||
|
defer cst.Close()
|
||||||
|
res, err := http.Get(cst.URL)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("http.Get error: %v", err)
|
||||||
|
}
|
||||||
|
blob, err := ioutil.ReadAll(res.Body)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Read body error: %v", err)
|
||||||
|
}
|
||||||
|
str := strings.Trim(string(blob), "\n")
|
||||||
|
lines := strings.Split(str, "\n")
|
||||||
|
nonComments := make([]string, 0, len(lines))
|
||||||
|
for _, line := range lines {
|
||||||
|
if !strings.Contains(line, "#") {
|
||||||
|
nonComments = append(nonComments, line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
got := strings.Join(nonComments, "\n")
|
||||||
|
want := strings.Join(wantLines, "\n")
|
||||||
|
if got != want {
|
||||||
|
t.Fatalf("\ngot:\n%s\n\nwant:\n%s\n", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
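For reference, the 0.9.0 aggregation data types exercised by the updated tests are plain structs with a Value field; MeanData is gone and LastValueData is new. A minimal sketch (the values are illustrative):

    package main

    import (
        "fmt"

        "go.opencensus.io/stats/view"
    )

    func main() {
        count := &view.CountData{Value: 1}      // was: view.CountData(1)
        sum := &view.SumData{Value: 5.5}        // was: view.SumData(5.5)
        last := &view.LastValueData{Value: 100} // new aggregation data in 0.9.0

        fmt.Println(count.Value, sum.Value, last.Value)
    }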
58
vendor/go.opencensus.io/exporter/stackdriver/example_test.go
generated
vendored
58
vendor/go.opencensus.io/exporter/stackdriver/example_test.go
generated
vendored
@@ -1,58 +0,0 @@
|
|||||||
// Copyright 2018, OpenCensus Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package stackdriver_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"go.opencensus.io/exporter/stackdriver"
|
|
||||||
"go.opencensus.io/exporter/stackdriver/propagation"
|
|
||||||
"go.opencensus.io/plugin/ochttp"
|
|
||||||
"go.opencensus.io/stats/view"
|
|
||||||
"go.opencensus.io/trace"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Example() {
|
|
||||||
exporter, err := stackdriver.NewExporter(stackdriver.Options{ProjectID: "google-project-id"})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Export to Stackdriver Monitoring.
|
|
||||||
view.RegisterExporter(exporter)
|
|
||||||
|
|
||||||
// Subscribe views to see stats in Stackdriver Monitoring.
|
|
||||||
if err := view.Subscribe(
|
|
||||||
ochttp.ClientLatencyView,
|
|
||||||
ochttp.ClientResponseBytesView,
|
|
||||||
); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Export to Stackdriver Trace.
|
|
||||||
trace.RegisterExporter(exporter)
|
|
||||||
|
|
||||||
// Automatically add a Stackdriver trace header to outgoing requests:
|
|
||||||
client := &http.Client{
|
|
||||||
Transport: &ochttp.Transport{
|
|
||||||
Propagation: &propagation.HTTPFormat{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
_ = client // use client
|
|
||||||
|
|
||||||
// All outgoing requests from client will include a Stackdriver Trace header.
|
|
||||||
// See the ochttp package for how to handle incoming requests.
|
|
||||||
}
|
|
||||||
21
vendor/go.opencensus.io/exporter/stackdriver/examples/stats/main.go
generated
vendored
21
vendor/go.opencensus.io/exporter/stackdriver/examples/stats/main.go
generated
vendored
@@ -28,6 +28,10 @@ import (
|
|||||||
"go.opencensus.io/stats/view"
|
"go.opencensus.io/stats/view"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Create measures. The program will record measures for the size of
|
||||||
|
// processed videos and the number of videos marked as spam.
|
||||||
|
var videoSize = stats.Int64("my.org/measure/video_size", "size of processed videos", stats.UnitBytes)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
@@ -49,20 +53,13 @@ func main() {
|
|||||||
}
|
}
|
||||||
view.RegisterExporter(exporter)
|
view.RegisterExporter(exporter)
|
||||||
|
|
||||||
// Create measures. The program will record measures for the size of
|
|
||||||
// processed videos and the nubmer of videos marked as spam.
|
|
||||||
videoSize, err := stats.Int64("my.org/measure/video_size", "size of processed videos", "MBy")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Video size measure not created: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set reporting period to report data at every second.
|
// Set reporting period to report data at every second.
|
||||||
view.SetReportingPeriod(1 * time.Second)
|
view.SetReportingPeriod(1 * time.Second)
|
||||||
|
|
||||||
// Create view to see the processed video size cumulatively.
|
// Create view to see the processed video size cumulatively.
|
||||||
// Subscribe will allow view data to be exported.
|
// Subscribe will allow view data to be exported.
|
||||||
// Once no longer needed, you can unsubscribe from the view.
|
// Once no longer needed, you can unsubscribe from the view.
|
||||||
if err := view.Subscribe(&view.View{
|
if err := view.Register(&view.View{
|
||||||
Name: "my.org/views/video_size_cum",
|
Name: "my.org/views/video_size_cum",
|
||||||
Description: "processed video size over time",
|
Description: "processed video size over time",
|
||||||
Measure: videoSize,
|
Measure: videoSize,
|
||||||
@@ -71,11 +68,15 @@ func main() {
|
|||||||
log.Fatalf("Cannot subscribe to the view: %v", err)
|
log.Fatalf("Cannot subscribe to the view: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Record data points.
|
processVideo(ctx)
|
||||||
stats.Record(ctx, videoSize.M(25648))
|
|
||||||
|
|
||||||
// Wait for a duration longer than reporting duration to ensure the stats
|
// Wait for a duration longer than reporting duration to ensure the stats
|
||||||
// library reports the collected data.
|
// library reports the collected data.
|
||||||
fmt.Println("Wait longer than the reporting duration...")
|
fmt.Println("Wait longer than the reporting duration...")
|
||||||
time.Sleep(1 * time.Minute)
|
time.Sleep(1 * time.Minute)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func processVideo(ctx context.Context) {
|
||||||
|
// Do some processing and record stats.
|
||||||
|
stats.Record(ctx, videoSize.M(25648))
|
||||||
|
}
|
||||||
|
|||||||
26
vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go
generated
vendored
26
vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go
generated
vendored
@@ -12,19 +12,16 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
// Package stackdriver contains the OpenCensus exporters for
|
// Package stackdriver has moved.
|
||||||
// Stackdriver Monitoring and Stackdriver Tracing.
|
|
||||||
//
|
//
|
||||||
// Please note that the Stackdriver exporter is currently experimental.
|
// Deprecated: Use contrib.go.opencensus.io/exporter/stackdriver instead.
|
||||||
//
|
|
||||||
// The package uses Application Default Credentials to authenticate. See
|
|
||||||
// https://developers.google.com/identity/protocols/application-default-credentials
|
|
||||||
package stackdriver // import "go.opencensus.io/exporter/stackdriver"
|
package stackdriver // import "go.opencensus.io/exporter/stackdriver"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
traceapi "cloud.google.com/go/trace/apiv2"
|
traceapi "cloud.google.com/go/trace/apiv2"
|
||||||
@@ -49,10 +46,15 @@ type Options struct {
|
|||||||
// Optional.
|
// Optional.
|
||||||
OnError func(err error)
|
OnError func(err error)
|
||||||
|
|
||||||
// ClientOptions are additional options to be passed
|
// MonitoringClientOptions are additional options to be passed
|
||||||
// to the underlying Stackdriver Monitoring API client.
|
// to the underlying Stackdriver Monitoring API client.
|
||||||
// Optional.
|
// Optional.
|
||||||
ClientOptions []option.ClientOption
|
MonitoringClientOptions []option.ClientOption
|
||||||
|
|
||||||
|
// TraceClientOptions are additional options to be passed
|
||||||
|
// to the underlying Stackdriver Trace API client.
|
||||||
|
// Optional.
|
||||||
|
TraceClientOptions []option.ClientOption
|
||||||
|
|
||||||
// BundleDelayThreshold determines the max amount of time
|
// BundleDelayThreshold determines the max amount of time
|
||||||
// the exporter can wait before uploading view data to
|
// the exporter can wait before uploading view data to
|
||||||
@@ -130,3 +132,11 @@ func (e *Exporter) Flush() {
|
|||||||
e.statsExporter.Flush()
|
e.statsExporter.Flush()
|
||||||
e.traceExporter.Flush()
|
e.traceExporter.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (o Options) handleError(err error) {
|
||||||
|
if o.OnError != nil {
|
||||||
|
o.OnError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Printf("Error exporting to Stackdriver: %v", err)
|
||||||
|
}
|
||||||
|
|||||||
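A sketch of wiring the renamed exporter options shown above, assuming the vendored go.opencensus.io/exporter/stackdriver package; the project ID and user agent are placeholders.

    package main

    import (
        "log"

        "go.opencensus.io/exporter/stackdriver"
        "google.golang.org/api/option"
    )

    func main() {
        exporter, err := stackdriver.NewExporter(stackdriver.Options{
            ProjectID: "my-project-id", // placeholder
            // ClientOptions was split into per-client option sets.
            MonitoringClientOptions: []option.ClientOption{option.WithUserAgent("fn-example")},
            TraceClientOptions:      []option.ClientOption{option.WithUserAgent("fn-example")},
            // Export failures are delivered here; without OnError they are logged.
            OnError: func(err error) { log.Printf("stackdriver export error: %v", err) },
        })
        if err != nil {
            log.Fatal(err)
        }
        defer exporter.Flush()
    }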
24
vendor/go.opencensus.io/exporter/stackdriver/stackdriver_test.go
generated
vendored
24
vendor/go.opencensus.io/exporter/stackdriver/stackdriver_test.go
generated
vendored
@@ -36,7 +36,11 @@ func TestExport(t *testing.T) {
|
|||||||
t.Skip("STACKDRIVER_TEST_PROJECT_ID not set")
|
t.Skip("STACKDRIVER_TEST_PROJECT_ID not set")
|
||||||
}
|
}
|
||||||
|
|
||||||
exporter, err := NewExporter(Options{ProjectID: projectID})
|
var exportErrors []error
|
||||||
|
|
||||||
|
exporter, err := NewExporter(Options{ProjectID: projectID, OnError: func(err error) {
|
||||||
|
exportErrors = append(exportErrors, err)
|
||||||
|
}})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -47,16 +51,24 @@ func TestExport(t *testing.T) {
|
|||||||
view.RegisterExporter(exporter)
|
view.RegisterExporter(exporter)
|
||||||
defer view.UnregisterExporter(exporter)
|
defer view.UnregisterExporter(exporter)
|
||||||
|
|
||||||
trace.SetDefaultSampler(trace.AlwaysSample())
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||||
|
|
||||||
span := trace.NewSpan("custom-span", nil, trace.StartOptions{})
|
_, span := trace.StartSpan(context.Background(), "custom-span")
|
||||||
time.Sleep(10 * time.Millisecond)
|
time.Sleep(10 * time.Millisecond)
|
||||||
span.End()
|
span.End()
|
||||||
|
|
||||||
// Test HTTP spans
|
// Test HTTP spans
|
||||||
|
|
||||||
handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
|
handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
|
||||||
|
_, backgroundSpan := trace.StartSpan(context.Background(), "BackgroundWork")
|
||||||
|
spanContext := backgroundSpan.SpanContext()
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
backgroundSpan.End()
|
||||||
|
|
||||||
|
_, span := trace.StartSpan(req.Context(), "Sleep")
|
||||||
|
span.AddLink(trace.Link{Type: trace.LinkTypeChild, TraceID: spanContext.TraceID, SpanID: spanContext.SpanID})
|
||||||
time.Sleep(150 * time.Millisecond) // do work
|
time.Sleep(150 * time.Millisecond) // do work
|
||||||
|
span.End()
|
||||||
rw.Write([]byte("Hello, world!"))
|
rw.Write([]byte("Hello, world!"))
|
||||||
})
|
})
|
||||||
server := httptest.NewServer(&ochttp.Handler{Handler: handler})
|
server := httptest.NewServer(&ochttp.Handler{Handler: handler})
|
||||||
@@ -81,6 +93,10 @@ func TestExport(t *testing.T) {
|
|||||||
// Flush twice to expose issue of exporter creating traces internally (#557)
|
// Flush twice to expose issue of exporter creating traces internally (#557)
|
||||||
exporter.Flush()
|
exporter.Flush()
|
||||||
exporter.Flush()
|
exporter.Flush()
|
||||||
|
|
||||||
|
for _, err := range exportErrors {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGRPC(t *testing.T) {
|
func TestGRPC(t *testing.T) {
|
||||||
@@ -100,7 +116,7 @@ func TestGRPC(t *testing.T) {
|
|||||||
view.RegisterExporter(exporter)
|
view.RegisterExporter(exporter)
|
||||||
defer view.UnregisterExporter(exporter)
|
defer view.UnregisterExporter(exporter)
|
||||||
|
|
||||||
trace.SetDefaultSampler(trace.AlwaysSample())
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||||
|
|
||||||
client, done := testpb.NewTestClient(t)
|
client, done := testpb.NewTestClient(t)
|
||||||
defer done()
|
defer done()
|
||||||
|
|||||||
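The two trace API changes adopted by the test above, reduced to a standalone sketch; the span name is illustrative.

    package main

    import (
        "context"
        "time"

        "go.opencensus.io/trace"
    )

    func main() {
        // Was: trace.SetDefaultSampler(trace.AlwaysSample())
        trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

        // Was: span := trace.NewSpan("custom-span", nil, trace.StartOptions{})
        _, span := trace.StartSpan(context.Background(), "custom-span")
        time.Sleep(10 * time.Millisecond) // stand-in for real work
        span.End()
    }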
150
vendor/go.opencensus.io/exporter/stackdriver/stats.go
generated
vendored
150
vendor/go.opencensus.io/exporter/stackdriver/stats.go
generated
vendored
@@ -18,8 +18,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -28,6 +26,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"go.opencensus.io/internal"
|
"go.opencensus.io/internal"
|
||||||
|
"go.opencensus.io/stats"
|
||||||
"go.opencensus.io/stats/view"
|
"go.opencensus.io/stats/view"
|
||||||
"go.opencensus.io/tag"
|
"go.opencensus.io/tag"
|
||||||
"go.opencensus.io/trace"
|
"go.opencensus.io/trace"
|
||||||
@@ -42,8 +41,6 @@ import (
|
|||||||
metricpb "google.golang.org/genproto/googleapis/api/metric"
|
metricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||||
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const maxTimeSeriesPerUpload = 200
|
const maxTimeSeriesPerUpload = 200
|
||||||
@@ -92,7 +89,7 @@ func newStatsExporter(o Options) (*statsExporter, error) {
|
|||||||
|
|
||||||
seenProjects[o.ProjectID] = true
|
seenProjects[o.ProjectID] = true
|
||||||
|
|
||||||
opts := append(o.ClientOptions, option.WithUserAgent(internal.UserAgent))
|
opts := append(o.MonitoringClientOptions, option.WithUserAgent(internal.UserAgent))
|
||||||
client, err := monitoring.NewMetricClient(context.Background(), opts...)
|
client, err := monitoring.NewMetricClient(context.Background(), opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -125,9 +122,9 @@ func (e *statsExporter) ExportView(vd *view.Data) {
|
|||||||
case bundler.ErrOversizedItem:
|
case bundler.ErrOversizedItem:
|
||||||
go e.handleUpload(vd)
|
go e.handleUpload(vd)
|
||||||
case bundler.ErrOverflow:
|
case bundler.ErrOverflow:
|
||||||
e.onError(errors.New("failed to upload: buffer full"))
|
e.o.handleError(errors.New("failed to upload: buffer full"))
|
||||||
default:
|
default:
|
||||||
e.onError(err)
|
e.o.handleError(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -145,7 +142,7 @@ func getTaskValue() string {
|
|||||||
// of Data, as well as error handling.
|
// of Data, as well as error handling.
|
||||||
func (e *statsExporter) handleUpload(vds ...*view.Data) {
|
func (e *statsExporter) handleUpload(vds ...*view.Data) {
|
||||||
if err := e.uploadStats(vds); err != nil {
|
if err := e.uploadStats(vds); err != nil {
|
||||||
e.onError(err)
|
e.o.handleError(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -157,32 +154,23 @@ func (e *statsExporter) Flush() {
|
|||||||
e.bundler.Flush()
|
e.bundler.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *statsExporter) onError(err error) {
|
|
||||||
if e.o.OnError != nil {
|
|
||||||
e.o.OnError(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Printf("Failed to export to Stackdriver Monitoring: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *statsExporter) uploadStats(vds []*view.Data) error {
|
func (e *statsExporter) uploadStats(vds []*view.Data) error {
|
||||||
span := trace.NewSpan(
|
ctx, span := trace.StartSpan(
|
||||||
|
context.Background(),
|
||||||
"go.opencensus.io/exporter/stackdriver.uploadStats",
|
"go.opencensus.io/exporter/stackdriver.uploadStats",
|
||||||
nil,
|
trace.WithSampler(trace.NeverSample()),
|
||||||
trace.StartOptions{Sampler: trace.NeverSample()},
|
|
||||||
)
|
)
|
||||||
ctx := trace.WithSpan(context.Background(), span)
|
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
for _, vd := range vds {
|
for _, vd := range vds {
|
||||||
if err := e.createMeasure(ctx, vd); err != nil {
|
if err := e.createMeasure(ctx, vd); err != nil {
|
||||||
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
|
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) {
|
for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) {
|
||||||
if err := e.c.CreateTimeSeries(ctx, req); err != nil {
|
if err := e.c.CreateTimeSeries(ctx, req); err != nil {
|
||||||
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
|
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
|
||||||
// TODO(jbd): Don't fail fast here, batch errors?
|
// TODO(jbd): Don't fail fast here, batch errors?
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -205,7 +193,7 @@ func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.Cre
|
|||||||
for _, row := range vd.Rows {
|
for _, row := range vd.Rows {
|
||||||
ts := &monitoringpb.TimeSeries{
|
ts := &monitoringpb.TimeSeries{
|
||||||
Metric: &metricpb.Metric{
|
Metric: &metricpb.Metric{
|
||||||
Type: namespacedViewName(vd.View.Name, false),
|
Type: namespacedViewName(vd.View.Name),
|
||||||
Labels: newLabels(row.Tags, e.taskValue),
|
Labels: newLabels(row.Tags, e.taskValue),
|
||||||
},
|
},
|
||||||
Resource: resource,
|
Resource: resource,
|
||||||
@@ -243,53 +231,53 @@ func (e *statsExporter) createMeasure(ctx context.Context, vd *view.Data) error
|
|||||||
viewName := vd.View.Name
|
viewName := vd.View.Name
|
||||||
|
|
||||||
if md, ok := e.createdViews[viewName]; ok {
|
if md, ok := e.createdViews[viewName]; ok {
|
||||||
return equalAggTagKeys(md, agg, tagKeys)
|
return equalMeasureAggTagKeys(md, m, agg, tagKeys)
|
||||||
}
|
}
|
||||||
|
|
||||||
metricName := monitoring.MetricMetricDescriptorPath(e.o.ProjectID, namespacedViewName(viewName, true))
|
metricType := namespacedViewName(viewName)
|
||||||
md, err := getMetricDescriptor(ctx, e.c, &monitoringpb.GetMetricDescriptorRequest{
|
|
||||||
Name: metricName,
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
if err := equalAggTagKeys(md, agg, tagKeys); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
e.createdViews[viewName] = md
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if grpc.Code(err) != codes.NotFound {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var metricKind metricpb.MetricDescriptor_MetricKind
|
|
||||||
var valueType metricpb.MetricDescriptor_ValueType
|
var valueType metricpb.MetricDescriptor_ValueType
|
||||||
|
unit := m.Unit()
|
||||||
|
|
||||||
switch agg.Type {
|
switch agg.Type {
|
||||||
case view.AggTypeCount:
|
case view.AggTypeCount:
|
||||||
valueType = metricpb.MetricDescriptor_INT64
|
valueType = metricpb.MetricDescriptor_INT64
|
||||||
|
// If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1",
|
||||||
|
// because this view does not apply to the recorded values.
|
||||||
|
unit = stats.UnitDimensionless
|
||||||
case view.AggTypeSum:
|
case view.AggTypeSum:
|
||||||
valueType = metricpb.MetricDescriptor_DOUBLE
|
switch m.(type) {
|
||||||
case view.AggTypeMean:
|
case *stats.Int64Measure:
|
||||||
valueType = metricpb.MetricDescriptor_DISTRIBUTION
|
valueType = metricpb.MetricDescriptor_INT64
|
||||||
|
case *stats.Float64Measure:
|
||||||
|
valueType = metricpb.MetricDescriptor_DOUBLE
|
||||||
|
}
|
||||||
case view.AggTypeDistribution:
|
case view.AggTypeDistribution:
|
||||||
valueType = metricpb.MetricDescriptor_DISTRIBUTION
|
valueType = metricpb.MetricDescriptor_DISTRIBUTION
|
||||||
|
case view.AggTypeLastValue:
|
||||||
|
switch m.(type) {
|
||||||
|
case *stats.Int64Measure:
|
||||||
|
valueType = metricpb.MetricDescriptor_INT64
|
||||||
|
case *stats.Float64Measure:
|
||||||
|
valueType = metricpb.MetricDescriptor_DOUBLE
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String())
|
return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
metricKind = metricpb.MetricDescriptor_CUMULATIVE
|
metricKind := metricpb.MetricDescriptor_CUMULATIVE
|
||||||
displayNamePrefix := defaultDisplayNamePrefix
|
displayNamePrefix := defaultDisplayNamePrefix
|
||||||
if e.o.MetricPrefix != "" {
|
if e.o.MetricPrefix != "" {
|
||||||
displayNamePrefix = e.o.MetricPrefix
|
displayNamePrefix = e.o.MetricPrefix
|
||||||
}
|
}
|
||||||
|
|
||||||
md, err = createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{
|
md, err := createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{
|
||||||
Name: monitoring.MetricProjectPath(e.o.ProjectID),
|
Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
|
||||||
MetricDescriptor: &metricpb.MetricDescriptor{
|
MetricDescriptor: &metricpb.MetricDescriptor{
|
||||||
|
Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType),
|
||||||
DisplayName: path.Join(displayNamePrefix, viewName),
|
DisplayName: path.Join(displayNamePrefix, viewName),
|
||||||
Description: m.Description(),
|
Description: vd.View.Description,
|
||||||
Unit: m.Unit(),
|
Unit: unit,
|
||||||
Type: namespacedViewName(viewName, false),
|
Type: metricType,
|
||||||
MetricKind: metricKind,
|
MetricKind: metricKind,
|
||||||
ValueType: valueType,
|
ValueType: valueType,
|
||||||
Labels: newLabelDescriptors(vd.View.TagKeys),
|
Labels: newLabelDescriptors(vd.View.TagKeys),
|
||||||
@@ -323,28 +311,19 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
|
|||||||
switch v := r.Data.(type) {
|
switch v := r.Data.(type) {
|
||||||
case *view.CountData:
|
case *view.CountData:
|
||||||
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
|
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
|
||||||
Int64Value: int64(*v),
|
Int64Value: v.Value,
|
||||||
}}
|
}}
|
||||||
case *view.SumData:
|
case *view.SumData:
|
||||||
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
|
switch vd.Measure.(type) {
|
||||||
DoubleValue: float64(*v),
|
case *stats.Int64Measure:
|
||||||
}}
|
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
|
||||||
case *view.MeanData:
|
Int64Value: int64(v.Value),
|
||||||
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
|
}}
|
||||||
DistributionValue: &distributionpb.Distribution{
|
case *stats.Float64Measure:
|
||||||
Count: int64(v.Count),
|
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
|
||||||
Mean: v.Mean,
|
DoubleValue: v.Value,
|
||||||
SumOfSquaredDeviation: 0,
|
}}
|
||||||
BucketOptions: &distributionpb.Distribution_BucketOptions{
|
}
|
||||||
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
|
|
||||||
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
|
|
||||||
Bounds: []float64{0},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
BucketCounts: []int64{0, int64(v.Count)},
|
|
||||||
},
|
|
||||||
}}
|
|
||||||
case *view.DistributionData:
|
case *view.DistributionData:
|
||||||
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
|
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
|
||||||
DistributionValue: &distributionpb.Distribution{
|
DistributionValue: &distributionpb.Distribution{
|
||||||
@@ -366,16 +345,23 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
|
|||||||
BucketCounts: v.CountPerBucket,
|
BucketCounts: v.CountPerBucket,
|
||||||
},
|
},
|
||||||
}}
|
}}
|
||||||
|
case *view.LastValueData:
|
||||||
|
switch vd.Measure.(type) {
|
||||||
|
case *stats.Int64Measure:
|
||||||
|
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
|
||||||
|
Int64Value: int64(v.Value),
|
||||||
|
}}
|
||||||
|
case *stats.Float64Measure:
|
||||||
|
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
|
||||||
|
DoubleValue: v.Value,
|
||||||
|
}}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func namespacedViewName(v string, escaped bool) string {
|
func namespacedViewName(v string) string {
|
||||||
p := path.Join("opencensus", v)
|
return path.Join("custom.googleapis.com", "opencensus", v)
|
||||||
if escaped {
|
|
||||||
p = url.PathEscape(p)
|
|
||||||
}
|
|
||||||
return path.Join("custom.googleapis.com", p)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newLabels(tags []tag.Tag, taskValue string) map[string]string {
|
func newLabels(tags []tag.Tag, taskValue string) map[string]string {
|
||||||
@@ -404,15 +390,21 @@ func newLabelDescriptors(keys []tag.Key) []*labelpb.LabelDescriptor {
|
|||||||
return labelDescriptors
|
return labelDescriptors
|
||||||
}
|
}
|
||||||
|
|
||||||
func equalAggTagKeys(md *metricpb.MetricDescriptor, agg *view.Aggregation, keys []tag.Key) error {
|
func equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error {
|
||||||
var aggTypeMatch bool
|
var aggTypeMatch bool
|
||||||
switch md.ValueType {
|
switch md.ValueType {
|
||||||
case metricpb.MetricDescriptor_INT64:
|
case metricpb.MetricDescriptor_INT64:
|
||||||
aggTypeMatch = agg.Type == view.AggTypeCount
|
if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) {
|
||||||
|
return fmt.Errorf("stackdriver metric descriptor was not created as int64")
|
||||||
|
}
|
||||||
|
aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
|
||||||
case metricpb.MetricDescriptor_DOUBLE:
|
case metricpb.MetricDescriptor_DOUBLE:
|
||||||
aggTypeMatch = agg.Type == view.AggTypeSum
|
if _, ok := m.(*stats.Float64Measure); !ok {
|
||||||
|
return fmt.Errorf("stackdriver metric descriptor was not created as double")
|
||||||
|
}
|
||||||
|
aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
|
||||||
case metricpb.MetricDescriptor_DISTRIBUTION:
|
case metricpb.MetricDescriptor_DISTRIBUTION:
|
||||||
aggTypeMatch = agg.Type == view.AggTypeMean || agg.Type == view.AggTypeDistribution
|
aggTypeMatch = agg.Type == view.AggTypeDistribution
|
||||||
}
|
}
|
||||||
|
|
||||||
if !aggTypeMatch {
|
if !aggTypeMatch {
|
||||||
|
|||||||
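The stats.go changes above pick the Stackdriver value type from the measure's Go type instead of from the aggregation alone. A reduced sketch of that dispatch, using only types that appear in the diff:

    package main

    import (
        "fmt"

        "go.opencensus.io/stats"
        metricpb "google.golang.org/genproto/googleapis/api/metric"
    )

    // valueTypeFor mirrors the new createMeasure logic for Sum and LastValue views:
    // int64 measures map to INT64, float64 measures to DOUBLE.
    func valueTypeFor(m stats.Measure) metricpb.MetricDescriptor_ValueType {
        switch m.(type) {
        case *stats.Int64Measure:
            return metricpb.MetricDescriptor_INT64
        case *stats.Float64Measure:
            return metricpb.MetricDescriptor_DOUBLE
        default:
            return metricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
        }
    }

    func main() {
        fmt.Println(valueTypeFor(stats.Int64("tests/m1", "", stats.UnitDimensionless))) // INT64
        fmt.Println(valueTypeFor(stats.Float64("tests/m2", "", stats.UnitBytes)))       // DOUBLE
    }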
296
vendor/go.opencensus.io/exporter/stackdriver/stats_test.go
generated
vendored
296
vendor/go.opencensus.io/exporter/stackdriver/stats_test.go
generated
vendored
@@ -26,15 +26,12 @@ import (
|
|||||||
"go.opencensus.io/stats/view"
|
"go.opencensus.io/stats/view"
|
||||||
"go.opencensus.io/tag"
|
"go.opencensus.io/tag"
|
||||||
"google.golang.org/api/option"
|
"google.golang.org/api/option"
|
||||||
distributionpb "google.golang.org/genproto/googleapis/api/distribution"
|
|
||||||
"google.golang.org/genproto/googleapis/api/label"
|
"google.golang.org/genproto/googleapis/api/label"
|
||||||
"google.golang.org/genproto/googleapis/api/metric"
|
"google.golang.org/genproto/googleapis/api/metric"
|
||||||
metricpb "google.golang.org/genproto/googleapis/api/metric"
|
metricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||||
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var authOptions = []option.ClientOption{option.WithGRPCConn(&grpc.ClientConn{})}
|
var authOptions = []option.ClientOption{option.WithGRPCConn(&grpc.ClientConn{})}
|
||||||
@@ -42,7 +39,7 @@ var authOptions = []option.ClientOption{option.WithGRPCConn(&grpc.ClientConn{})}
|
|||||||
func TestRejectBlankProjectID(t *testing.T) {
|
func TestRejectBlankProjectID(t *testing.T) {
|
||||||
ids := []string{"", " ", " "}
|
ids := []string{"", " ", " "}
|
||||||
for _, projectID := range ids {
|
for _, projectID := range ids {
|
||||||
opts := Options{ProjectID: projectID, ClientOptions: authOptions}
|
opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions}
|
||||||
exp, err := newStatsExporter(opts)
|
exp, err := newStatsExporter(opts)
|
||||||
if err == nil || exp != nil {
|
if err == nil || exp != nil {
|
||||||
t.Errorf("%q ProjectID must be rejected: NewExporter() = %v err = %q", projectID, exp, err)
|
t.Errorf("%q ProjectID must be rejected: NewExporter() = %v err = %q", projectID, exp, err)
|
||||||
@@ -55,7 +52,7 @@ func TestRejectBlankProjectID(t *testing.T) {
|
|||||||
func TestNewExporterSingletonPerProcess(t *testing.T) {
|
func TestNewExporterSingletonPerProcess(t *testing.T) {
|
||||||
ids := []string{"open-census.io", "x", "fakeProjectID"}
|
ids := []string{"open-census.io", "x", "fakeProjectID"}
|
||||||
for _, projectID := range ids {
|
for _, projectID := range ids {
|
||||||
opts := Options{ProjectID: projectID, ClientOptions: authOptions}
|
opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions}
|
||||||
exp, err := newStatsExporter(opts)
|
exp, err := newStatsExporter(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("NewExporter() projectID = %q err = %q", projectID, err)
|
t.Errorf("NewExporter() projectID = %q err = %q", projectID, err)
|
||||||
@@ -73,10 +70,7 @@ func TestNewExporterSingletonPerProcess(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestExporter_makeReq(t *testing.T) {
|
func TestExporter_makeReq(t *testing.T) {
|
||||||
m, err := stats.Float64("test-measure", "measure desc", "unit")
|
m := stats.Float64("test-measure", "measure desc", "unit")
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
key, err := tag.NewKey("test_key")
|
key, err := tag.NewKey("test_key")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -99,18 +93,12 @@ func TestExporter_makeReq(t *testing.T) {
|
|||||||
|
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
end := start.Add(time.Minute)
|
end := start.Add(time.Minute)
|
||||||
count1 := view.CountData(10)
|
count1 := &view.CountData{Value: 10}
|
||||||
count2 := view.CountData(16)
|
count2 := &view.CountData{Value: 16}
|
||||||
sum1 := view.SumData(5.5)
|
sum1 := &view.SumData{Value: 5.5}
|
||||||
sum2 := view.SumData(-11.1)
|
sum2 := &view.SumData{Value: -11.1}
|
||||||
mean1 := view.MeanData{
|
last1 := view.LastValueData{Value: 100}
|
||||||
Mean: 3.3,
|
last2 := view.LastValueData{Value: 200}
|
||||||
Count: 7,
|
|
||||||
}
|
|
||||||
mean2 := view.MeanData{
|
|
||||||
Mean: -7.7,
|
|
||||||
Count: 5,
|
|
||||||
}
|
|
||||||
taskValue := getTaskValue()
|
taskValue := getTaskValue()
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
@@ -122,7 +110,7 @@ func TestExporter_makeReq(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "count agg + timeline",
|
name: "count agg + timeline",
|
||||||
projID: "proj-id",
|
projID: "proj-id",
|
||||||
vd: newTestViewData(v, start, end, &count1, &count2),
|
vd: newTestViewData(v, start, end, count1, count2),
|
||||||
want: []*monitoringpb.CreateTimeSeriesRequest{{
|
want: []*monitoringpb.CreateTimeSeriesRequest{{
|
||||||
Name: monitoring.MetricProjectPath("proj-id"),
|
Name: monitoring.MetricProjectPath("proj-id"),
|
||||||
TimeSeries: []*monitoringpb.TimeSeries{
|
TimeSeries: []*monitoringpb.TimeSeries{
|
||||||
@@ -190,7 +178,7 @@ func TestExporter_makeReq(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "sum agg + timeline",
|
name: "sum agg + timeline",
|
||||||
projID: "proj-id",
|
projID: "proj-id",
|
||||||
vd: newTestViewData(v, start, end, &sum1, &sum2),
|
vd: newTestViewData(v, start, end, sum1, sum2),
|
||||||
want: []*monitoringpb.CreateTimeSeriesRequest{{
|
want: []*monitoringpb.CreateTimeSeriesRequest{{
|
||||||
Name: monitoring.MetricProjectPath("proj-id"),
|
Name: monitoring.MetricProjectPath("proj-id"),
|
||||||
TimeSeries: []*monitoringpb.TimeSeries{
|
TimeSeries: []*monitoringpb.TimeSeries{
|
||||||
@@ -256,9 +244,9 @@ func TestExporter_makeReq(t *testing.T) {
|
|||||||
}},
|
}},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "mean agg + timeline",
|
name: "last value agg",
|
||||||
projID: "proj-id",
|
projID: "proj-id",
|
||||||
vd: newTestViewData(v, start, end, &mean1, &mean2),
|
vd: newTestViewData(v, start, end, &last1, &last2),
|
||||||
want: []*monitoringpb.CreateTimeSeriesRequest{{
|
want: []*monitoringpb.CreateTimeSeriesRequest{{
|
||||||
Name: monitoring.MetricProjectPath("proj-id"),
|
Name: monitoring.MetricProjectPath("proj-id"),
|
||||||
TimeSeries: []*monitoringpb.TimeSeries{
|
TimeSeries: []*monitoringpb.TimeSeries{
|
||||||
@@ -285,20 +273,8 @@ func TestExporter_makeReq(t *testing.T) {
|
|||||||
Nanos: int32(end.Nanosecond()),
|
Nanos: int32(end.Nanosecond()),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
|
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
|
||||||
DistributionValue: &distributionpb.Distribution{
|
DoubleValue: 100,
|
||||||
Count: 7,
|
|
||||||
Mean: 3.3,
|
|
||||||
SumOfSquaredDeviation: 0,
|
|
||||||
BucketOptions: &distributionpb.Distribution_BucketOptions{
|
|
||||||
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
|
|
||||||
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
|
|
||||||
Bounds: []float64{0},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
BucketCounts: []int64{0, 7},
|
|
||||||
},
|
|
||||||
}},
|
}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -326,20 +302,8 @@ func TestExporter_makeReq(t *testing.T) {
|
|||||||
Nanos: int32(end.Nanosecond()),
|
Nanos: int32(end.Nanosecond()),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
|
Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
|
||||||
DistributionValue: &distributionpb.Distribution{
|
DoubleValue: 200,
|
||||||
Count: 5,
|
|
||||||
Mean: -7.7,
|
|
||||||
SumOfSquaredDeviation: 0,
|
|
||||||
BucketOptions: &distributionpb.Distribution_BucketOptions{
|
|
||||||
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
|
|
||||||
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
|
|
||||||
Bounds: []float64{0},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
BucketCounts: []int64{0, 5},
|
|
||||||
},
|
|
||||||
}},
|
}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -378,10 +342,7 @@ func TestExporter_makeReq(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestExporter_makeReq_batching(t *testing.T) {
|
func TestExporter_makeReq_batching(t *testing.T) {
|
||||||
m, err := stats.Float64("test-measure/makeReq_batching", "measure desc", "unit")
|
m := stats.Float64("test-measure/makeReq_batching", "measure desc", "unit")
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
key, err := tag.NewKey("test_key")
|
key, err := tag.NewKey("test_key")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -426,13 +387,13 @@ func TestExporter_makeReq_batching(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
count1 := view.CountData(10)
|
count1 := &view.CountData{Value: 10}
|
||||||
count2 := view.CountData(16)
|
count2 := &view.CountData{Value: 16}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
var vds []*view.Data
|
var vds []*view.Data
|
||||||
for i := 0; i < tt.iter; i++ {
|
for i := 0; i < tt.iter; i++ {
|
||||||
vds = append(vds, newTestViewData(v, time.Now(), time.Now(), &count1, &count2))
|
vds = append(vds, newTestViewData(v, time.Now(), time.Now(), count1, count2))
|
||||||
}
|
}
|
||||||
|
|
||||||
e := &statsExporter{}
|
e := &statsExporter{}
|
||||||
@@ -457,58 +418,97 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
|
|||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
md *metricpb.MetricDescriptor
|
md *metricpb.MetricDescriptor
|
||||||
|
m stats.Measure
|
||||||
agg *view.Aggregation
|
agg *view.Aggregation
|
||||||
keys []tag.Key
|
keys []tag.Key
|
||||||
wantErr bool
|
wantErr bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "count agg",
|
name: "count agg with in64 measure",
|
||||||
md: &metricpb.MetricDescriptor{
|
md: &metricpb.MetricDescriptor{
|
||||||
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
|
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
|
||||||
ValueType: metricpb.MetricDescriptor_INT64,
|
ValueType: metricpb.MetricDescriptor_INT64,
|
||||||
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
|
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
|
||||||
},
|
},
|
||||||
|
m: stats.Int64("name", "", ""),
|
||||||
agg: view.Count(),
|
agg: view.Count(),
|
||||||
wantErr: false,
|
wantErr: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "sum agg",
|
name: "count agg with double measure",
|
||||||
|
md: &metricpb.MetricDescriptor{
|
||||||
|
MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
|
||||||
|
ValueType: metricpb.MetricDescriptor_INT64,
|
||||||
|
Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}},
|
||||||
|
},
|
||||||
|
m: stats.Float64("name", "", ""),
|
||||||
|
agg: view.Count(),
|
||||||
|
 			wantErr: false,
 		},
 		{
+			name: "sum agg double",
 			md: &metricpb.MetricDescriptor{
 				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
 				ValueType:  metricpb.MetricDescriptor_DOUBLE,
 				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
 			},
+			m:       stats.Float64("name", "", ""),
 			agg:     view.Sum(),
 			wantErr: false,
 		},
 		{
-			name: "mean agg",
+			name: "sum agg int64",
 			md: &metricpb.MetricDescriptor{
 				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
-				ValueType:  metricpb.MetricDescriptor_DISTRIBUTION,
+				ValueType:  metricpb.MetricDescriptor_INT64,
 				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
 			},
-			agg:     view.Mean(),
+			m:       stats.Int64("name", "", ""),
+			agg:     view.Sum(),
 			wantErr: false,
 		},
 		{
-			name: "distribution agg - mismatch",
-			md: &metricpb.MetricDescriptor{
-				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
-				ValueType:  metricpb.MetricDescriptor_DISTRIBUTION,
-				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
-			},
-			agg:     view.Count(),
-			wantErr: true,
-		},
-		{
-			name: "mean agg - mismatch",
+			name: "last value agg double",
 			md: &metricpb.MetricDescriptor{
 				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
 				ValueType:  metricpb.MetricDescriptor_DOUBLE,
 				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
 			},
-			agg:     view.Mean(),
+			m:       stats.Float64("name", "", ""),
+			agg:     view.LastValue(),
+			wantErr: false,
+		},
+		{
+			name: "last value agg int64",
+			md: &metricpb.MetricDescriptor{
+				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
+				ValueType:  metricpb.MetricDescriptor_INT64,
+				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
+			},
+			m:       stats.Int64("name", "", ""),
+			agg:     view.LastValue(),
+			wantErr: false,
+		},
+		{
+			name: "distribution - mismatch",
+			md: &metricpb.MetricDescriptor{
+				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
+				ValueType:  metricpb.MetricDescriptor_DISTRIBUTION,
+				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
+			},
+			m:       stats.Int64("name", "", ""),
+			agg:     view.Count(),
+			wantErr: true,
+		},
+		{
+			name: "last value - measure mismatch",
+			md: &metricpb.MetricDescriptor{
+				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
+				ValueType:  metricpb.MetricDescriptor_INT64,
+				Labels:     []*label.LabelDescriptor{{Key: opencensusTaskKey}},
+			},
+			m:       stats.Float64("name", "", ""),
+			agg:     view.LastValue(),
 			wantErr: true,
 		},
 		{
@@ -522,6 +522,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
 					{Key: opencensusTaskKey},
 				},
 			},
+			m:       stats.Int64("name", "", ""),
 			agg:     view.Distribution(),
 			keys:    []tag.Key{key1, key2},
 			wantErr: false,
@@ -532,6 +533,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
 				MetricKind: metricpb.MetricDescriptor_CUMULATIVE,
 				ValueType:  metricpb.MetricDescriptor_DISTRIBUTION,
 			},
+			m:       stats.Int64("name", "", ""),
 			agg:     view.Distribution(),
 			keys:    []tag.Key{key1, key2},
 			wantErr: true,
@@ -543,13 +545,14 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
 				ValueType: metricpb.MetricDescriptor_INT64,
 				Labels:    []*label.LabelDescriptor{{Key: opencensusTaskKey}},
 			},
+			m:       stats.Int64("name", "", ""),
 			agg:     view.Count(),
 			wantErr: false,
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			err := equalAggTagKeys(tt.md, tt.agg, tt.keys)
+			err := equalMeasureAggTagKeys(tt.md, tt.m, tt.agg, tt.keys)
 			if err != nil && !tt.wantErr {
 				t.Errorf("equalAggTagKeys() = %q; want no error", err)
 			}
@@ -562,49 +565,62 @@ func TestEqualAggWindowTagKeys(t *testing.T) {
 }

 func TestExporter_createMeasure(t *testing.T) {
-	oldGetMetricDescriptor := getMetricDescriptor
 	oldCreateMetricDescriptor := createMetricDescriptor

 	defer func() {
-		getMetricDescriptor = oldGetMetricDescriptor
 		createMetricDescriptor = oldCreateMetricDescriptor
 	}()

 	key, _ := tag.NewKey("test-key-one")
-	m, err := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", "unit")
-	if err != nil {
-		t.Fatal(err)
-	}
+	m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds)

 	v := &view.View{
-		Name:        "testview",
-		Description: "desc",
+		Name:        "test_view_sum",
+		Description: "view_description",
 		TagKeys:     []tag.Key{key},
 		Measure:     m,
-		Aggregation: view.Count(),
+		Aggregation: view.Sum(),
 	}

-	data := view.CountData(0)
-	vd := newTestViewData(v, time.Now(), time.Now(), &data, &data)
+	data := &view.CountData{Value: 0}
+	vd := newTestViewData(v, time.Now(), time.Now(), data, data)

 	e := &statsExporter{
 		createdViews: make(map[string]*metricpb.MetricDescriptor),
+		o:            Options{ProjectID: "test_project"},
 	}

-	var getCalls, createCalls int
-	getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
-		getCalls++
-		return nil, status.Error(codes.NotFound, "")
-	}
+	var createCalls int
 	createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
 		createCalls++
+		if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_sum"; got != want {
+			t.Errorf("MetricDescriptor.Name = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_sum"; got != want {
+			t.Errorf("MetricDescriptor.Type = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.ValueType, metricpb.MetricDescriptor_DOUBLE; got != want {
+			t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want {
+			t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want {
+			t.Errorf("MetricDescriptor.Description = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_sum"; got != want {
+			t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.Unit, stats.UnitMilliseconds; got != want {
+			t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want)
+		}
 		return &metric.MetricDescriptor{
-			DisplayName: "display",
-			Description: "desc",
-			Unit:        "unit",
-			Type:        "hello",
+			DisplayName: "OpenCensus/test_view_sum",
+			Description: "view_description",
+			Unit:        stats.UnitMilliseconds,
+			Type:        "custom.googleapis.com/opencensus/test_view_sum",
 			MetricKind:  metricpb.MetricDescriptor_CUMULATIVE,
-			ValueType:   metricpb.MetricDescriptor_INT64,
+			ValueType:   metricpb.MetricDescriptor_DOUBLE,
 			Labels:      newLabelDescriptors(vd.View.TagKeys),
 		}, nil
 	}
@@ -616,9 +632,6 @@ func TestExporter_createMeasure(t *testing.T) {
 	if err := e.createMeasure(ctx, vd); err != nil {
 		t.Errorf("Exporter.createMeasure() error = %v", err)
 	}
-	if count := getCalls; count != 1 {
-		t.Errorf("getMetricDescriptor needs to be called for once; called %v times", count)
-	}
 	if count := createCalls; count != 1 {
 		t.Errorf("createMetricDescriptor needs to be called for once; called %v times", count)
 	}
@@ -627,12 +640,73 @@ func TestExporter_createMeasure(t *testing.T) {
 	}
 }

-func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) {
-	m, err := stats.Float64("test-measure/TestExporter_makeReq_withCustomMonitoredResource", "measure desc", "unit")
-	if err != nil {
-		t.Fatal(err)
-	}
+func TestExporter_createMeasure_CountAggregation(t *testing.T) {
+	oldCreateMetricDescriptor := createMetricDescriptor
+	defer func() {
+		createMetricDescriptor = oldCreateMetricDescriptor
+	}()
+
+	key, _ := tag.NewKey("test-key-one")
+	m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds)
+
+	v := &view.View{
+		Name:        "test_view_count",
+		Description: "view_description",
+		TagKeys:     []tag.Key{key},
+		Measure:     m,
+		Aggregation: view.Count(),
+	}
+
+	data := &view.CountData{Value: 0}
+	vd := newTestViewData(v, time.Now(), time.Now(), data, data)
+
+	e := &statsExporter{
+		createdViews: make(map[string]*metricpb.MetricDescriptor),
+		o:            Options{ProjectID: "test_project"},
+	}
+
+	createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
+		if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_count"; got != want {
+			t.Errorf("MetricDescriptor.Name = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_count"; got != want {
+			t.Errorf("MetricDescriptor.Type = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.ValueType, metricpb.MetricDescriptor_INT64; got != want {
+			t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want {
+			t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want {
+			t.Errorf("MetricDescriptor.Description = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_count"; got != want {
+			t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want)
+		}
+		if got, want := mdr.MetricDescriptor.Unit, stats.UnitDimensionless; got != want {
+			t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want)
+		}
+		return &metric.MetricDescriptor{
+			DisplayName: "OpenCensus/test_view_sum",
+			Description: "view_description",
+			Unit:        stats.UnitDimensionless,
+			Type:        "custom.googleapis.com/opencensus/test_view_count",
+			MetricKind:  metricpb.MetricDescriptor_CUMULATIVE,
+			ValueType:   metricpb.MetricDescriptor_INT64,
+			Labels:      newLabelDescriptors(vd.View.TagKeys),
+		}, nil
+	}
+	ctx := context.Background()
+	if err := e.createMeasure(ctx, vd); err != nil {
+		t.Errorf("Exporter.createMeasure() error = %v", err)
+	}
+}
+
+func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) {
+	m := stats.Float64("test-measure/TestExporter_makeReq_withCustomMonitoredResource", "measure desc", "unit")
+
 	key, err := tag.NewKey("test_key")
 	if err != nil {
 		t.Fatal(err)
@@ -645,15 +719,15 @@ func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) {
 		Measure:     m,
 		Aggregation: view.Count(),
 	}
-	if err := view.Subscribe(v); err != nil {
+	if err := view.Register(v); err != nil {
 		t.Fatal(err)
 	}
-	defer view.Unsubscribe(v)
+	defer view.Unregister(v)

 	start := time.Now()
 	end := start.Add(time.Minute)
-	count1 := view.CountData(10)
-	count2 := view.CountData(16)
+	count1 := &view.CountData{Value: 10}
+	count2 := &view.CountData{Value: 16}
 	taskValue := getTaskValue()

 	resource := &monitoredrespb.MonitoredResource{
@@ -670,7 +744,7 @@ func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) {
 		{
 			name:   "count agg timeline",
 			projID: "proj-id",
-			vd:     newTestViewData(v, start, end, &count1, &count2),
+			vd:     newTestViewData(v, start, end, count1, count2),
 			want: []*monitoringpb.CreateTimeSeriesRequest{{
 				Name: monitoring.MetricProjectPath("proj-id"),
 				TimeSeries: []*monitoringpb.TimeSeries{
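These test hunks track the stats API changes between opencensus 0.6 and 0.9 that this commit vendors in: measure constructors now return the measure directly instead of (measure, error), aggregation data are concrete struct types such as *view.CountData, and views are activated with view.Register/Unregister rather than Subscribe/Unsubscribe. A minimal sketch of the new shape, with an illustrative measure and view name:

```go
// Sketch only; "example.org/latency" and the view name are illustrative.
latency := stats.Float64("example.org/latency", "request latency", stats.UnitMilliseconds)

v := &view.View{
	Name:        "example.org/latency_sum",
	Description: "sum of request latency",
	Measure:     latency,
	Aggregation: view.Sum(),
}
if err := view.Register(v); err != nil {
	log.Fatal(err)
}
defer view.Unregister(v)

// Record a data point; tags travel in the context.
stats.Record(context.Background(), latency.M(42))
```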
18 vendor/go.opencensus.io/exporter/stackdriver/trace.go generated vendored
@@ -31,6 +31,7 @@ import (
 // Stackdriver.
 //
 type traceExporter struct {
+	o         Options
 	projectID string
 	bundler   *bundler.Bundler
 	// uploadFn defaults to uploadSpans; it can be replaced for tests.
@@ -42,7 +43,7 @@ type traceExporter struct {
 var _ trace.Exporter = (*traceExporter)(nil)

 func newTraceExporter(o Options) (*traceExporter, error) {
-	client, err := tracingclient.NewClient(context.Background(), o.ClientOptions...)
+	client, err := tracingclient.NewClient(context.Background(), o.TraceClientOptions...)
 	if err != nil {
 		return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err)
 	}
@@ -53,6 +54,7 @@ func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExport
 	e := &traceExporter{
 		projectID: o.ProjectID,
 		client:    c,
+		o:         o,
 	}
 	bundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
 		e.uploadFn(bundle.([]*trace.SpanData))
@@ -93,7 +95,7 @@ func (e *traceExporter) ExportSpan(s *trace.SpanData) {
 	case bundler.ErrOverflow:
 		e.overflowLogger.log()
 	default:
-		log.Println("OpenCensus Stackdriver exporter: failed to upload span:", err)
+		e.o.handleError(err)
 	}
 }

@@ -115,16 +117,18 @@ func (e *traceExporter) uploadSpans(spans []*trace.SpanData) {
 		req.Spans = append(req.Spans, protoFromSpanData(span, e.projectID))
 	}
 	// Create a never-sampled span to prevent traces associated with exporter.
-	span := trace.NewSpan("go.opencensus.io/exporter/stackdriver.uploadSpans", nil, trace.StartOptions{Sampler: trace.NeverSample()})
+	ctx, span := trace.StartSpan( // TODO: add timeouts
+		context.Background(),
+		"go.opencensus.io/exporter/stackdriver.uploadSpans",
+		trace.WithSampler(trace.NeverSample()),
+	)
 	defer span.End()
 	span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans))))

-	ctx := trace.WithSpan(context.Background(), span) // TODO: add timeouts
 	err := e.client.BatchWriteSpans(ctx, &req)
 	if err != nil {
-		span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
-		// TODO: Allow configuring a logger for exporters.
-		log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: %v", len(spans), err)
+		span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
+		e.o.handleError(err)
 	}
 }
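The uploadSpans change above is the visible part of the 0.9 trace API: trace.StartSpan returns the derived context along with the span, and options such as the sampler are passed as variadic StartOptions. A small sketch of that pattern (the span name is illustrative):

```go
ctx, span := trace.StartSpan(
	context.Background(),
	"example.com/uploadBatch",
	trace.WithSampler(trace.NeverSample()), // keep the exporter's own work out of traces
)
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_spans", 17))
_ = ctx // pass ctx downstream so later calls join this span
```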
4 vendor/go.opencensus.io/exporter/stackdriver/trace_proto.go generated vendored
@@ -15,7 +15,6 @@
 package stackdriver

 import (
-	"fmt"
 	"math"
 	"time"
 	"unicode/utf8"
@@ -144,7 +143,7 @@ func protoFromSpanData(s *trace.SpanData, projectID string) *tracepb.Span {
 		sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links))
 		for _, l := range s.Links {
 			link := &tracepb.Span_Link{
-				TraceId: fmt.Sprintf("projects/%s/traces/%s", projectID, l.TraceID),
+				TraceId: l.TraceID.String(),
 				SpanId:  l.SpanID.String(),
 				Type:    tracepb.Span_Link_Type(l.Type),
 			}
@@ -152,7 +151,6 @@ func protoFromSpanData(s *trace.SpanData, projectID string) *tracepb.Span {
 			sp.Links.Link = append(sp.Links.Link, link)
 		}
 	}
-
 	return sp
 }
10 vendor/go.opencensus.io/exporter/stackdriver/trace_proto_test.go generated vendored
@@ -57,19 +57,21 @@ func (t *testExporter) ExportSpan(s *trace.SpanData) {
 }

 func TestExportTrace(t *testing.T) {
+	ctx := context.Background()
+
 	var te testExporter
 	trace.RegisterExporter(&te)
 	defer trace.UnregisterExporter(&te)

-	span0 := trace.NewSpanWithRemoteParent(
+	ctx, span0 := trace.StartSpanWithRemoteParent(
+		ctx,
 		"span0",
 		trace.SpanContext{
 			TraceID:      traceID,
 			SpanID:       spanID,
 			TraceOptions: 1,
 		},
-		trace.StartOptions{})
-	ctx := trace.WithSpan(context.Background(), span0)
+	)
 	{
 		ctx1, span1 := trace.StartSpan(ctx, "span1")
 		{
@@ -289,7 +291,7 @@ func TestExportTrace(t *testing.T) {
 			Links: &tracepb.Span_Links{
 				Link: []*tracepb.Span_Link{
 					{
-						TraceId: "projects/testproject/traces/01020000000000000000000000000000",
+						TraceId: "01020000000000000000000000000000",
 						SpanId:  "0300000000000000",
 						Type:    tracepb.Span_Link_PARENT_LINKED_SPAN,
 						Attributes: &tracepb.Span_Attributes{
3 vendor/go.opencensus.io/exporter/stackdriver/trace_test.go generated vendored
@@ -35,9 +35,8 @@ func TestBundling(t *testing.T) {
 	}
 	trace.RegisterExporter(exporter)

-	trace.SetDefaultSampler(trace.AlwaysSample())
 	for i := 0; i < 35; i++ {
-		_, span := trace.StartSpan(context.Background(), "span")
+		_, span := trace.StartSpan(context.Background(), "span", trace.WithSampler(trace.AlwaysSample()))
 		span.End()
 	}
2 vendor/go.opencensus.io/exporter/zipkin/example/main.go generated vendored
@@ -42,7 +42,7 @@ func main() {
 	trace.RegisterExporter(exporter)

 	// For example purposes, sample every trace.
-	trace.SetDefaultSampler(trace.AlwaysSample())
+	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

 	ctx := context.Background()
 	foo(ctx)
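Both sampling changes follow the same 0.9 convention: the process-wide default sampler is configured through trace.ApplyConfig, while individual spans can override it with a StartOption, as the bundling test above now does. A brief sketch (the span name and sampling rate are illustrative):

```go
// Global default (replaces trace.SetDefaultSampler).
trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(0.25)})

// Per-span override.
_, span := trace.StartSpan(context.Background(), "example/span",
	trace.WithSampler(trace.AlwaysSample()))
span.End()
```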
4 vendor/go.opencensus.io/internal/internal.go generated vendored
@@ -12,13 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package internal
+package internal // import "go.opencensus.io/internal"

 import "time"

 // UserAgent is the user agent to be added to the outgoing
 // requests from the exporters.
-const UserAgent = "opencensus-go-v0.4.0"
+const UserAgent = "opencensus-go [0.8.0]"

 // MonotonicEndTime returns the end time at present
 // but offset from start, monotonically.
2 vendor/go.opencensus.io/internal/readme/README.md generated vendored
@@ -2,5 +2,5 @@ Use the following commands to regenerate the README.

 ```bash
 $ go get github.com/rakyll/embedmd
-$ embedmd source.md > ../../README.md
+$ embedmd -w ../../README.md
 ```
141 vendor/go.opencensus.io/internal/readme/source.md generated vendored
@@ -1,141 +0,0 @@
-# OpenCensus Libraries for Go
-
-[![Build Status][travis-image]][travis-url]
-[![Windows Build Status][appveyor-image]][appveyor-url]
-[![GoDoc][godoc-image]][godoc-url]
-[![Gitter chat][gitter-image]][gitter-url]
-
-OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
-collecting application performance and behavior monitoring data.
-Currently it consists of three major components: tags, stats, and tracing.
-
-This project is still at a very early stage of development. The API is changing
-rapidly, vendoring is recommended.
-
-## Installation
-
-```
-$ go get -u go.opencensus.io
-```
-
-## Prerequisites
-
-OpenCensus Go libraries require Go 1.8 or later.
-
-## Exporters
-
-OpenCensus can export instrumentation data to various backends.
-Currently, OpenCensus supports:
-
-* [Prometheus][exporter-prom] for stats
-* [OpenZipkin][exporter-zipkin] for traces
-* Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver]
-* [Jaeger][exporter-jaeger] for traces
-* [AWS X-Ray][exporter-xray] for traces
-
-## Overview
-
-(image)
-
-In a microservices environment, a user request may go through
-multiple services until there is a response. OpenCensus allows
-you to instrument your services and collect diagnostics data all
-through your services end-to-end.
-
-Start with instrumenting HTTP and gRPC clients and servers,
-then add additional custom instrumentation if needed.
-
-* [HTTP guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/http)
-* [gRPC guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/grpc)
-
-## Tags
-
-Tags represent propagated key-value pairs. They are propagated using context.Context
-in the same process or can be encoded to be transmitted on the wire and decoded back
-to a tag.Map at the destination.
-
-Package tag provides a builder to create tag maps and put it
-into the current context.
-To propagate a tag map to downstream methods and RPCs, New
-will add the produced tag map to the current context.
-If there is already a tag map in the current context, it will be replaced.
-
-[embedmd]:# (tags.go new)
-
-## Stats
-
-OpenCensus is a low-overhead framework even if instrumentation is always enabled.
-In order to be so, it is optimized to make recording of data points fast
-and separate from the data aggregation.
-
-OpenCensus stats collection happens in two stages:
-
-* Definition of measures and recording of data points
-* Definition of views and aggregation of the recorded data
-
-### Recording
-
-Measurements are data points associated with a measure.
-Recording implicitly tags the set of Measurements with the tags from the
-provided context:
-
-[embedmd]:# (stats.go record)
-
-### Views
-
-Views are how Measures are aggregated. You can think of them as queries over the
-set of recorded data points (measurements).
-
-Views have two parts: the tags to group by and the aggregation type used.
-
-Currently four types of aggregations are supported:
-* CountAggregation is used to count the number of times a sample was recorded.
-* DistributionAggregation is used to provide a histogram of the values of the samples.
-* SumAggregation is used to sum up all sample values.
-* MeanAggregation is used to calculate the mean of sample values.
-
-[embedmd]:# (stats.go aggs)
-
-Here we create a view with the DistributionAggregation over our measure.
-
-[embedmd]:# (stats.go view)
-
-Subscribe begins collecting data for the view. Subscribed views' data will be
-exported via the registered exporters.
-
-## Traces
-
-[embedmd]:# (trace.go startend)
-
-## Profiles
-
-OpenCensus tags can be applied as profiler labels
-for users who are on Go 1.9 and above.
-
-[embedmd]:# (tags.go profiler)
-
-A screenshot of the CPU profile from the program above:
-
-(image)
-
-[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
-[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
-[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
-[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
-[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
-[godoc-url]: https://godoc.org/go.opencensus.io
-[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
-[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
-
-[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
-[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
-
-[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus
-[exporter-stackdriver]: https://godoc.org/go.opencensus.io/exporter/stackdriver
-[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
-[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
-[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws
17 vendor/go.opencensus.io/internal/readme/stats.go generated vendored
@@ -13,7 +13,7 @@
 // limitations under the License.

 // Package readme generates the README.
-package readme
+package readme // import "go.opencensus.io/internal/readme"

 import (
 	"context"
@@ -29,27 +29,18 @@ import (
 func statsExamples() {
 	ctx := context.Background()

-	videoSize, err := stats.Int64("my.org/video_size", "processed video size", "MB")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	m := stats.FindMeasure("my.org/video_size")
-	if m == nil {
-		log.Fatalln("measure not found")
-	}
+	videoSize := stats.Int64("my.org/video_size", "processed video size", "MB")

 	// START aggs
 	distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
 	countAgg := view.Count()
 	sumAgg := view.Sum()
-	meanAgg := view.Mean()
 	// END aggs

-	_, _, _, _ = distAgg, countAgg, sumAgg, meanAgg
+	_, _, _ = distAgg, countAgg, sumAgg

 	// START view
-	if err = view.Subscribe(&view.View{
+	if err := view.Register(&view.View{
 		Name:        "my.org/video_size_distribution",
 		Description: "distribution of processed video size over time",
 		Measure:     videoSize,
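The regenerated README example above is the whole 0.9 stats workflow in miniature: construct the measure, register a view over it, then record. A self-contained sketch of the same flow, with illustrative names and bucket bounds:

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var videoSize = stats.Int64("my.org/video_size", "processed video size", "MB")

func main() {
	// Registered views aggregate recorded data and feed registered exporters.
	if err := view.Register(&view.View{
		Name:        "my.org/video_size_distribution",
		Description: "distribution of processed video size over time",
		Measure:     videoSize,
		Aggregation: view.Distribution(0, 1<<16, 1<<32),
	}); err != nil {
		log.Fatal(err)
	}

	// Tags, if any, are taken from the context at record time.
	stats.Record(context.Background(), videoSize.M(25648))
}
```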
2 vendor/go.opencensus.io/internal/tagencoding/tagencoding.go generated vendored
@@ -15,7 +15,7 @@

 // Package tagencoding contains the tag encoding
 // used interally by the stats collector.
-package tagencoding
+package tagencoding // import "go.opencensus.io/internal/tagencoding"

 type Values struct {
 	Buffer []byte
2 vendor/go.opencensus.io/internal/testpb/test.pb.go generated vendored
@@ -11,7 +11,7 @@ It has these top-level messages:
 	FooRequest
 	FooResponse
 */
-package testpb
+package testpb // import "go.opencensus.io/internal/testpb"

 import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
65 vendor/go.opencensus.io/plugin/ocgrpc/benchmark_test.go generated vendored Normal file
@@ -0,0 +1,65 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocgrpc
+
+import (
+	"testing"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+func BenchmarkStatusCodeToString_OK(b *testing.B) {
+	st := status.New(codes.OK, "OK")
+	for i := 0; i < b.N; i++ {
+		s := statusCodeToString(st)
+		_ = s
+	}
+}
+
+func BenchmarkStatusCodeToString_Unauthenticated(b *testing.B) {
+	st := status.New(codes.Unauthenticated, "Unauthenticated")
+	for i := 0; i < b.N; i++ {
+		s := statusCodeToString(st)
+		_ = s
+	}
+}
+
+var codeToStringMap = map[codes.Code]string{
+	codes.OK:                 "OK",
+	codes.Canceled:           "CANCELLED",
+	codes.Unknown:            "UNKNOWN",
+	codes.InvalidArgument:    "INVALID_ARGUMENT",
+	codes.DeadlineExceeded:   "DEADLINE_EXCEEDED",
+	codes.NotFound:           "NOT_FOUND",
+	codes.AlreadyExists:      "ALREADY_EXISTS",
+	codes.PermissionDenied:   "PERMISSION_DENIED",
+	codes.ResourceExhausted:  "RESOURCE_EXHAUSTED",
+	codes.FailedPrecondition: "FAILED_PRECONDITION",
+	codes.Aborted:            "ABORTED",
+	codes.OutOfRange:         "OUT_OF_RANGE",
+	codes.Unimplemented:      "UNIMPLEMENTED",
+	codes.Internal:           "INTERNAL",
+	codes.Unavailable:        "UNAVAILABLE",
+	codes.DataLoss:           "DATA_LOSS",
+	codes.Unauthenticated:    "UNAUTHENTICATED",
+}
+
+func BenchmarkMapAlternativeImpl_OK(b *testing.B) {
+	st := status.New(codes.OK, "OK")
+	for i := 0; i < b.N; i++ {
+		_ = codeToStringMap[st.Code()]
+	}
+}
2 vendor/go.opencensus.io/plugin/ocgrpc/client.go generated vendored
@@ -44,7 +44,7 @@ func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) con
 // HandleRPC implements per-RPC tracing and stats instrumentation.
 func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
 	traceHandleRPC(ctx, rs)
-	c.statsHandleRPC(ctx, rs)
+	statsHandleRPC(ctx, rs)
 }

 // TagRPC implements per-RPC context management.
111 vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go generated vendored
@@ -23,14 +23,12 @@ import (

 // The following variables are measures are recorded by ClientHandler:
 var (
-	ClientErrorCount, _       = stats.Int64("grpc.io/client/error_count", "RPC Errors", stats.UnitNone)
-	ClientRequestBytes, _     = stats.Int64("grpc.io/client/request_bytes", "Request bytes", stats.UnitBytes)
-	ClientResponseBytes, _    = stats.Int64("grpc.io/client/response_bytes", "Response bytes", stats.UnitBytes)
-	ClientStartedCount, _     = stats.Int64("grpc.io/client/started_count", "Number of client RPCs (streams) started", stats.UnitNone)
-	ClientFinishedCount, _    = stats.Int64("grpc.io/client/finished_count", "Number of client RPCs (streams) finished", stats.UnitNone)
-	ClientRequestCount, _     = stats.Int64("grpc.io/client/request_count", "Number of client RPC request messages", stats.UnitNone)
-	ClientResponseCount, _    = stats.Int64("grpc.io/client/response_count", "Number of client RPC response messages", stats.UnitNone)
-	ClientRoundTripLatency, _ = stats.Float64("grpc.io/client/roundtrip_latency", "RPC roundtrip latency in msecs", stats.UnitMilliseconds)
+	ClientSentMessagesPerRPC     = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
+	ClientSentBytesPerRPC        = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes)
+	ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
+	ClientReceivedBytesPerRPC    = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
+	ClientRoundtripLatency       = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
+	ClientServerLatency          = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
 )

 // Predefined views may be subscribed to collect data for the above measures.
@@ -38,63 +36,78 @@ var (
 // package. These are declared as a convenience only; none are subscribed by
 // default.
 var (
-	ClientErrorCountView = &view.View{
-		Name:        "grpc.io/client/error_count",
-		Description: "RPC Errors",
-		TagKeys:     []tag.Key{KeyStatus, KeyMethod},
-		Measure:     ClientErrorCount,
-		Aggregation: view.Mean(),
-	}
-
-	ClientRoundTripLatencyView = &view.View{
-		Name:        "grpc.io/client/roundtrip_latency",
-		Description: "Latency in msecs",
-		TagKeys:     []tag.Key{KeyMethod},
-		Measure:     ClientRoundTripLatency,
-		Aggregation: DefaultMillisecondsDistribution,
-	}
-
-	ClientRequestBytesView = &view.View{
-		Name:        "grpc.io/client/request_bytes",
-		Description: "Request bytes",
-		TagKeys:     []tag.Key{KeyMethod},
-		Measure:     ClientRequestBytes,
-		Aggregation: DefaultBytesDistribution,
-	}
-
-	ClientResponseBytesView = &view.View{
-		Name:        "grpc.io/client/response_bytes",
-		Description: "Response bytes",
-		TagKeys:     []tag.Key{KeyMethod},
-		Measure:     ClientResponseBytes,
-		Aggregation: DefaultBytesDistribution,
-	}
-
-	ClientRequestCountView = &view.View{
-		Name:        "grpc.io/client/request_count",
-		Description: "Count of request messages per client RPC",
-		TagKeys:     []tag.Key{KeyMethod},
-		Measure:     ClientRequestCount,
-		Aggregation: DefaultMessageCountDistribution,
-	}
-
-	ClientResponseCountView = &view.View{
-		Name:        "grpc.io/client/response_count",
-		Description: "Count of response messages per client RPC",
-		TagKeys:     []tag.Key{KeyMethod},
-		Measure:     ClientResponseCount,
-		Aggregation: DefaultMessageCountDistribution,
-	}
+	ClientSentBytesPerRPCView = &view.View{
+		Measure:     ClientSentBytesPerRPC,
+		Name:        "grpc.io/client/sent_bytes_per_rpc",
+		Description: "Distribution of bytes sent per RPC, by method.",
+		TagKeys:     []tag.Key{KeyClientMethod},
+		Aggregation: DefaultBytesDistribution,
+	}
+
+	ClientReceivedBytesPerRPCView = &view.View{
+		Measure:     ClientReceivedBytesPerRPC,
+		Name:        "grpc.io/client/received_bytes_per_rpc",
+		Description: "Distribution of bytes received per RPC, by method.",
+		TagKeys:     []tag.Key{KeyClientMethod},
+		Aggregation: DefaultBytesDistribution,
+	}
+
+	ClientRoundtripLatencyView = &view.View{
+		Measure:     ClientRoundtripLatency,
+		Name:        "grpc.io/client/roundtrip_latency",
+		Description: "Distribution of round-trip latency, by method.",
+		TagKeys:     []tag.Key{KeyClientMethod},
+		Aggregation: DefaultMillisecondsDistribution,
+	}
+
+	ClientCompletedRPCsView = &view.View{
+		Measure:     ClientRoundtripLatency,
+		Name:        "grpc.io/client/completed_rpcs",
+		Description: "Count of RPCs by method and status.",
+		TagKeys:     []tag.Key{KeyClientMethod, KeyClientStatus},
+		Aggregation: view.Count(),
+	}
+
+	ClientSentMessagesPerRPCView = &view.View{
+		Measure:     ClientSentMessagesPerRPC,
+		Name:        "grpc.io/client/sent_messages_per_rpc",
+		Description: "Distribution of sent messages count per RPC, by method.",
+		TagKeys:     []tag.Key{KeyClientMethod},
+		Aggregation: DefaultMessageCountDistribution,
+	}
+
+	ClientReceivedMessagesPerRPCView = &view.View{
+		Measure:     ClientReceivedMessagesPerRPC,
+		Name:        "grpc.io/client/received_messages_per_rpc",
+		Description: "Distribution of received messages count per RPC, by method.",
+		TagKeys:     []tag.Key{KeyClientMethod},
+		Aggregation: DefaultMessageCountDistribution,
+	}
+
+	ClientServerLatencyView = &view.View{
+		Measure:     ClientServerLatency,
+		Name:        "grpc.io/client/server_latency",
+		Description: "Distribution of server latency as viewed by client, by method.",
+		TagKeys:     []tag.Key{KeyClientMethod},
+		Aggregation: DefaultMillisecondsDistribution,
+	}
+
+	// Deprecated: This view is going to be removed, if you need it please define it
+	// yourself.
+	ClientRequestCountView = &view.View{
+		Name:        "Count of request messages per client RPC",
+		TagKeys:     []tag.Key{KeyClientMethod},
+		Measure:     ClientRoundtripLatency,
+		Aggregation: view.Count(),
+	}
 )

 // DefaultClientViews are the default client views provided by this package.
 var DefaultClientViews = []*view.View{
-	ClientErrorCountView,
-	ClientRoundTripLatencyView,
-	ClientRequestBytesView,
-	ClientResponseBytesView,
-	ClientRequestCountView,
-	ClientResponseCountView,
+	ClientSentBytesPerRPCView,
+	ClientReceivedBytesPerRPCView,
+	ClientRoundtripLatencyView,
+	ClientCompletedRPCsView,
 }

 // TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.
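The renamed measures and views above follow the current gRPC stats conventions (per-RPC sent/received bytes and messages, round-trip and server latency, completed RPCs keyed by method and status). A sketch of wiring the default client views into an application; the target address is illustrative and the handler usage assumes the standard ocgrpc pattern:

```go
package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	// Collect the default client views (bytes, latency, completed RPCs).
	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
		log.Fatal(err)
	}

	// The ClientHandler records the measures for every outgoing RPC.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```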
71 vendor/go.opencensus.io/plugin/ocgrpc/client_metrics_test.go generated vendored
@@ -1,71 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
-	"reflect"
-	"runtime"
-	"testing"
-
-	"go.opencensus.io/stats/view"
-)
-
-func TestViewsAggregationsConform(t *testing.T) {
-	// See Issue https://github.com/census-instrumentation/opencensus-go/issues/210.
-	// This test ensures that the types of our Views match up with those
-	// from the Java reference at
-	// https://github.com/census-instrumentation/opencensus-java/blob/2b464864e3dd3f80e8e4c9dc72fccc225444a939/contrib/grpc_metrics/src/main/java/io/opencensus/contrib/grpc/metrics/RpcViewConstants.java#L113-L658
-	// Add any other defined views to be type checked during tests to ensure we don't regress.
-
-	assertTypeOf := func(v *view.View, wantSample *view.Aggregation) {
-		aggregation := v.Aggregation
-		gotValue := reflect.ValueOf(aggregation)
-		wantValue := reflect.ValueOf(wantSample)
-		if gotValue.Type() != wantValue.Type() {
-			_, _, line, _ := runtime.Caller(1)
-			t.Errorf("Item on line: %d got %T want %T", line, aggregation, wantSample)
-		}
-	}
-
-	assertTypeOf(ClientErrorCountView, view.Mean())
-	assertTypeOf(ClientRoundTripLatencyView, view.Distribution())
-	assertTypeOf(ClientRequestBytesView, view.Distribution())
-	assertTypeOf(ClientResponseBytesView, view.Distribution())
-	assertTypeOf(ClientRequestCountView, view.Distribution())
-	assertTypeOf(ClientResponseCountView, view.Distribution())
-}
-
-func TestStrictViewNames(t *testing.T) {
-	alreadySeen := make(map[string]int)
-	assertName := func(v *view.View, want string) {
-		_, _, line, _ := runtime.Caller(1)
-		if prevLine, ok := alreadySeen[v.Name]; ok {
-			t.Errorf("Item's Name on line %d was already used on line %d", line, prevLine)
-			return
-		}
-		if got := v.Name; got != want {
-			t.Errorf("Item on line: %d got %q want %q", line, got, want)
-		}
-		alreadySeen[v.Name] = line
-	}
-
-	assertName(ClientErrorCountView, "grpc.io/client/error_count")
-	assertName(ClientRoundTripLatencyView, "grpc.io/client/roundtrip_latency")
-	assertName(ClientRequestBytesView, "grpc.io/client/request_bytes")
-	assertName(ClientResponseBytesView, "grpc.io/client/response_bytes")
-	assertName(ClientRequestCountView, "grpc.io/client/request_count")
-	assertName(ClientResponseCountView, "grpc.io/client/response_count")
-}
152 vendor/go.opencensus.io/plugin/ocgrpc/client_spec_test.go generated vendored Normal file
@@ -0,0 +1,152 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package ocgrpc
+
+import (
+	"regexp"
+	"strings"
+	"testing"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/view"
+)
+
+var colSep = regexp.MustCompile(`\s*\|\s*`)
+
+func TestSpecClientMeasures(t *testing.T) {
+	spec := `
+| Measure name                              | Unit | Description                                                                                    |
+|-------------------------------------------|------|------------------------------------------------------------------------------------------------|
+| grpc.io/client/sent_messages_per_rpc      | 1    | Number of messages sent in the RPC (always 1 for non-streaming RPCs).                           |
+| grpc.io/client/sent_bytes_per_rpc         | By   | Total bytes sent across all request messages per RPC.                                           |
+| grpc.io/client/received_messages_per_rpc  | 1    | Number of response messages received per RPC (always 1 for non-streaming RPCs).                 |
+| grpc.io/client/received_bytes_per_rpc     | By   | Total bytes received across all response messages per RPC.                                      |
+| grpc.io/client/roundtrip_latency          | ms   | Time between first byte of request sent to last byte of response received, or terminal error.   |
+| grpc.io/client/server_latency             | ms   | Propagated from the server and should have the same value as "grpc.io/server/latency". |`
+
+	lines := strings.Split(spec, "\n")[3:]
+	type measureDef struct {
+		name string
+		unit string
+		desc string
+	}
+	measureDefs := make([]measureDef, 0, len(lines))
+	for _, line := range lines {
+		cols := colSep.Split(line, -1)[1:]
+		if len(cols) < 3 {
+			t.Fatalf("Invalid config line %#v", cols)
+		}
+		measureDefs = append(measureDefs, measureDef{cols[0], cols[1], cols[2]})
+	}
+
+	gotMeasures := []stats.Measure{
+		ClientSentMessagesPerRPC,
+		ClientSentBytesPerRPC,
+		ClientReceivedMessagesPerRPC,
+		ClientReceivedBytesPerRPC,
+		ClientRoundtripLatency,
+		ClientServerLatency,
+	}
+
+	if got, want := len(gotMeasures), len(measureDefs); got != want {
+		t.Fatalf("len(gotMeasures) = %d; want %d", got, want)
+	}
+
+	for i, m := range gotMeasures {
+		defn := measureDefs[i]
+		if got, want := m.Name(), defn.name; got != want {
+			t.Errorf("Name = %q; want %q", got, want)
+		}
+		if got, want := m.Unit(), defn.unit; got != want {
+			t.Errorf("%q: Unit = %q; want %q", defn.name, got, want)
+		}
+		if got, want := m.Description(), defn.desc; got != want {
+			t.Errorf("%q: Description = %q; want %q", defn.name, got, want)
+		}
+	}
+}
+
+func TestSpecClientViews(t *testing.T) {
+	defaultViewsSpec := `
+| View name                              | Measure suffix         | Aggregation  | Tags                          |
+|----------------------------------------|------------------------|--------------|-------------------------------|
+| grpc.io/client/sent_bytes_per_rpc      | sent_bytes_per_rpc     | distribution | client_method                 |
+| grpc.io/client/received_bytes_per_rpc  | received_bytes_per_rpc | distribution | client_method                 |
+| grpc.io/client/roundtrip_latency       | roundtrip_latency      | distribution | client_method                 |
+| grpc.io/client/completed_rpcs          | roundtrip_latency      | count        | client_method, client_status  |`
+
+	extraViewsSpec := `
+| View name                                 | Measure suffix            | Aggregation  | Tags suffix   |
+|-------------------------------------------|---------------------------|--------------|---------------|
+| grpc.io/client/sent_messages_per_rpc      | sent_messages_per_rpc     | distribution | client_method |
+| grpc.io/client/received_messages_per_rpc  | received_messages_per_rpc | distribution | client_method |
+| grpc.io/client/server_latency             | server_latency            | distribution | client_method |`
+
+	lines := strings.Split(defaultViewsSpec, "\n")[3:]
+	lines = append(lines, strings.Split(extraViewsSpec, "\n")[3:]...)
+	type viewDef struct {
+		name          string
+		measureSuffix string
+		aggregation   string
+		tags          string
+	}
+	viewDefs := make([]viewDef, 0, len(lines))
+	for _, line := range lines {
+		cols := colSep.Split(line, -1)[1:]
+		if len(cols) < 4 {
+			t.Fatalf("Invalid config line %#v", cols)
+		}
+		viewDefs = append(viewDefs, viewDef{cols[0], cols[1], cols[2], cols[3]})
+	}
+
+	views := DefaultClientViews
+	views = append(views, ClientSentMessagesPerRPCView, ClientReceivedMessagesPerRPCView, ClientServerLatencyView)
+
+	if got, want := len(views), len(viewDefs); got != want {
+		t.Fatalf("len(gotMeasures) = %d; want %d", got, want)
+	}
+
+	for i, v := range views {
+		defn := viewDefs[i]
+		if got, want := v.Name, defn.name; got != want {
+			t.Errorf("Name = %q; want %q", got, want)
+		}
+		if got, want := v.Measure.Name(), "grpc.io/client/"+defn.measureSuffix; got != want {
+			t.Errorf("%q: Measure.Name = %q; want %q", defn.name, got, want)
+		}
+		switch v.Aggregation.Type {
+		case view.AggTypeDistribution:
+			if got, want := "distribution", defn.aggregation; got != want {
+				t.Errorf("%q: Description = %q; want %q", defn.name, got, want)
+			}
+		case view.AggTypeCount:
+			if got, want := "count", defn.aggregation; got != want {
+				t.Errorf("%q: Description = %q; want %q", defn.name, got, want)
+			}
+		default:
+			t.Errorf("Invalid aggregation type")
+		}
+		wantTags := strings.Split(defn.tags, ", ")
+		if got, want := len(v.TagKeys), len(wantTags); got != want {
+			t.Errorf("len(TagKeys) = %d; want %d", got, want)
+		}
+		for j := range wantTags {
+			if got, want := v.TagKeys[j].Name(), "grpc_"+wantTags[j]; got != want {
+				t.Errorf("TagKeys[%d].Name() = %q; want %q", j, got, want)
+			}
+		}
+	}
+}
81
vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
generated
vendored
81
vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
generated
vendored
@@ -16,15 +16,12 @@
 package ocgrpc
 
 import (
-	"sync/atomic"
 	"time"
 
-	ocstats "go.opencensus.io/stats"
 	"go.opencensus.io/tag"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/stats"
-	"google.golang.org/grpc/status"
 )
 
 // statsTagRPC gets the tag.Map populated by the application code, serializes
@@ -48,81 +45,5 @@ func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo)
 		ctx = stats.SetTags(ctx, encoded)
 	}
 
-	// TODO(acetechnologist): should we be recording this later? What is the
-	// point of updating d.reqLen & d.reqCount if we update now?
-	record(ctx, d, "", ClientStartedCount.M(1))
-	return context.WithValue(ctx, grpcClientRPCKey, d)
-}
-
-// statsHandleRPC processes the RPC events.
-func (h *ClientHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) {
-	switch st := s.(type) {
-	case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
-		// do nothing for client
-	case *stats.OutPayload:
-		h.handleRPCOutPayload(ctx, st)
-	case *stats.InPayload:
-		h.handleRPCInPayload(ctx, st)
-	case *stats.End:
-		h.handleRPCEnd(ctx, st)
-	default:
-		grpclog.Infof("unexpected stats: %T", st)
-	}
-}
-
-func (h *ClientHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
-	d, ok := ctx.Value(grpcClientRPCKey).(*rpcData)
-	if !ok {
-		if grpclog.V(2) {
-			grpclog.Infoln("clientHandler.handleRPCOutPayload failed to retrieve *rpcData from context")
-		}
-		return
-	}
-
-	record(ctx, d, "", ClientRequestBytes.M(int64(s.Length)))
-	atomic.AddInt64(&d.reqCount, 1)
-}
-
-func (h *ClientHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
-	d, ok := ctx.Value(grpcClientRPCKey).(*rpcData)
-	if !ok {
-		if grpclog.V(2) {
-			grpclog.Infoln("failed to retrieve *rpcData from context")
-		}
-		return
-	}
-
-	record(ctx, d, "", ClientResponseBytes.M(int64(s.Length)))
-	atomic.AddInt64(&d.respCount, 1)
-}
-
-func (h *ClientHandler) handleRPCEnd(ctx context.Context, s *stats.End) {
-	d, ok := ctx.Value(grpcClientRPCKey).(*rpcData)
-	if !ok {
-		if grpclog.V(2) {
-			grpclog.Infoln("failed to retrieve *rpcData from context")
-		}
-		return
-	}
-
-	elapsedTime := time.Since(d.startTime)
-	reqCount := atomic.LoadInt64(&d.reqCount)
-	respCount := atomic.LoadInt64(&d.respCount)
-
-	m := []ocstats.Measurement{
-		ClientRequestCount.M(reqCount),
-		ClientResponseCount.M(respCount),
-		ClientFinishedCount.M(1),
-		ClientRoundTripLatency.M(float64(elapsedTime) / float64(time.Millisecond)),
-	}
-
-	var st string
-	if s.Error != nil {
-		s, ok := status.FromError(s.Error)
-		if ok {
-			st = s.Code().String()
-		}
-		m = append(m, ClientErrorCount.M(1))
-	}
-	record(ctx, d, st, m...)
+	return context.WithValue(ctx, rpcDataKey, d)
 }
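After this change the client handler only tags the RPC and stashes an *rpcData in the context; the per-payload recording no longer lives in this file. For orientation, a minimal sketch (not part of this commit) of how a ClientHandler is typically attached to a gRPC client; the target address and credentials choice are placeholders.

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"google.golang.org/grpc"
)

func main() {
	// Attach the OpenCensus stats handler so TagRPC/HandleRPC run for every
	// client RPC on this connection. "example.com:443" is a placeholder.
	conn, err := grpc.Dial("example.com:443",
		grpc.WithInsecure(), // placeholder; use transport credentials in real code
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}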
|||||||
111
vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler_test.go
generated
vendored
@@ -58,8 +58,8 @@ func TestClientDefaultCollections(t *testing.T) {
|
|||||||
}
|
}
|
||||||
tcs := []testCase{
|
tcs := []testCase{
|
||||||
{
|
{
|
||||||
"1",
|
label: "1",
|
||||||
[]*rpc{
|
rpcs: []*rpc{
|
||||||
{
|
{
|
||||||
[]tagPair{{k1, "v1"}},
|
[]tagPair{{k1, "v1"}},
|
||||||
&stats.RPCTagInfo{FullMethodName: "/package.service/method"},
|
&stats.RPCTagInfo{FullMethodName: "/package.service/method"},
|
||||||
@@ -72,46 +72,46 @@ func TestClientDefaultCollections(t *testing.T) {
|
|||||||
&stats.End{Error: nil},
|
&stats.End{Error: nil},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
[]*wantData{
|
wants: []*wantData{
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientRequestCountView },
|
func() *view.View { return ClientSentMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
|
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientResponseCountView },
|
func() *view.View { return ClientReceivedMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
|
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientRequestBytesView },
|
func() *view.View { return ClientSentBytesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
|
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientResponseBytesView },
|
func() *view.View { return ClientReceivedBytesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
|
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
|
||||||
},
|
},
|
||||||
@@ -120,8 +120,8 @@ func TestClientDefaultCollections(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"2",
|
label: "2",
|
||||||
[]*rpc{
|
rpcs: []*rpc{
|
||||||
{
|
{
|
||||||
[]tagPair{{k1, "v1"}},
|
[]tagPair{{k1, "v1"}},
|
||||||
&stats.RPCTagInfo{FullMethodName: "/package.service/method"},
|
&stats.RPCTagInfo{FullMethodName: "/package.service/method"},
|
||||||
@@ -149,36 +149,24 @@ func TestClientDefaultCollections(t *testing.T) {
|
|||||||
&stats.End{Error: status.Error(codes.Canceled, "canceled")},
|
&stats.End{Error: status.Error(codes.Canceled, "canceled")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
[]*wantData{
|
wants: []*wantData{
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientErrorCountView },
|
func() *view.View { return ClientSentMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyStatus, Value: "Canceled"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
|
||||||
},
|
|
||||||
Data: newMeanData(1, 1),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
func() *view.View { return ClientRequestCountView },
|
|
||||||
[]*view.Row{
|
|
||||||
{
|
|
||||||
Tags: []tag.Tag{
|
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5),
|
Data: newDistributionData([]int64{0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientResponseCountView },
|
func() *view.View { return ClientReceivedMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5),
|
Data: newDistributionData([]int64{0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5),
|
||||||
},
|
},
|
||||||
@@ -187,8 +175,8 @@ func TestClientDefaultCollections(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"3",
|
label: "3",
|
||||||
[]*rpc{
|
rpcs: []*rpc{
|
||||||
{
|
{
|
||||||
[]tagPair{{k1, "v1"}},
|
[]tagPair{{k1, "v1"}},
|
||||||
&stats.RPCTagInfo{FullMethodName: "/package.service/method"},
|
&stats.RPCTagInfo{FullMethodName: "/package.service/method"},
|
||||||
@@ -229,67 +217,48 @@ func TestClientDefaultCollections(t *testing.T) {
|
|||||||
&stats.End{Error: status.Error(codes.Aborted, "aborted")},
|
&stats.End{Error: status.Error(codes.Aborted, "aborted")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
[]*wantData{
|
wants: []*wantData{
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientErrorCountView },
|
func() *view.View { return ClientSentMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyStatus, Value: "Canceled"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
|
||||||
},
|
|
||||||
Data: newMeanData(1, 1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Tags: []tag.Tag{
|
|
||||||
{Key: KeyStatus, Value: "Aborted"},
|
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
|
||||||
},
|
|
||||||
Data: newMeanData(1, 1),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
func() *view.View { return ClientRequestCountView },
|
|
||||||
[]*view.Row{
|
|
||||||
{
|
|
||||||
Tags: []tag.Tag{
|
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2),
|
Data: newDistributionData([]int64{0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientResponseCountView },
|
func() *view.View { return ClientReceivedMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2),
|
Data: newDistributionData([]int64{0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientRequestBytesView },
|
func() *view.View { return ClientSentBytesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 1, 1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 1, 65536, 13696.125, 481423542.982143*7),
|
Data: newDistributionData([]int64{0, 0, 0, 0, 0, 2 /*16384*/, 1 /*65536*/, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ClientResponseBytesView },
|
func() *view.View { return ClientReceivedBytesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyClientMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 1, 16384, 4864.25, 59678208.25*3),
|
Data: newDistributionData([]int64{0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.666667, 2.1459558466666666e+08),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -297,16 +266,25 @@ func TestClientDefaultCollections(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
views := []*view.View{
|
||||||
|
ClientSentBytesPerRPCView,
|
||||||
|
ClientReceivedBytesPerRPCView,
|
||||||
|
ClientRoundtripLatencyView,
|
||||||
|
ClientCompletedRPCsView,
|
||||||
|
ClientSentMessagesPerRPCView,
|
||||||
|
ClientReceivedMessagesPerRPCView,
|
||||||
|
}
|
||||||
|
|
||||||
for _, tc := range tcs {
|
for _, tc := range tcs {
|
||||||
// Register views.
|
// Register views.
|
||||||
if err := view.Subscribe(DefaultClientViews...); err != nil {
|
if err := view.Register(views...); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
h := &ClientHandler{}
|
h := &ClientHandler{}
|
||||||
h.StartOptions.Sampler = trace.NeverSample()
|
h.StartOptions.Sampler = trace.NeverSample()
|
||||||
for _, rpc := range tc.rpcs {
|
for _, rpc := range tc.rpcs {
|
||||||
mods := []tag.Mutator{}
|
var mods []tag.Mutator
|
||||||
for _, t := range rpc.tags {
|
for _, t := range rpc.tags {
|
||||||
mods = append(mods, tag.Upsert(t.k, t.v))
|
mods = append(mods, tag.Upsert(t.k, t.v))
|
||||||
}
|
}
|
||||||
@@ -318,11 +296,14 @@ func TestClientDefaultCollections(t *testing.T) {
|
|||||||
ctx = stats.SetTags(context.Background(), encoded)
|
ctx = stats.SetTags(context.Background(), encoded)
|
||||||
ctx = h.TagRPC(ctx, rpc.tagInfo)
|
ctx = h.TagRPC(ctx, rpc.tagInfo)
|
||||||
for _, out := range rpc.outPayloads {
|
for _, out := range rpc.outPayloads {
|
||||||
|
out.Client = true
|
||||||
h.HandleRPC(ctx, out)
|
h.HandleRPC(ctx, out)
|
||||||
}
|
}
|
||||||
for _, in := range rpc.inPayloads {
|
for _, in := range rpc.inPayloads {
|
||||||
|
in.Client = true
|
||||||
h.HandleRPC(ctx, in)
|
h.HandleRPC(ctx, in)
|
||||||
}
|
}
|
||||||
|
rpc.end.Client = true
|
||||||
h.HandleRPC(ctx, rpc.end)
|
h.HandleRPC(ctx, rpc.end)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -349,7 +330,7 @@ func TestClientDefaultCollections(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Unregister views to cleanup.
|
// Unregister views to cleanup.
|
||||||
view.Unsubscribe(DefaultClientViews...)
|
view.Unregister(views...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
239
vendor/go.opencensus.io/plugin/ocgrpc/end_to_end_test.go
generated
vendored
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package ocgrpc_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"go.opencensus.io/internal/testpb"
|
||||||
|
"go.opencensus.io/plugin/ocgrpc"
|
||||||
|
"go.opencensus.io/stats/view"
|
||||||
|
"go.opencensus.io/tag"
|
||||||
|
)
|
||||||
|
|
||||||
|
var keyAccountId, _ = tag.NewKey("account_id")
|
||||||
|
|
||||||
|
func TestEndToEnd_Single(t *testing.T) {
|
||||||
|
view.Register(ocgrpc.DefaultClientViews...)
|
||||||
|
defer view.Unregister(ocgrpc.DefaultClientViews...)
|
||||||
|
view.Register(ocgrpc.DefaultServerViews...)
|
||||||
|
defer view.Unregister(ocgrpc.DefaultServerViews...)
|
||||||
|
|
||||||
|
extraViews := []*view.View{
|
||||||
|
ocgrpc.ServerReceivedMessagesPerRPCView,
|
||||||
|
ocgrpc.ClientReceivedMessagesPerRPCView,
|
||||||
|
ocgrpc.ServerSentMessagesPerRPCView,
|
||||||
|
ocgrpc.ClientSentMessagesPerRPCView,
|
||||||
|
}
|
||||||
|
view.Register(extraViews...)
|
||||||
|
defer view.Unregister(extraViews...)
|
||||||
|
|
||||||
|
client, done := testpb.NewTestClient(t)
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx, _ = tag.New(ctx, tag.Insert(keyAccountId, "abc123"))
|
||||||
|
|
||||||
|
var (
|
||||||
|
clientMethodTag = tag.Tag{Key: ocgrpc.KeyClientMethod, Value: "testpb.Foo/Single"}
|
||||||
|
serverMethodTag = tag.Tag{Key: ocgrpc.KeyServerMethod, Value: "testpb.Foo/Single"}
|
||||||
|
clientStatusOKTag = tag.Tag{Key: ocgrpc.KeyClientStatus, Value: "OK"}
|
||||||
|
serverStatusOKTag = tag.Tag{Key: ocgrpc.KeyServerStatus, Value: "OK"}
|
||||||
|
serverStatusUnknownTag = tag.Tag{Key: ocgrpc.KeyClientStatus, Value: "UNKNOWN"}
|
||||||
|
clientStatusUnknownTag = tag.Tag{Key: ocgrpc.KeyServerStatus, Value: "UNKNOWN"}
|
||||||
|
)
|
||||||
|
|
||||||
|
_, err := client.Single(ctx, &testpb.FooRequest{})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
checkCount(t, ocgrpc.ClientCompletedRPCsView, 1, clientMethodTag, clientStatusOKTag)
|
||||||
|
checkCount(t, ocgrpc.ServerCompletedRPCsView, 1, serverMethodTag, serverStatusOKTag)
|
||||||
|
|
||||||
|
_, _ = client.Single(ctx, &testpb.FooRequest{Fail: true})
|
||||||
|
checkCount(t, ocgrpc.ClientCompletedRPCsView, 1, clientMethodTag, serverStatusUnknownTag)
|
||||||
|
checkCount(t, ocgrpc.ServerCompletedRPCsView, 1, serverMethodTag, clientStatusUnknownTag)
|
||||||
|
|
||||||
|
tcs := []struct {
|
||||||
|
v *view.View
|
||||||
|
tags []tag.Tag
|
||||||
|
mean float64
|
||||||
|
}{
|
||||||
|
{ocgrpc.ClientSentMessagesPerRPCView, []tag.Tag{clientMethodTag}, 1.0},
|
||||||
|
{ocgrpc.ServerReceivedMessagesPerRPCView, []tag.Tag{serverMethodTag}, 1.0},
|
||||||
|
{ocgrpc.ClientReceivedMessagesPerRPCView, []tag.Tag{clientMethodTag}, 0.5},
|
||||||
|
{ocgrpc.ServerSentMessagesPerRPCView, []tag.Tag{serverMethodTag}, 0.5},
|
||||||
|
{ocgrpc.ClientSentBytesPerRPCView, []tag.Tag{clientMethodTag}, 1.0},
|
||||||
|
{ocgrpc.ServerReceivedBytesPerRPCView, []tag.Tag{serverMethodTag}, 1.0},
|
||||||
|
{ocgrpc.ClientReceivedBytesPerRPCView, []tag.Tag{clientMethodTag}, 0.0},
|
||||||
|
{ocgrpc.ServerSentBytesPerRPCView, []tag.Tag{serverMethodTag}, 0.0},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tcs {
|
||||||
|
t.Run("view="+tt.v.Name, func(t *testing.T) {
|
||||||
|
dist := getDistribution(t, tt.v, tt.tags...)
|
||||||
|
if got, want := dist.Count, int64(2); got != want {
|
||||||
|
t.Errorf("Count = %d; want %d", got, want)
|
||||||
|
}
|
||||||
|
if got, want := dist.Mean, tt.mean; got != want {
|
||||||
|
t.Errorf("Mean = %v; want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEndToEnd_Stream(t *testing.T) {
|
||||||
|
view.Register(ocgrpc.DefaultClientViews...)
|
||||||
|
defer view.Unregister(ocgrpc.DefaultClientViews...)
|
||||||
|
view.Register(ocgrpc.DefaultServerViews...)
|
||||||
|
defer view.Unregister(ocgrpc.DefaultServerViews...)
|
||||||
|
|
||||||
|
extraViews := []*view.View{
|
||||||
|
ocgrpc.ServerReceivedMessagesPerRPCView,
|
||||||
|
ocgrpc.ClientReceivedMessagesPerRPCView,
|
||||||
|
ocgrpc.ServerSentMessagesPerRPCView,
|
||||||
|
ocgrpc.ClientSentMessagesPerRPCView,
|
||||||
|
}
|
||||||
|
view.Register(extraViews...)
|
||||||
|
defer view.Unregister(extraViews...)
|
||||||
|
|
||||||
|
client, done := testpb.NewTestClient(t)
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx, _ = tag.New(ctx, tag.Insert(keyAccountId, "abc123"))
|
||||||
|
|
||||||
|
var (
|
||||||
|
clientMethodTag = tag.Tag{Key: ocgrpc.KeyClientMethod, Value: "testpb.Foo/Multiple"}
|
||||||
|
serverMethodTag = tag.Tag{Key: ocgrpc.KeyServerMethod, Value: "testpb.Foo/Multiple"}
|
||||||
|
clientStatusOKTag = tag.Tag{Key: ocgrpc.KeyClientStatus, Value: "OK"}
|
||||||
|
serverStatusOKTag = tag.Tag{Key: ocgrpc.KeyServerStatus, Value: "OK"}
|
||||||
|
)
|
||||||
|
|
||||||
|
const msgCount = 3
|
||||||
|
|
||||||
|
stream, err := client.Multiple(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
for i := 0; i < msgCount; i++ {
|
||||||
|
stream.Send(&testpb.FooRequest{})
|
||||||
|
_, err := stream.Recv()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := stream.CloseSend(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if _, err = stream.Recv(); err != io.EOF {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
checkCount(t, ocgrpc.ClientCompletedRPCsView, 1, clientMethodTag, clientStatusOKTag)
|
||||||
|
checkCount(t, ocgrpc.ServerCompletedRPCsView, 1, serverMethodTag, serverStatusOKTag)
|
||||||
|
|
||||||
|
tcs := []struct {
|
||||||
|
v *view.View
|
||||||
|
tag tag.Tag
|
||||||
|
}{
|
||||||
|
{ocgrpc.ClientSentMessagesPerRPCView, clientMethodTag},
|
||||||
|
{ocgrpc.ServerReceivedMessagesPerRPCView, serverMethodTag},
|
||||||
|
{ocgrpc.ServerSentMessagesPerRPCView, serverMethodTag},
|
||||||
|
{ocgrpc.ClientReceivedMessagesPerRPCView, clientMethodTag},
|
||||||
|
}
|
||||||
|
for _, tt := range tcs {
|
||||||
|
serverSent := getDistribution(t, tt.v, tt.tag)
|
||||||
|
if got, want := serverSent.Mean, float64(msgCount); got != want {
|
||||||
|
t.Errorf("%q.Count = %v; want %v", ocgrpc.ServerSentMessagesPerRPCView.Name, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkCount(t *testing.T, v *view.View, want int64, tags ...tag.Tag) {
|
||||||
|
if got, ok := getCount(t, v, tags...); ok && got != want {
|
||||||
|
t.Errorf("View[name=%q].Row[tags=%v].Data = %d; want %d", v.Name, tags, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCount(t *testing.T, v *view.View, tags ...tag.Tag) (int64, bool) {
|
||||||
|
if len(tags) != len(v.TagKeys) {
|
||||||
|
t.Errorf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags)
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
for i := range v.TagKeys {
|
||||||
|
if tags[i].Key != v.TagKeys[i] {
|
||||||
|
t.Errorf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags)
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rows, err := view.RetrieveData(v.Name)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
var foundRow *view.Row
|
||||||
|
for _, row := range rows {
|
||||||
|
if reflect.DeepEqual(row.Tags, tags) {
|
||||||
|
foundRow = row
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if foundRow == nil {
|
||||||
|
var gotTags [][]tag.Tag
|
||||||
|
for _, row := range rows {
|
||||||
|
gotTags = append(gotTags, row.Tags)
|
||||||
|
}
|
||||||
|
t.Errorf("Failed to find row with keys %v among:\n%v", tags, gotTags)
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return foundRow.Data.(*view.CountData).Value, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDistribution(t *testing.T, v *view.View, tags ...tag.Tag) *view.DistributionData {
|
||||||
|
if len(tags) != len(v.TagKeys) {
|
||||||
|
t.Fatalf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for i := range v.TagKeys {
|
||||||
|
if tags[i].Key != v.TagKeys[i] {
|
||||||
|
t.Fatalf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rows, err := view.RetrieveData(v.Name)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
var foundRow *view.Row
|
||||||
|
for _, row := range rows {
|
||||||
|
if reflect.DeepEqual(row.Tags, tags) {
|
||||||
|
foundRow = row
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if foundRow == nil {
|
||||||
|
var gotTags [][]tag.Tag
|
||||||
|
for _, row := range rows {
|
||||||
|
gotTags = append(gotTags, row.Tags)
|
||||||
|
}
|
||||||
|
t.Fatalf("Failed to find row with keys %v among:\n%v", tags, gotTags)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return foundRow.Data.(*view.DistributionData)
|
||||||
|
}
|
||||||
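The getCount and getDistribution helpers above are thin wrappers over view.RetrieveData. A condensed sketch of that read-back pattern, assuming the default client views have already been registered and some RPCs recorded; the printing is illustrative only.

package main

import (
	"fmt"
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
)

func main() {
	// Read back whatever has been recorded for one of the registered views.
	rows, err := view.RetrieveData(ocgrpc.ClientSentBytesPerRPCView.Name)
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range rows {
		// Each row is one combination of tag values; Data holds the aggregation,
		// a *view.DistributionData for the bytes-per-RPC views.
		dist := row.Data.(*view.DistributionData)
		fmt.Printf("%v: count=%d mean=%.1f\n", row.Tags, dist.Count, dist.Mean)
	}
}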
4
vendor/go.opencensus.io/plugin/ocgrpc/example_test.go
generated
vendored
@@ -24,7 +24,7 @@ import (
 
 func ExampleClientHandler() {
 	// Subscribe views to collect data.
-	if err := view.Subscribe(ocgrpc.DefaultClientViews...); err != nil {
+	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
 		log.Fatal(err)
 	}
 
@@ -39,7 +39,7 @@ func ExampleClientHandler() {
 
 func ExampleServerHandler() {
 	// Subscribe to views to collect data.
-	if err := view.Subscribe(ocgrpc.DefaultServerViews...); err != nil {
+	if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
 		log.Fatal(err)
 	}
 
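The example diff above is the core rename of this version bump: view.Subscribe and view.Unsubscribe become view.Register and view.Unregister. A minimal sketch of the new registration pattern, with the same log.Fatal error handling the examples use:

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register the default client views so data collection starts for them.
	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
		log.Fatal(err)
	}
	// Unregister stops collection again; the tests in this package use it for cleanup.
	defer view.Unregister(ocgrpc.DefaultClientViews...)
}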
|||||||
19
vendor/go.opencensus.io/plugin/ocgrpc/grpc_test.go
generated
vendored
@@ -31,14 +31,12 @@ func TestClientHandler(t *testing.T) {
 	ctx := context.Background()
 	te := &traceExporter{}
 	trace.RegisterExporter(te)
-	if err := ClientRequestCountView.Subscribe(); err != nil {
+	if err := view.Register(ClientSentMessagesPerRPCView); err != nil {
 		t.Fatal(err)
 	}
+	defer view.Unregister(ClientSentMessagesPerRPCView)
 
-	span := trace.NewSpan("/foo", nil, trace.StartOptions{
-		Sampler: trace.AlwaysSample(),
-	})
-	ctx = trace.WithSpan(ctx, span)
+	ctx, _ = trace.StartSpan(ctx, "/foo", trace.WithSampler(trace.AlwaysSample()))
 
 	var handler ClientHandler
 	ctx = handler.TagRPC(ctx, &stats.RPCTagInfo{
@@ -53,7 +51,7 @@ func TestClientHandler(t *testing.T) {
 		EndTime: time.Now(),
 	})
 
-	stats, err := view.RetrieveData(ClientRequestCountView.Name)
+	stats, err := view.RetrieveData(ClientSentMessagesPerRPCView.Name)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -65,9 +63,6 @@ func TestClientHandler(t *testing.T) {
 	if got, want := len(traces), 1; got != want {
 		t.Errorf("Got %v traces; want %v", got, want)
 	}
-
-	// Cleanup.
-	view.Unsubscribe(ClientErrorCountView)
 }
 
 func TestServerHandler(t *testing.T) {
@@ -94,7 +89,7 @@ func TestServerHandler(t *testing.T) {
 
 		te := &traceExporter{}
 		trace.RegisterExporter(te)
-		if err := ServerRequestCountView.Subscribe(); err != nil {
+		if err := view.Register(ServerCompletedRPCsView); err != nil {
 			t.Fatal(err)
 		}
 
@@ -112,7 +107,7 @@ func TestServerHandler(t *testing.T) {
 			EndTime: time.Now(),
 		})
 
-		rows, err := view.RetrieveData(ServerRequestCountView.Name)
+		rows, err := view.RetrieveData(ServerCompletedRPCsView.Name)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -126,7 +121,7 @@ func TestServerHandler(t *testing.T) {
 			}
 
 			// Cleanup.
-			view.Unsubscribe(ServerRequestCountView)
+			view.Unregister(ServerCompletedRPCsView)
 		})
 	}
 }
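grpc_test.go also picks up the new tracing API: the NewSpan/WithSpan pair from v0.6.0 is replaced by a single trace.StartSpan call, as the test above now does. A small sketch of the two styles side by side; the "/foo" span name is taken from the test.

package main

import (
	"context"

	"go.opencensus.io/trace"
)

func main() {
	// v0.6.0 (removed above):
	//   span := trace.NewSpan("/foo", nil, trace.StartOptions{Sampler: trace.AlwaysSample()})
	//   ctx = trace.WithSpan(ctx, span)
	// v0.9.0: one call returns both the derived context and the span.
	ctx, span := trace.StartSpan(context.Background(), "/foo",
		trace.WithSampler(trace.AlwaysSample()))
	defer span.End()
	_ = ctx // ctx carries the span to downstream calls
}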
|||||||
2
vendor/go.opencensus.io/plugin/ocgrpc/server.go
generated
vendored
@@ -69,7 +69,7 @@ func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) con
 // HandleRPC implements per-RPC tracing and stats instrumentation.
 func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
 	traceHandleRPC(ctx, rs)
-	s.statsHandleRPC(ctx, rs)
+	statsHandleRPC(ctx, rs)
 }
 
 // TagRPC implements per-RPC context management.
|||||||
95
vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
generated
vendored
@@ -23,14 +23,11 @@ import (
|
|||||||
|
|
||||||
// The following variables are measures are recorded by ServerHandler:
|
// The following variables are measures are recorded by ServerHandler:
|
||||||
var (
|
var (
|
||||||
ServerErrorCount, _ = stats.Int64("grpc.io/server/error_count", "RPC Errors", stats.UnitNone)
|
ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
|
||||||
ServerServerElapsedTime, _ = stats.Float64("grpc.io/server/server_elapsed_time", "Server elapsed time in msecs", stats.UnitMilliseconds)
|
ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
|
||||||
ServerRequestBytes, _ = stats.Int64("grpc.io/server/request_bytes", "Request bytes", stats.UnitBytes)
|
ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
|
||||||
ServerResponseBytes, _ = stats.Int64("grpc.io/server/response_bytes", "Response bytes", stats.UnitBytes)
|
ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes)
|
||||||
ServerStartedCount, _ = stats.Int64("grpc.io/server/started_count", "Number of server RPCs (streams) started", stats.UnitNone)
|
ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
|
||||||
ServerFinishedCount, _ = stats.Int64("grpc.io/server/finished_count", "Number of server RPCs (streams) finished", stats.UnitNone)
|
|
||||||
ServerRequestCount, _ = stats.Int64("grpc.io/server/request_count", "Number of server RPC request messages", stats.UnitNone)
|
|
||||||
ServerResponseCount, _ = stats.Int64("grpc.io/server/response_count", "Number of server RPC response messages", stats.UnitNone)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO(acetechnologist): This is temporary and will need to be replaced by a
|
// TODO(acetechnologist): This is temporary and will need to be replaced by a
|
||||||
@@ -42,63 +39,59 @@ var (
|
|||||||
// package. These are declared as a convenience only; none are subscribed by
|
// package. These are declared as a convenience only; none are subscribed by
|
||||||
// default.
|
// default.
|
||||||
var (
|
var (
|
||||||
ServerErrorCountView = &view.View{
|
ServerReceivedBytesPerRPCView = &view.View{
|
||||||
Name: "grpc.io/server/error_count",
|
Name: "grpc.io/server/received_bytes_per_rpc",
|
||||||
Description: "RPC Errors",
|
Description: "Distribution of received bytes per RPC, by method.",
|
||||||
TagKeys: []tag.Key{KeyMethod, KeyStatus},
|
Measure: ServerReceivedBytesPerRPC,
|
||||||
Measure: ServerErrorCount,
|
TagKeys: []tag.Key{KeyServerMethod},
|
||||||
Aggregation: view.Count(),
|
Aggregation: DefaultBytesDistribution,
|
||||||
}
|
}
|
||||||
|
|
||||||
ServerServerElapsedTimeView = &view.View{
|
ServerSentBytesPerRPCView = &view.View{
|
||||||
Name: "grpc.io/server/server_elapsed_time",
|
Name: "grpc.io/server/sent_bytes_per_rpc",
|
||||||
Description: "Server elapsed time in msecs",
|
Description: "Distribution of total sent bytes per RPC, by method.",
|
||||||
TagKeys: []tag.Key{KeyMethod},
|
Measure: ServerSentBytesPerRPC,
|
||||||
Measure: ServerServerElapsedTime,
|
TagKeys: []tag.Key{KeyServerMethod},
|
||||||
|
Aggregation: DefaultBytesDistribution,
|
||||||
|
}
|
||||||
|
|
||||||
|
ServerLatencyView = &view.View{
|
||||||
|
Name: "grpc.io/server/server_latency",
|
||||||
|
Description: "Distribution of server latency in milliseconds, by method.",
|
||||||
|
TagKeys: []tag.Key{KeyServerMethod},
|
||||||
|
Measure: ServerLatency,
|
||||||
Aggregation: DefaultMillisecondsDistribution,
|
Aggregation: DefaultMillisecondsDistribution,
|
||||||
}
|
}
|
||||||
|
|
||||||
ServerRequestBytesView = &view.View{
|
ServerCompletedRPCsView = &view.View{
|
||||||
Name: "grpc.io/server/request_bytes",
|
Name: "grpc.io/server/completed_rpcs",
|
||||||
Description: "Request bytes",
|
Description: "Count of RPCs by method and status.",
|
||||||
TagKeys: []tag.Key{KeyMethod},
|
TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus},
|
||||||
Measure: ServerRequestBytes,
|
Measure: ServerLatency,
|
||||||
Aggregation: DefaultBytesDistribution,
|
Aggregation: view.Count(),
|
||||||
}
|
}
|
||||||
|
|
||||||
ServerResponseBytesView = &view.View{
|
ServerReceivedMessagesPerRPCView = &view.View{
|
||||||
Name: "grpc.io/server/response_bytes",
|
Name: "grpc.io/server/received_messages_per_rpc",
|
||||||
Description: "Response bytes",
|
Description: "Distribution of messages received count per RPC, by method.",
|
||||||
TagKeys: []tag.Key{KeyMethod},
|
TagKeys: []tag.Key{KeyServerMethod},
|
||||||
Measure: ServerResponseBytes,
|
Measure: ServerReceivedMessagesPerRPC,
|
||||||
Aggregation: DefaultBytesDistribution,
|
|
||||||
}
|
|
||||||
|
|
||||||
ServerRequestCountView = &view.View{
|
|
||||||
Name: "grpc.io/server/request_count",
|
|
||||||
Description: "Count of request messages per server RPC",
|
|
||||||
TagKeys: []tag.Key{KeyMethod},
|
|
||||||
Measure: ServerRequestCount,
|
|
||||||
Aggregation: DefaultMessageCountDistribution,
|
Aggregation: DefaultMessageCountDistribution,
|
||||||
}
|
}
|
||||||
|
|
||||||
ServerResponseCountView = &view.View{
|
ServerSentMessagesPerRPCView = &view.View{
|
||||||
Name: "grpc.io/server/response_count",
|
Name: "grpc.io/server/sent_messages_per_rpc",
|
||||||
Description: "Count of response messages per server RPC",
|
Description: "Distribution of messages sent count per RPC, by method.",
|
||||||
TagKeys: []tag.Key{KeyMethod},
|
TagKeys: []tag.Key{KeyServerMethod},
|
||||||
Measure: ServerResponseCount,
|
Measure: ServerSentMessagesPerRPC,
|
||||||
Aggregation: DefaultMessageCountDistribution,
|
Aggregation: DefaultMessageCountDistribution,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// DefaultServerViews are the default server views provided by this package.
|
// DefaultServerViews are the default server views provided by this package.
|
||||||
var DefaultServerViews = []*view.View{
|
var DefaultServerViews = []*view.View{
|
||||||
ServerErrorCountView,
|
ServerReceivedBytesPerRPCView,
|
||||||
ServerServerElapsedTimeView,
|
ServerSentBytesPerRPCView,
|
||||||
ServerRequestBytesView,
|
ServerLatencyView,
|
||||||
ServerResponseBytesView,
|
ServerCompletedRPCsView,
|
||||||
ServerRequestCountView,
|
|
||||||
ServerResponseCountView,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.
|
|
||||||
|
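The measure constructors changed shape as well: in v0.6.0 stats.Int64 and stats.Float64 returned a (measure, error) pair, while in v0.9.0 they return the measure directly, which is why the server_metrics.go diff above drops the ", _" assignments. A sketch of declaring one of the new measures plus a view over it; the measure name and description are copied from the diff, while the tag key name here is an assumption for illustration (the real package uses its exported KeyServerMethod).

package example

import (
	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

// v0.9.0 constructor: no error return, unit constants instead of UnitNone.
var serverReceivedBytesPerRPC = stats.Int64(
	"grpc.io/server/received_bytes_per_rpc",
	"Total bytes received across all messages per RPC.",
	stats.UnitBytes)

// Hypothetical tag key for this sketch only.
var keyServerMethod, _ = tag.NewKey("grpc_server_method")

// A distribution view over the measure, mirroring the shape of the views above.
var serverReceivedBytesView = &view.View{
	Name:        "grpc.io/server/received_bytes_per_rpc",
	Description: "Distribution of received bytes per RPC, by method.",
	Measure:     serverReceivedBytesPerRPC,
	TagKeys:     []tag.Key{keyServerMethod},
	Aggregation: view.Distribution(0, 1024, 2048, 4096, 16384, 65536),
}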
|||||||
146
vendor/go.opencensus.io/plugin/ocgrpc/server_spec_test.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
|
||||||
|
package ocgrpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"go.opencensus.io/stats"
|
||||||
|
"go.opencensus.io/stats/view"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSpecServerMeasures(t *testing.T) {
|
||||||
|
spec := `
|
||||||
|
| Measure name | Unit | Description |
|
||||||
|
|------------------------------------------|------|-----------------------------------------------------------------------------------------------|
|
||||||
|
| grpc.io/server/received_messages_per_rpc | 1 | Number of messages received in each RPC. Has value 1 for non-streaming RPCs. |
|
||||||
|
| grpc.io/server/received_bytes_per_rpc | By | Total bytes received across all messages per RPC. |
|
||||||
|
| grpc.io/server/sent_messages_per_rpc | 1 | Number of messages sent in each RPC. Has value 1 for non-streaming RPCs. |
|
||||||
|
| grpc.io/server/sent_bytes_per_rpc | By | Total bytes sent in across all response messages per RPC. |
|
||||||
|
| grpc.io/server/server_latency | ms | Time between first byte of request received to last byte of response sent, or terminal error. |`
|
||||||
|
|
||||||
|
lines := strings.Split(spec, "\n")[3:]
|
||||||
|
type measureDef struct {
|
||||||
|
name string
|
||||||
|
unit string
|
||||||
|
desc string
|
||||||
|
}
|
||||||
|
measureDefs := make([]measureDef, 0, len(lines))
|
||||||
|
for _, line := range lines {
|
||||||
|
cols := colSep.Split(line, -1)[1:]
|
||||||
|
if len(cols) < 3 {
|
||||||
|
t.Fatalf("Invalid config line %#v", cols)
|
||||||
|
}
|
||||||
|
measureDefs = append(measureDefs, measureDef{cols[0], cols[1], cols[2]})
|
||||||
|
}
|
||||||
|
|
||||||
|
gotMeasures := []stats.Measure{
|
||||||
|
ServerReceivedMessagesPerRPC,
|
||||||
|
ServerReceivedBytesPerRPC,
|
||||||
|
ServerSentMessagesPerRPC,
|
||||||
|
ServerSentBytesPerRPC,
|
||||||
|
ServerLatency,
|
||||||
|
}
|
||||||
|
|
||||||
|
if got, want := len(gotMeasures), len(measureDefs); got != want {
|
||||||
|
t.Fatalf("len(gotMeasures) = %d; want %d", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, m := range gotMeasures {
|
||||||
|
defn := measureDefs[i]
|
||||||
|
if got, want := m.Name(), defn.name; got != want {
|
||||||
|
t.Errorf("Name = %q; want %q", got, want)
|
||||||
|
}
|
||||||
|
if got, want := m.Unit(), defn.unit; got != want {
|
||||||
|
t.Errorf("%q: Unit = %q; want %q", defn.name, got, want)
|
||||||
|
}
|
||||||
|
if got, want := m.Description(), defn.desc; got != want {
|
||||||
|
t.Errorf("%q: Description = %q; want %q", defn.name, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSpecServerViews(t *testing.T) {
|
||||||
|
defaultViewsSpec := `
|
||||||
|
| View name | Measure suffix | Aggregation | Tags suffix |
|
||||||
|
|---------------------------------------|------------------------|--------------|------------------------------|
|
||||||
|
| grpc.io/server/received_bytes_per_rpc | received_bytes_per_rpc | distribution | server_method |
|
||||||
|
| grpc.io/server/sent_bytes_per_rpc | sent_bytes_per_rpc | distribution | server_method |
|
||||||
|
| grpc.io/server/server_latency | server_latency | distribution | server_method |
|
||||||
|
| grpc.io/server/completed_rpcs | server_latency | count | server_method, server_status |`
|
||||||
|
|
||||||
|
extraViewsSpec := `
|
||||||
|
| View name | Measure suffix | Aggregation | Tags suffix |
|
||||||
|
|------------------------------------------|---------------------------|--------------|---------------|
|
||||||
|
| grpc.io/server/received_messages_per_rpc | received_messages_per_rpc | distribution | server_method |
|
||||||
|
| grpc.io/server/sent_messages_per_rpc | sent_messages_per_rpc | distribution | server_method |`
|
||||||
|
|
||||||
|
lines := strings.Split(defaultViewsSpec, "\n")[3:]
|
||||||
|
lines = append(lines, strings.Split(extraViewsSpec, "\n")[3:]...)
|
||||||
|
type viewDef struct {
|
||||||
|
name string
|
||||||
|
measureSuffix string
|
||||||
|
aggregation string
|
||||||
|
tags string
|
||||||
|
}
|
||||||
|
viewDefs := make([]viewDef, 0, len(lines))
|
||||||
|
for _, line := range lines {
|
||||||
|
cols := colSep.Split(line, -1)[1:]
|
||||||
|
if len(cols) < 4 {
|
||||||
|
t.Fatalf("Invalid config line %#v", cols)
|
||||||
|
}
|
||||||
|
viewDefs = append(viewDefs, viewDef{cols[0], cols[1], cols[2], cols[3]})
|
||||||
|
}
|
||||||
|
|
||||||
|
views := DefaultServerViews
|
||||||
|
views = append(views, ServerReceivedMessagesPerRPCView, ServerSentMessagesPerRPCView)
|
||||||
|
|
||||||
|
if got, want := len(views), len(viewDefs); got != want {
|
||||||
|
t.Fatalf("len(gotMeasures) = %d; want %d", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, v := range views {
|
||||||
|
defn := viewDefs[i]
|
||||||
|
if got, want := v.Name, defn.name; got != want {
|
||||||
|
t.Errorf("Name = %q; want %q", got, want)
|
||||||
|
}
|
||||||
|
if got, want := v.Measure.Name(), "grpc.io/server/"+defn.measureSuffix; got != want {
|
||||||
|
t.Errorf("%q: Measure.Name = %q; want %q", defn.name, got, want)
|
||||||
|
}
|
||||||
|
switch v.Aggregation.Type {
|
||||||
|
case view.AggTypeDistribution:
|
||||||
|
if got, want := "distribution", defn.aggregation; got != want {
|
||||||
|
t.Errorf("%q: Description = %q; want %q", defn.name, got, want)
|
||||||
|
}
|
||||||
|
case view.AggTypeCount:
|
||||||
|
if got, want := "count", defn.aggregation; got != want {
|
||||||
|
t.Errorf("%q: Description = %q; want %q", defn.name, got, want)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
t.Errorf("Invalid aggregation type")
|
||||||
|
}
|
||||||
|
wantTags := strings.Split(defn.tags, ", ")
|
||||||
|
if got, want := len(v.TagKeys), len(wantTags); got != want {
|
||||||
|
t.Errorf("len(TagKeys) = %d; want %d", got, want)
|
||||||
|
}
|
||||||
|
for j := range wantTags {
|
||||||
|
if got, want := v.TagKeys[j].Name(), "grpc_"+wantTags[j]; got != want {
|
||||||
|
t.Errorf("TagKeys[%d].Name() = %q; want %q", j, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
100
vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
generated
vendored
@@ -16,17 +16,13 @@
|
|||||||
package ocgrpc
|
package ocgrpc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
ocstats "go.opencensus.io/stats"
|
|
||||||
"go.opencensus.io/tag"
|
"go.opencensus.io/tag"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
"google.golang.org/grpc/stats"
|
"google.golang.org/grpc/stats"
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from
|
// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from
|
||||||
@@ -35,7 +31,7 @@ func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo)
|
|||||||
startTime := time.Now()
|
startTime := time.Now()
|
||||||
if info == nil {
|
if info == nil {
|
||||||
if grpclog.V(2) {
|
if grpclog.V(2) {
|
||||||
grpclog.Infof("serverHandler.TagRPC called with nil info.", info.FullMethodName)
|
grpclog.Infof("opencensus: TagRPC called with nil info.")
|
||||||
}
|
}
|
||||||
return ctx
|
return ctx
|
||||||
}
|
}
|
||||||
@@ -43,95 +39,25 @@ func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo)
|
|||||||
startTime: startTime,
|
startTime: startTime,
|
||||||
method: info.FullMethodName,
|
method: info.FullMethodName,
|
||||||
}
|
}
|
||||||
ctx, _ = h.createTags(ctx)
|
propagated := h.extractPropagatedTags(ctx)
|
||||||
record(ctx, d, "", ServerStartedCount.M(1))
|
ctx = tag.NewContext(ctx, propagated)
|
||||||
return context.WithValue(ctx, grpcServerRPCKey, d)
|
ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
|
||||||
|
return context.WithValue(ctx, rpcDataKey, d)
|
||||||
}
|
}
|
||||||
|
|
||||||
// statsHandleRPC processes the RPC events.
|
// extractPropagatedTags creates a new tag map containing the tags extracted from the
|
||||||
func (h *ServerHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) {
|
|
||||||
switch st := s.(type) {
|
|
||||||
case *stats.Begin, *stats.InHeader, *stats.InTrailer, *stats.OutHeader, *stats.OutTrailer:
|
|
||||||
// Do nothing for server
|
|
||||||
case *stats.InPayload:
|
|
||||||
h.handleRPCInPayload(ctx, st)
|
|
||||||
case *stats.OutPayload:
|
|
||||||
// For stream it can be called multiple times per RPC.
|
|
||||||
h.handleRPCOutPayload(ctx, st)
|
|
||||||
case *stats.End:
|
|
||||||
h.handleRPCEnd(ctx, st)
|
|
||||||
default:
|
|
||||||
grpclog.Infof("unexpected stats: %T", st)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *ServerHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
|
|
||||||
d, ok := ctx.Value(grpcServerRPCKey).(*rpcData)
|
|
||||||
if !ok {
|
|
||||||
if grpclog.V(2) {
|
|
||||||
grpclog.Infoln("handleRPCInPayload: failed to retrieve *rpcData from context")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
record(ctx, d, "", ServerRequestBytes.M(int64(s.Length)))
|
|
||||||
atomic.AddInt64(&d.reqCount, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *ServerHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
|
|
||||||
d, ok := ctx.Value(grpcServerRPCKey).(*rpcData)
|
|
||||||
if !ok {
|
|
||||||
if grpclog.V(2) {
|
|
||||||
grpclog.Infoln("handleRPCOutPayload: failed to retrieve *rpcData from context")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
record(ctx, d, "", ServerResponseBytes.M(int64(s.Length)))
|
|
||||||
atomic.AddInt64(&d.respCount, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *ServerHandler) handleRPCEnd(ctx context.Context, s *stats.End) {
|
|
||||||
d, ok := ctx.Value(grpcServerRPCKey).(*rpcData)
|
|
||||||
if !ok {
|
|
||||||
if grpclog.V(2) {
|
|
||||||
grpclog.Infoln("serverHandler.handleRPCEnd failed to retrieve *rpcData from context")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
elapsedTime := time.Since(d.startTime)
|
|
||||||
reqCount := atomic.LoadInt64(&d.reqCount)
|
|
||||||
respCount := atomic.LoadInt64(&d.respCount)
|
|
||||||
|
|
||||||
m := []ocstats.Measurement{
|
|
||||||
ServerRequestCount.M(reqCount),
|
|
||||||
ServerResponseCount.M(respCount),
|
|
||||||
ServerFinishedCount.M(1),
|
|
||||||
ServerServerElapsedTime.M(float64(elapsedTime) / float64(time.Millisecond)),
|
|
||||||
}
|
|
||||||
|
|
||||||
var st string
|
|
||||||
if s.Error != nil {
|
|
||||||
s, ok := status.FromError(s.Error)
|
|
||||||
if ok {
|
|
||||||
st = s.Code().String()
|
|
||||||
}
|
|
||||||
m = append(m, ServerErrorCount.M(1))
|
|
||||||
}
|
|
||||||
record(ctx, d, st, m...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// createTags creates a new tag map containing the tags extracted from the
|
|
||||||
// gRPC metadata.
|
// gRPC metadata.
|
||||||
func (h *ServerHandler) createTags(ctx context.Context) (context.Context, error) {
|
func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
|
||||||
buf := stats.Tags(ctx)
|
buf := stats.Tags(ctx)
|
||||||
if buf == nil {
|
if buf == nil {
|
||||||
return ctx, nil
|
return nil
|
||||||
}
|
}
|
||||||
propagated, err := tag.Decode(buf)
|
propagated, err := tag.Decode(buf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("serverHandler.createTags failed to decode: %v", err)
|
if grpclog.V(2) {
|
||||||
|
grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
return tag.NewContext(ctx, propagated), nil
|
return propagated
|
||||||
}
|
}
|
||||||
|
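Note how the server handler now tolerates bad tag metadata instead of returning an error: extractPropagatedTags logs via grpclog and falls back to nil. A condensed sketch of that decode flow under the same packages; the function name here is hypothetical, not part of the vendored code.

package example

import (
	"context"

	"go.opencensus.io/tag"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/stats"
)

// propagatedTagContext mirrors extractPropagatedTags above: read the raw
// census tags the client attached to the gRPC metadata, decode them, and
// install them on the server-side context, ignoring malformed input.
func propagatedTagContext(ctx context.Context) context.Context {
	buf := stats.Tags(ctx) // raw tag bytes set by the client, if any
	if buf == nil {
		return ctx
	}
	m, err := tag.Decode(buf)
	if err != nil {
		grpclog.Warningf("failed to decode propagated tags: %v", err)
		return ctx
	}
	return tag.NewContext(ctx, m)
}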
|||||||
89
vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler_test.go
generated
vendored
@@ -74,44 +74,44 @@ func TestServerDefaultCollections(t *testing.T) {
|
|||||||
},
|
},
|
||||||
[]*wantData{
|
[]*wantData{
|
||||||
{
|
{
|
||||||
func() *view.View { return ServerRequestCountView },
|
func() *view.View { return ServerReceivedMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyServerMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
|
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ServerResponseCountView },
|
func() *view.View { return ServerSentMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyServerMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
|
Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ServerRequestBytesView },
|
func() *view.View { return ServerReceivedBytesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyServerMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
|
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ServerResponseBytesView },
|
func() *view.View { return ServerSentBytesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyServerMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
|
Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0),
|
||||||
},
|
},
|
||||||
@@ -151,34 +151,22 @@ func TestServerDefaultCollections(t *testing.T) {
|
|||||||
},
|
},
|
||||||
[]*wantData{
|
[]*wantData{
|
||||||
{
|
{
|
||||||
func() *view.View { return ServerErrorCountView },
|
func() *view.View { return ServerReceivedMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyStatus, Value: "Canceled"},
|
{Key: KeyServerMethod, Value: "package.service/method"},
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
|
||||||
},
|
|
||||||
Data: newCountData(1),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
func() *view.View { return ServerRequestCountView },
|
|
||||||
[]*view.Row{
|
|
||||||
{
|
|
||||||
Tags: []tag.Tag{
|
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
|
||||||
},
|
},
|
||||||
Data: newDistributionData([]int64{0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5),
|
Data: newDistributionData([]int64{0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
func() *view.View { return ServerResponseCountView },
|
func() *view.View { return ServerSentMessagesPerRPCView },
|
||||||
[]*view.Row{
|
[]*view.Row{
|
||||||
{
|
{
|
||||||
Tags: []tag.Tag{
|
Tags: []tag.Tag{
|
||||||
{Key: KeyMethod, Value: "package.service/method"},
|
{Key: KeyServerMethod, Value: "package.service/method"},
|
||||||
},
|
},
|
||||||
 Data: newDistributionData([]int64{0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5),
 },
@@ -231,65 +219,46 @@ func TestServerDefaultCollections(t *testing.T) {
 },
 []*wantData{
 {
-func() *view.View { return ServerErrorCountView },
+func() *view.View { return ServerReceivedMessagesPerRPCView },
 []*view.Row{
 {
 Tags: []tag.Tag{
-{Key: KeyStatus, Value: "Canceled"},
+{Key: KeyServerMethod, Value: "package.service/method"},
-{Key: KeyMethod, Value: "package.service/method"},
-},
-Data: newCountData(1),
-},
-{
-Tags: []tag.Tag{
-{Key: KeyStatus, Value: "Aborted"},
-{Key: KeyMethod, Value: "package.service/method"},
-},
-Data: newCountData(1),
-},
-},
-},
-{
-func() *view.View { return ServerRequestCountView },
-[]*view.Row{
-{
-Tags: []tag.Tag{
-{Key: KeyMethod, Value: "package.service/method"},
 },
 Data: newDistributionData([]int64{0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2),
 },
 },
 },
 {
-func() *view.View { return ServerResponseCountView },
+func() *view.View { return ServerSentMessagesPerRPCView },
 []*view.Row{
 {
 Tags: []tag.Tag{
-{Key: KeyMethod, Value: "package.service/method"},
+{Key: KeyServerMethod, Value: "package.service/method"},
 },
 Data: newDistributionData([]int64{0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2),
 },
 },
 },
 {
-func() *view.View { return ServerRequestBytesView },
+func() *view.View { return ServerReceivedBytesPerRPCView },
 []*view.Row{
 {
 Tags: []tag.Tag{
-{Key: KeyMethod, Value: "package.service/method"},
+{Key: KeyServerMethod, Value: "package.service/method"},
 },
-Data: newDistributionData([]int64{0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 4, 1, 16384, 4864.25, 59678208.25*3),
+Data: newDistributionData([]int64{0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.6666667, 2.1459558466666667e+08),
 },
 },
 },
 {
-func() *view.View { return ServerResponseBytesView },
+func() *view.View { return ServerSentBytesPerRPCView },
 []*view.Row{
 {
 Tags: []tag.Tag{
-{Key: KeyMethod, Value: "package.service/method"},
+{Key: KeyServerMethod, Value: "package.service/method"},
 },
-Data: newDistributionData([]int64{0, 1, 1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 8, 1, 65536, 13696.125, 481423542.982143*7),
+Data: newDistributionData([]int64{0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09),
 },
 },
 },
@@ -297,8 +266,10 @@ func TestServerDefaultCollections(t *testing.T) {
 },
 }

+views := append(DefaultServerViews[:], ServerReceivedMessagesPerRPCView, ServerSentMessagesPerRPCView)

 for _, tc := range tcs {
-if err := view.Subscribe(DefaultServerViews...); err != nil {
+if err := view.Register(views...); err != nil {
 t.Fatal(err)
 }
@@ -349,18 +320,12 @@ func TestServerDefaultCollections(t *testing.T) {
 }

 // Unregister views to cleanup.
-view.Unsubscribe(DefaultServerViews...)
+view.Unregister(views...)
 }
 }

 func newCountData(v int) *view.CountData {
-cav := view.CountData(v)
+return &view.CountData{Value: int64(v)}
-return &cav
-}
-
-func newMeanData(count int64, mean float64) *view.MeanData {
-mav := view.MeanData{Count: count, Mean: mean}
-return &mav
 }

 func newDistributionData(countPerBucket []int64, count int64, min, max, mean, sumOfSquaredDev float64) *view.DistributionData {
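The rename from Subscribe/Unsubscribe to Register/Unregister is the view-API change that downstream consumers of these vendored packages see most often. A minimal sketch of the v0.9.0 pattern, assuming a caller that just wants the default ocgrpc server views (names taken from the diff above, not from fn itself):

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
)

func main() {
	// v0.6.0 used view.Subscribe/view.Unsubscribe; v0.9.0 registers views instead.
	if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
		log.Fatalf("failed to register views: %v", err)
	}
	defer view.Unregister(ocgrpc.DefaultServerViews...)
}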
vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go | 154 (generated, vendored)
@@ -16,13 +16,19 @@
 package ocgrpc

 import (
+"context"
+"strconv"
 "strings"
+"sync/atomic"
 "time"

 ocstats "go.opencensus.io/stats"
 "go.opencensus.io/stats/view"
 "go.opencensus.io/tag"
-"golang.org/x/net/context"
+"google.golang.org/grpc/codes"
+"google.golang.org/grpc/grpclog"
+"google.golang.org/grpc/stats"
+"google.golang.org/grpc/status"
 )

 type grpcInstrumentationKey string
@@ -33,7 +39,7 @@ type grpcInstrumentationKey string
 type rpcData struct {
 // reqCount and respCount has to be the first words
 // in order to be 64-aligned on 32-bit architectures.
-reqCount, respCount int64 // access atomically
+sentCount, sentBytes, recvCount, recvBytes int64 // access atomically

 // startTime represents the time at which TagRPC was invoked at the
 // beginning of an RPC. It is an appoximation of the time when the
@@ -46,32 +52,148 @@ type rpcData struct {
 // both the default GRPC client and GRPC server metrics.
 var (
 DefaultBytesDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
-DefaultMillisecondsDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
+DefaultMillisecondsDistribution = view.Distribution(0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
 DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
 )

 var (
-KeyMethod, _ = tag.NewKey("method") // gRPC service and method name
+KeyServerMethod, _ = tag.NewKey("grpc_server_method")
-KeyStatus, _ = tag.NewKey("canonical_status") // Canonical status code
+KeyClientMethod, _ = tag.NewKey("grpc_client_method")
+KeyServerStatus, _ = tag.NewKey("grpc_server_status")
+KeyClientStatus, _ = tag.NewKey("grpc_client_status")
 )

 var (
-grpcServerConnKey = grpcInstrumentationKey("server-conn")
+rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
-grpcServerRPCKey = grpcInstrumentationKey("server-rpc")
-grpcClientRPCKey = grpcInstrumentationKey("client-rpc")
 )

 func methodName(fullname string) string {
 return strings.TrimLeft(fullname, "/")
 }

-func record(ctx context.Context, data *rpcData, status string, m ...ocstats.Measurement) {
+// statsHandleRPC processes the RPC events.
-mods := []tag.Mutator{
+func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
-tag.Upsert(KeyMethod, methodName(data.method)),
+switch st := s.(type) {
+case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
+// do nothing for client
+case *stats.OutPayload:
+handleRPCOutPayload(ctx, st)
+case *stats.InPayload:
+handleRPCInPayload(ctx, st)
+case *stats.End:
+handleRPCEnd(ctx, st)
+default:
+grpclog.Infof("unexpected stats: %T", st)
+}
+}
+
+func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
+d, ok := ctx.Value(rpcDataKey).(*rpcData)
+if !ok {
+if grpclog.V(2) {
+grpclog.Infoln("Failed to retrieve *rpcData from context.")
+}
+return
+}
+
+atomic.AddInt64(&d.sentBytes, int64(s.Length))
+atomic.AddInt64(&d.sentCount, 1)
+}
+
+func handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
+d, ok := ctx.Value(rpcDataKey).(*rpcData)
+if !ok {
+if grpclog.V(2) {
+grpclog.Infoln("Failed to retrieve *rpcData from context.")
+}
+return
+}
+
+atomic.AddInt64(&d.recvBytes, int64(s.Length))
+atomic.AddInt64(&d.recvCount, 1)
+}
+
+func handleRPCEnd(ctx context.Context, s *stats.End) {
+d, ok := ctx.Value(rpcDataKey).(*rpcData)
+if !ok {
+if grpclog.V(2) {
+grpclog.Infoln("Failed to retrieve *rpcData from context.")
+}
+return
+}
+
+elapsedTime := time.Since(d.startTime)
+
+var st string
+if s.Error != nil {
+s, ok := status.FromError(s.Error)
+if ok {
+st = statusCodeToString(s)
+}
+} else {
+st = "OK"
+}
+
+latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
+if s.Client {
+ctx, _ = tag.New(ctx,
+tag.Upsert(KeyClientMethod, methodName(d.method)),
+tag.Upsert(KeyClientStatus, st))
+ocstats.Record(ctx,
+ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
+ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
+ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
+ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
+ClientRoundtripLatency.M(latencyMillis))
+} else {
+ctx, _ = tag.New(ctx, tag.Upsert(KeyServerStatus, st))
+ocstats.Record(ctx,
+ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
+ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
+ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
+ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
+ServerLatency.M(latencyMillis))
+}
+}
+
+func statusCodeToString(s *status.Status) string {
+// see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
+switch c := s.Code(); c {
+case codes.OK:
+return "OK"
+case codes.Canceled:
+return "CANCELLED"
+case codes.Unknown:
+return "UNKNOWN"
+case codes.InvalidArgument:
+return "INVALID_ARGUMENT"
+case codes.DeadlineExceeded:
+return "DEADLINE_EXCEEDED"
+case codes.NotFound:
+return "NOT_FOUND"
+case codes.AlreadyExists:
+return "ALREADY_EXISTS"
+case codes.PermissionDenied:
+return "PERMISSION_DENIED"
+case codes.ResourceExhausted:
+return "RESOURCE_EXHAUSTED"
+case codes.FailedPrecondition:
+return "FAILED_PRECONDITION"
+case codes.Aborted:
+return "ABORTED"
+case codes.OutOfRange:
+return "OUT_OF_RANGE"
+case codes.Unimplemented:
+return "UNIMPLEMENTED"
+case codes.Internal:
+return "INTERNAL"
+case codes.Unavailable:
+return "UNAVAILABLE"
+case codes.DataLoss:
+return "DATA_LOSS"
+case codes.Unauthenticated:
+return "UNAUTHENTICATED"
+default:
+return "CODE_" + strconv.FormatInt(int64(c), 10)
 }
-if status != "" {
-mods = append(mods, tag.Upsert(KeyStatus, status))
-}
-ctx, _ = tag.New(ctx, mods...)
-ocstats.Record(ctx, m...)
 }
vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go | 26 (generated, vendored)
@@ -36,11 +36,9 @@ const traceContextKey = "grpc-trace-bin"
 func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
 name := strings.TrimPrefix(rti.FullMethodName, "/")
 name = strings.Replace(name, "/", ".", -1)
-span := trace.NewSpan(name, trace.FromContext(ctx), trace.StartOptions{
+ctx, span := trace.StartSpan(ctx, name,
-Sampler: c.StartOptions.Sampler,
+trace.WithSampler(c.StartOptions.Sampler),
-SpanKind: trace.SpanKindClient,
+trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC
-}) // span is ended by traceHandleRPC
-ctx = trace.WithSpan(ctx, span)
 traceContextBinary := propagation.Binary(span.SpanContext())
 return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary))
 }
@@ -52,11 +50,6 @@ func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo)
 //
 // It returns ctx, with the new trace span added.
 func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
-opts := trace.StartOptions{
-Sampler: s.StartOptions.Sampler,
-SpanKind: trace.SpanKindServer,
-}

 md, _ := metadata.FromIncomingContext(ctx)
 name := strings.TrimPrefix(rti.FullMethodName, "/")
 name = strings.Replace(name, "/", ".", -1)
@@ -72,15 +65,20 @@ func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo)
 traceContextBinary := []byte(traceContext[0])
 parent, haveParent = propagation.FromBinary(traceContextBinary)
 if haveParent && !s.IsPublicEndpoint {
-span := trace.NewSpanWithRemoteParent(name, parent, opts)
+ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent,
-return trace.WithSpan(ctx, span)
+trace.WithSpanKind(trace.SpanKindServer),
+trace.WithSampler(s.StartOptions.Sampler),
+)
+return ctx
 }
 }
-span := trace.NewSpan(name, nil, opts)
+ctx, span := trace.StartSpan(ctx, name,
+trace.WithSpanKind(trace.SpanKindServer),
+trace.WithSampler(s.StartOptions.Sampler))
 if haveParent {
 span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild})
 }
-return trace.WithSpan(ctx, span)
+return ctx
 }

 func traceHandleRPC(ctx context.Context, rs stats.RPCStats) {
vendor/go.opencensus.io/plugin/ocgrpc/trace_test.go | 10 (generated, vendored)
@@ -33,7 +33,7 @@ func (t *testExporter) ExportSpan(s *trace.SpanData) {
 }

 func TestStreaming(t *testing.T) {
-trace.SetDefaultSampler(trace.AlwaysSample())
+trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
 te := testExporter{make(chan *trace.SpanData)}
 trace.RegisterExporter(&te)
 defer trace.UnregisterExporter(&te)
@@ -76,7 +76,7 @@ func TestStreaming(t *testing.T) {
 }

 func TestStreamingFail(t *testing.T) {
-trace.SetDefaultSampler(trace.AlwaysSample())
+trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
 te := testExporter{make(chan *trace.SpanData)}
 trace.RegisterExporter(&te)
 defer trace.UnregisterExporter(&te)
@@ -117,7 +117,7 @@ func TestStreamingFail(t *testing.T) {
 }

 func TestSingle(t *testing.T) {
-trace.SetDefaultSampler(trace.AlwaysSample())
+trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
 te := testExporter{make(chan *trace.SpanData)}
 trace.RegisterExporter(&te)
 defer trace.UnregisterExporter(&te)
@@ -150,7 +150,7 @@ func TestServerSpanDuration(t *testing.T) {
 trace.RegisterExporter(&te)
 defer trace.UnregisterExporter(&te)

-trace.SetDefaultSampler(trace.AlwaysSample())
+trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

 ctx := context.Background()
 const sleep = 100 * time.Millisecond
@@ -174,7 +174,7 @@ loop:
 }

 func TestSingleFail(t *testing.T) {
-trace.SetDefaultSampler(trace.AlwaysSample())
+trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
 te := testExporter{make(chan *trace.SpanData)}
 trace.RegisterExporter(&te)
 defer trace.UnregisterExporter(&te)
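The sampler configuration moved in the same release: trace.SetDefaultSampler is gone in 0.9.0 and the default sampler is set through ApplyConfig, as the test changes above show. A minimal sketch of the replacement call:

package tracingsetup

import "go.opencensus.io/trace"

// enableAlwaysSampling replaces the v0.6.0 call trace.SetDefaultSampler(trace.AlwaysSample()).
func enableAlwaysSampling() {
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
}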
vendor/go.opencensus.io/plugin/ochttp/client_test.go | 21 (generated, vendored)
@@ -36,8 +36,8 @@ func TestClient(t *testing.T) {
 }))
 defer server.Close()

-for _, v := range ochttp.DefaultClientViews {
+if err := view.Register(ochttp.DefaultClientViews...); err != nil {
-v.Subscribe()
+t.Fatalf("Failed to register ochttp.DefaultClientViews error: %v", err)
 }

 views := []string{
@@ -54,15 +54,14 @@ func TestClient(t *testing.T) {
 }
 }

-var (
+var wg sync.WaitGroup
-w sync.WaitGroup
+var tr ochttp.Transport
-tr ochttp.Transport
+errs := make(chan error, reqCount)
-errs = make(chan error, reqCount)
+wg.Add(reqCount)
-)
-w.Add(reqCount)
 for i := 0; i < reqCount; i++ {
 go func() {
-defer w.Done()
+defer wg.Done()
 req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body"))
 if err != nil {
 errs <- fmt.Errorf("error creating request: %v", err)
@@ -81,7 +80,7 @@ func TestClient(t *testing.T) {
 }

 go func() {
-w.Wait()
+wg.Wait()
 close(errs)
 }()

@@ -110,7 +109,7 @@ func TestClient(t *testing.T) {
 var count int64
 switch data := data.(type) {
 case *view.CountData:
-count = *(*int64)(data)
+count = data.Value
 case *view.DistributionData:
 count = data.Count
 default:
vendor/go.opencensus.io/plugin/ochttp/example_test.go | 6 (generated, vendored)
@@ -25,13 +25,13 @@ import (
 )

 func ExampleTransport() {
-if err := view.Subscribe(
+if err := view.Register(
-// Subscribe to a few default views.
+// Register to a few default views.
 ochttp.ClientRequestCountByMethod,
 ochttp.ClientResponseCountByStatusCode,
 ochttp.ClientLatencyView,

-// Subscribe to a custom view.
+// Register to a custom view.
 &view.View{
 Name: "httpclient_latency_by_hostpath",
 TagKeys: []tag.Key{ochttp.Host, ochttp.Path},
vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go | 2 (generated, vendored)
@@ -15,7 +15,7 @@
 // Package b3 contains a propagation.HTTPFormat implementation
 // for B3 propagation. See https://github.com/openzipkin/b3-propagation
 // for more details.
-package b3
+package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"

 import (
 "encoding/hex"
vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go | 2 (generated, vendored)
@@ -14,7 +14,7 @@

 // Package tracecontext contains HTTP propagator for TraceContext standard.
 // See https://github.com/w3c/distributed-tracing for more information.
-package tracecontext
+package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext"

 import (
 "encoding/hex"
vendor/go.opencensus.io/plugin/ochttp/propagation_test.go | 2 (generated, vendored)
@@ -36,7 +36,7 @@ func TestRoundTripAllFormats(t *testing.T) {
 }

 ctx := context.Background()
-trace.SetDefaultSampler(trace.AlwaysSample())
+trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
 ctx, span := trace.StartSpan(ctx, "test")
 sc := span.SpanContext()
 wantStr := fmt.Sprintf("trace_id=%x, span_id=%x, options=%d", sc.TraceID, sc.SpanID, sc.TraceOptions)
vendor/go.opencensus.io/plugin/ochttp/server.go | 39 (generated, vendored)
@@ -15,7 +15,10 @@
 package ochttp

 import (
+"bufio"
 "context"
+"errors"
+"net"
 "net/http"
 "strconv"
 "sync"
@@ -65,7 +68,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 defer traceEnd()
 w, statsEnd = h.startStats(w, r)
 defer statsEnd()

 handler := h.Handler
 if handler == nil {
 handler = http.DefaultServeMux
@@ -74,20 +76,19 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 }

 func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
-opts := trace.StartOptions{
-Sampler: h.StartOptions.Sampler,
-SpanKind: trace.SpanKindServer,
-}

 name := spanNameFromURL(r.URL)
 ctx := r.Context()
 var span *trace.Span
 sc, ok := h.extractSpanContext(r)
 if ok && !h.IsPublicEndpoint {
-span = trace.NewSpanWithRemoteParent(name, sc, opts)
+ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
-ctx = trace.WithSpan(ctx, span)
+trace.WithSampler(h.StartOptions.Sampler),
+trace.WithSpanKind(trace.SpanKindServer))
 } else {
-span = trace.NewSpan(name, nil, opts)
+ctx, span = trace.StartSpan(ctx, name,
+trace.WithSampler(h.StartOptions.Sampler),
+trace.WithSpanKind(trace.SpanKindServer),
+)
 if ok {
 span.AddLink(trace.Link{
 TraceID: sc.TraceID,
@@ -97,9 +98,8 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ
 })
 }
 }
-ctx = trace.WithSpan(ctx, span)
 span.AddAttributes(requestAttrs(r)...)
-return r.WithContext(trace.WithSpan(r.Context(), span)), span.End
+return r.WithContext(ctx), span.End
 }

 func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) {
@@ -135,17 +135,33 @@ type trackingResponseWriter struct {
 respSize int64
 start time.Time
 statusCode int
+statusLine string
 endOnce sync.Once
 writer http.ResponseWriter
 }

 var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
+var _ http.Hijacker = (*trackingResponseWriter)(nil)
+
+var errHijackerUnimplemented = errors.New("ResponseWriter does not implement http.Hijacker")
+
+func (t *trackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+hj, ok := t.writer.(http.Hijacker)
+if !ok {
+return nil, nil, errHijackerUnimplemented
+}
+return hj.Hijack()
+}
+
 func (t *trackingResponseWriter) end() {
 t.endOnce.Do(func() {
 if t.statusCode == 0 {
 t.statusCode = 200
 }

+span := trace.FromContext(t.ctx)
+span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
+
 m := []stats.Measurement{
 ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
 ServerResponseBytes.M(t.respSize),
@@ -171,6 +187,7 @@ func (t *trackingResponseWriter) Write(data []byte) (int, error) {
 func (t *trackingResponseWriter) WriteHeader(statusCode int) {
 t.writer.WriteHeader(statusCode)
 t.statusCode = statusCode
+t.statusLine = http.StatusText(t.statusCode)
 }

 func (t *trackingResponseWriter) Flush() {
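Span construction changed the same way throughout the vendored code: trace.NewSpan/NewSpanWithRemoteParent plus trace.WithSpan are replaced by StartSpan/StartSpanWithRemoteParent, which return the derived context directly. A minimal sketch of the server-side pattern used above; the helper name startServerSpan is illustrative, not part of the package:

package tracingsketch

import (
	"context"

	"go.opencensus.io/trace"
)

func startServerSpan(ctx context.Context, name string, parent trace.SpanContext, haveParent bool) (context.Context, *trace.Span) {
	if haveParent {
		// Remote parent present and trusted: continue the incoming trace.
		return trace.StartSpanWithRemoteParent(ctx, name, parent,
			trace.WithSpanKind(trace.SpanKindServer))
	}
	// No usable parent: start a fresh server span.
	return trace.StartSpan(ctx, name, trace.WithSpanKind(trace.SpanKindServer))
}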
vendor/go.opencensus.io/plugin/ochttp/server_test.go | 241 (generated, vendored)
@@ -1,11 +1,22 @@
 package ochttp

 import (
+"bufio"
 "bytes"
+"context"
+"crypto/tls"
+"fmt"
+"io"
+"io/ioutil"
+"net"
 "net/http"
 "net/http/httptest"
+"strings"
+"sync"
 "testing"

+"golang.org/x/net/http2"
+
 "go.opencensus.io/stats/view"
 "go.opencensus.io/trace"
 )
@@ -26,8 +37,8 @@ func updateMean(mean float64, sample, count int) float64 {
 }

 func TestHandlerStatsCollection(t *testing.T) {
-for _, v := range DefaultServerViews {
+if err := view.Register(DefaultServerViews...); err != nil {
-v.Subscribe()
+t.Fatalf("Failed to register ochttp.DefaultServerViews error: %v", err)
 }

 views := []string{
@@ -90,7 +101,7 @@ func TestHandlerStatsCollection(t *testing.T) {
 var sum float64
 switch data := data.(type) {
 case *view.CountData:
-count = int(*data)
+count = int(data.Value)
 case *view.DistributionData:
 count = int(data.Count)
 sum = data.Sum()
@@ -116,3 +127,227 @@ func TestHandlerStatsCollection(t *testing.T) {
 }
 }
 }

+type testResponseWriterHijacker struct {
+httptest.ResponseRecorder
+}
+
+func (trw *testResponseWriterHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+return nil, nil, nil
+}
+
+func TestUnitTestHandlerProxiesHijack(t *testing.T) {
+tests := []struct {
+w http.ResponseWriter
+wantErr string
+}{
+{httptest.NewRecorder(), "ResponseWriter does not implement http.Hijacker"},
+{nil, "ResponseWriter does not implement http.Hijacker"},
+{new(testResponseWriterHijacker), ""},
+}
+
+for i, tt := range tests {
+tw := &trackingResponseWriter{writer: tt.w}
+conn, buf, err := tw.Hijack()
+if tt.wantErr != "" {
+if err == nil || !strings.Contains(err.Error(), tt.wantErr) {
+t.Errorf("#%d got error (%v) want error substring (%q)", i, err, tt.wantErr)
+}
+if conn != nil {
+t.Errorf("#%d inconsistent state got non-nil conn (%v)", i, conn)
+}
+if buf != nil {
+t.Errorf("#%d inconsistent state got non-nil buf (%v)", i, buf)
+}
+continue
+}
+
+if err != nil {
+t.Errorf("#%d got unexpected error %v", i, err)
+}
+}
+}
+
+// Integration test with net/http to ensure that our Handler proxies to its
+// response the call to (http.Hijack).Hijacker() and that that successfully
+// passes with HTTP/1.1 connections. See Issue #642
+func TestHandlerProxiesHijack_HTTP1(t *testing.T) {
+cst := httptest.NewServer(&Handler{
+Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+var writeMsg func(string)
+defer func() {
+err := recover()
+writeMsg(fmt.Sprintf("Proto=%s\npanic=%v", r.Proto, err != nil))
+}()
+conn, _, _ := w.(http.Hijacker).Hijack()
+writeMsg = func(msg string) {
+fmt.Fprintf(conn, "%s 200\nContentLength: %d", r.Proto, len(msg))
+fmt.Fprintf(conn, "\r\n\r\n%s", msg)
+conn.Close()
+}
+}),
+})
+defer cst.Close()
+
+testCases := []struct {
+name string
+tr *http.Transport
+want string
+}{
+{
+name: "http1-transport",
+tr: new(http.Transport),
+want: "Proto=HTTP/1.1\npanic=false",
+},
+{
+name: "http2-transport",
+tr: func() *http.Transport {
+tr := new(http.Transport)
+http2.ConfigureTransport(tr)
+return tr
+}(),
+want: "Proto=HTTP/1.1\npanic=false",
+},
+}
+
+for _, tc := range testCases {
+c := &http.Client{Transport: &Transport{Base: tc.tr}}
+res, err := c.Get(cst.URL)
+if err != nil {
+t.Errorf("(%s) unexpected error %v", tc.name, err)
+continue
+}
+blob, _ := ioutil.ReadAll(res.Body)
+res.Body.Close()
+if g, w := string(blob), tc.want; g != w {
+t.Errorf("(%s) got = %q; want = %q", tc.name, g, w)
+}
+}
+}
+
+// Integration test with net/http, x/net/http2 to ensure that our Handler proxies
+// to its response the call to (http.Hijack).Hijacker() and that that crashes
+// since http.Hijacker and HTTP/2.0 connections are incompatible, but the
+// detection is only at runtime and ensure that we can stream and flush to the
+// connection even after invoking Hijack(). See Issue #642.
+func TestHandlerProxiesHijack_HTTP2(t *testing.T) {
+cst := httptest.NewUnstartedServer(&Handler{
+Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+conn, _, err := w.(http.Hijacker).Hijack()
+if conn != nil {
+data := fmt.Sprintf("Surprisingly got the Hijacker() Proto: %s", r.Proto)
+fmt.Fprintf(conn, "%s 200\nContent-Length:%d\r\n\r\n%s", r.Proto, len(data), data)
+conn.Close()
+return
+}
+
+switch {
+case err == nil:
+fmt.Fprintf(w, "Unexpectedly did not encounter an error!")
+default:
+fmt.Fprintf(w, "Unexpected error: %v", err)
+case strings.Contains(err.(error).Error(), "Hijack"):
+// Confirmed HTTP/2.0, let's stream to it
+for i := 0; i < 5; i++ {
+fmt.Fprintf(w, "%d\n", i)
+w.(http.Flusher).Flush()
+}
+}
+}),
+})
+cst.TLS = &tls.Config{NextProtos: []string{"h2"}}
+cst.StartTLS()
+defer cst.Close()
+
+if wantPrefix := "https://"; !strings.HasPrefix(cst.URL, wantPrefix) {
+t.Fatalf("URL got = %q wantPrefix = %q", cst.URL, wantPrefix)
+}
+
+tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
+http2.ConfigureTransport(tr)
+c := &http.Client{Transport: tr}
+res, err := c.Get(cst.URL)
+if err != nil {
+t.Fatalf("Unexpected error %v", err)
+}
+blob, _ := ioutil.ReadAll(res.Body)
+res.Body.Close()
+if g, w := string(blob), "0\n1\n2\n3\n4\n"; g != w {
+t.Errorf("got = %q; want = %q", g, w)
+}
+}
+
+func TestEnsureTrackingResponseWriterSetsStatusCode(t *testing.T) {
+// Ensure that the trackingResponseWriter always sets the spanStatus on ending the span.
+// Because we can only examine the Status after exporting, this test roundtrips a
+// couple of requests and then later examines the exported spans.
+// See Issue #700.
+exporter := &spanExporter{cur: make(chan *trace.SpanData, 1)}
+trace.RegisterExporter(exporter)
+defer trace.UnregisterExporter(exporter)
+
+tests := []struct {
+res *http.Response
+want trace.Status
+}{
+{res: &http.Response{StatusCode: 200}, want: trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}},
+{res: &http.Response{StatusCode: 500}, want: trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}},
+{res: &http.Response{StatusCode: 403}, want: trace.Status{Code: trace.StatusCodePermissionDenied, Message: `"PERMISSION_DENIED"`}},
+{res: &http.Response{StatusCode: 401}, want: trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `"UNAUTHENTICATED"`}},
+{res: &http.Response{StatusCode: 429}, want: trace.Status{Code: trace.StatusCodeResourceExhausted, Message: `"RESOURCE_EXHAUSTED"`}},
+}
+
+for _, tt := range tests {
+t.Run(tt.want.Message, func(t *testing.T) {
+span := trace.NewSpan("testing", nil, trace.StartOptions{Sampler: trace.AlwaysSample()})
+ctx := trace.WithSpan(context.Background(), span)
+prc, pwc := io.Pipe()
+go func() {
+pwc.Write([]byte("Foo"))
+pwc.Close()
+}()
+inRes := tt.res
+inRes.Body = prc
+tr := &traceTransport{base: &testResponseTransport{res: inRes}}
+req, err := http.NewRequest("POST", "https://example.org", bytes.NewReader([]byte("testing")))
+if err != nil {
+t.Fatalf("NewRequest error: %v", err)
+}
+req = req.WithContext(ctx)
+res, err := tr.RoundTrip(req)
+if err != nil {
+t.Fatalf("RoundTrip error: %v", err)
+}
+_, _ = ioutil.ReadAll(res.Body)
+res.Body.Close()
+
+cur := <-exporter.cur
+if got, want := cur.Status, tt.want; got != want {
+t.Fatalf("SpanData:\ngot = (%#v)\nwant = (%#v)", got, want)
+}
+})
+}
+}
+
+type spanExporter struct {
+sync.Mutex
+cur chan *trace.SpanData
+}
+
+var _ trace.Exporter = (*spanExporter)(nil)
+
+func (se *spanExporter) ExportSpan(sd *trace.SpanData) {
+se.Lock()
+se.cur <- sd
+se.Unlock()
+}
+
+type testResponseTransport struct {
+res *http.Response
+}
+
+var _ http.RoundTripper = (*testResponseTransport)(nil)
+
+func (rb *testResponseTransport) RoundTrip(*http.Request) (*http.Response, error) {
+return rb.res, nil
+}
vendor/go.opencensus.io/plugin/ochttp/stats.go | 16 (generated, vendored)
@@ -22,18 +22,18 @@ import (

 // The following client HTTP measures are supported for use in custom views.
 var (
-ClientRequestCount, _ = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitNone)
+ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
-ClientRequestBytes, _ = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
+ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
-ClientResponseBytes, _ = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
+ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
-ClientLatency, _ = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds)
+ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds)
 )

 // The following server HTTP measures are supported for use in custom views:
 var (
-ServerRequestCount, _ = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitNone)
+ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
-ServerRequestBytes, _ = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
+ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
-ServerResponseBytes, _ = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
+ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
-ServerLatency, _ = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds)
+ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds)
 )

 // The following tags are applied to stats recorded by this package. Host, Path
vendor/go.opencensus.io/plugin/ochttp/trace.go | 96 (generated, vendored)
@@ -53,10 +53,11 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
 name := spanNameFromURL(req.URL)
 // TODO(jbd): Discuss whether we want to prefix
 // outgoing requests with Sent.
-parent := trace.FromContext(req.Context())
+_, span := trace.StartSpan(req.Context(), name,
-span := trace.NewSpan(name, parent, t.startOptions)
+trace.WithSampler(t.startOptions.Sampler),
-req = req.WithContext(trace.WithSpan(req.Context(), span))
+trace.WithSpanKind(trace.SpanKindClient))

+req = req.WithContext(trace.WithSpan(req.Context(), span))
 if t.format != nil {
 t.format.SpanContextToRequest(span.SpanContext(), req)
 }
@@ -64,13 +65,13 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
 span.AddAttributes(requestAttrs(req)...)
 resp, err := t.base.RoundTrip(req)
 if err != nil {
-span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
 span.End()
 return resp, err
 }

 span.AddAttributes(responseAttrs(resp)...)
-span.SetStatus(status(resp.StatusCode))
+span.SetStatus(TraceStatus(resp.StatusCode, resp.Status))

 // span.End() will be invoked after
 // a read from resp.Body returns io.EOF or when
@@ -145,71 +146,54 @@ func responseAttrs(resp *http.Response) []trace.Attribute {
 }
 }

-func status(statusCode int) trace.Status {
+// HTTPStatusToTraceStatus converts the HTTP status code to a trace.Status that
+// represents the outcome as closely as possible.
+func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
 var code int32
-if statusCode < 200 || statusCode >= 400 {
+if httpStatusCode < 200 || httpStatusCode >= 400 {
-code = codeUnknown
+code = trace.StatusCodeUnknown
 }
-switch statusCode {
+switch httpStatusCode {
 case 499:
-code = codeCancelled
+code = trace.StatusCodeCancelled
 case http.StatusBadRequest:
-code = codeInvalidArgument
+code = trace.StatusCodeInvalidArgument
 case http.StatusGatewayTimeout:
-code = codeDeadlineExceeded
+code = trace.StatusCodeDeadlineExceeded
 case http.StatusNotFound:
-code = codeNotFound
+code = trace.StatusCodeNotFound
 case http.StatusForbidden:
-code = codePermissionDenied
+code = trace.StatusCodePermissionDenied
 case http.StatusUnauthorized: // 401 is actually unauthenticated.
-code = codeUnathenticated
+code = trace.StatusCodeUnauthenticated
 case http.StatusTooManyRequests:
-code = codeResourceExhausted
+code = trace.StatusCodeResourceExhausted
 case http.StatusNotImplemented:
-code = codeUnimplemented
+code = trace.StatusCodeUnimplemented
 case http.StatusServiceUnavailable:
-code = codeUnavailable
+code = trace.StatusCodeUnavailable
+case http.StatusOK:
+code = trace.StatusCodeOK
 }
 return trace.Status{Code: code, Message: codeToStr[code]}
 }

-// TODO(jbd): Provide status codes from trace package.
-const (
-codeOK = 0
-codeCancelled = 1
-codeUnknown = 2
-codeInvalidArgument = 3
-codeDeadlineExceeded = 4
-codeNotFound = 5
-codeAlreadyExists = 6
-codePermissionDenied = 7
-codeResourceExhausted = 8
-codeFailedPrecondition = 9
-codeAborted = 10
-codeOutOfRange = 11
-codeUnimplemented = 12
-codeInternal = 13
-codeUnavailable = 14
-codeDataLoss = 15
-codeUnathenticated = 16
-)

 var codeToStr = map[int32]string{
-codeOK: `"OK"`,
+trace.StatusCodeOK: `"OK"`,
-codeCancelled: `"CANCELLED"`,
+trace.StatusCodeCancelled: `"CANCELLED"`,
-codeUnknown: `"UNKNOWN"`,
+trace.StatusCodeUnknown: `"UNKNOWN"`,
-codeInvalidArgument: `"INVALID_ARGUMENT"`,
+trace.StatusCodeInvalidArgument: `"INVALID_ARGUMENT"`,
-codeDeadlineExceeded: `"DEADLINE_EXCEEDED"`,
+trace.StatusCodeDeadlineExceeded: `"DEADLINE_EXCEEDED"`,
-codeNotFound: `"NOT_FOUND"`,
+trace.StatusCodeNotFound: `"NOT_FOUND"`,
-codeAlreadyExists: `"ALREADY_EXISTS"`,
+trace.StatusCodeAlreadyExists: `"ALREADY_EXISTS"`,
-codePermissionDenied: `"PERMISSION_DENIED"`,
+trace.StatusCodePermissionDenied: `"PERMISSION_DENIED"`,
-codeResourceExhausted: `"RESOURCE_EXHAUSTED"`,
+trace.StatusCodeResourceExhausted: `"RESOURCE_EXHAUSTED"`,
-codeFailedPrecondition: `"FAILED_PRECONDITION"`,
+trace.StatusCodeFailedPrecondition: `"FAILED_PRECONDITION"`,
-codeAborted: `"ABORTED"`,
+trace.StatusCodeAborted: `"ABORTED"`,
-codeOutOfRange: `"OUT_OF_RANGE"`,
+trace.StatusCodeOutOfRange: `"OUT_OF_RANGE"`,
-codeUnimplemented: `"UNIMPLEMENTED"`,
+trace.StatusCodeUnimplemented: `"UNIMPLEMENTED"`,
-codeInternal: `"INTERNAL"`,
+trace.StatusCodeInternal: `"INTERNAL"`,
-codeUnavailable: `"UNAVAILABLE"`,
+trace.StatusCodeUnavailable: `"UNAVAILABLE"`,
-codeDataLoss: `"DATA_LOSS"`,
+trace.StatusCodeDataLoss: `"DATA_LOSS"`,
-codeUnathenticated: `"UNAUTHENTICATED"`,
+trace.StatusCodeUnauthenticated: `"UNAUTHENTICATED"`,
 }
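The former unexported status() helper is now the exported ochttp.TraceStatus, and the numeric codes come from the trace package instead of a local constant block. A small usage sketch based only on the mapping shown above:

package tracestatussketch

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

// notFoundStatus yields Code: trace.StatusCodeNotFound, Message: `"NOT_FOUND"`.
func notFoundStatus() trace.Status {
	return ochttp.TraceStatus(http.StatusNotFound, http.StatusText(http.StatusNotFound))
}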
vendor/go.opencensus.io/plugin/ochttp/trace_test.go | 37 (generated, vendored)
@@ -73,7 +73,8 @@ func (t testPropagator) SpanContextToRequest(sc trace.SpanContext, req *http.Req
 }

 func TestTransport_RoundTrip(t *testing.T) {
-parent := trace.NewSpan("parent", nil, trace.StartOptions{})
+ctx := context.Background()
+ctx, parent := trace.StartSpan(ctx, "parent")
 tests := []struct {
 name string
 parent *trace.Span
@@ -172,7 +173,7 @@ func (c *collector) ExportSpan(s *trace.SpanData) {
 }

 func TestEndToEnd(t *testing.T) {
-trace.SetDefaultSampler(trace.AlwaysSample())
+trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

 tc := []struct {
 name string
@@ -221,12 +222,9 @@ func TestEndToEnd(t *testing.T) {
 url := serveHTTP(tt.handler, serverDone, serverReturn)

 // Start a root Span in the client.
-root := trace.NewSpan(
+ctx, root := trace.StartSpan(
-"top-level",
+context.Background(),
-nil,
+"top-level")
-trace.StartOptions{})
-ctx := trace.WithSpan(context.Background(), root)

 // Make the request.
 req, err := http.NewRequest(
 http.MethodPost,
@@ -278,7 +276,7 @@ func TestEndToEnd(t *testing.T) {
 t.Errorf("Span name: %q; want %q", got, want)
 }
 default:
-t.Fatalf("server or client span missing")
+t.Fatalf("server or client span missing; kind = %v", sp.SpanKind)
 }
 }

@@ -439,19 +437,20 @@ func TestStatusUnitTest(t *testing.T) {
 in int
 want trace.Status
 }{
-{200, trace.Status{Code: 0, Message: `"OK"`}},
+{200, trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}},
-{100, trace.Status{Code: 2, Message: `"UNKNOWN"`}},
+{204, trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}},
-{500, trace.Status{Code: 2, Message: `"UNKNOWN"`}},
+{100, trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}},
-{404, trace.Status{Code: 5, Message: `"NOT_FOUND"`}},
+{500, trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}},
-{600, trace.Status{Code: 2, Message: `"UNKNOWN"`}},
+{404, trace.Status{Code: trace.StatusCodeNotFound, Message: `"NOT_FOUND"`}},
-{401, trace.Status{Code: 16, Message: `"UNAUTHENTICATED"`}},
+{600, trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}},
-{403, trace.Status{Code: 7, Message: `"PERMISSION_DENIED"`}},
+{401, trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `"UNAUTHENTICATED"`}},
-{301, trace.Status{Code: 0, Message: `"OK"`}},
+{403, trace.Status{Code: trace.StatusCodePermissionDenied, Message: `"PERMISSION_DENIED"`}},
-{501, trace.Status{Code: 12, Message: `"UNIMPLEMENTED"`}},
+{301, trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}},
+{501, trace.Status{Code: trace.StatusCodeUnimplemented, Message: `"UNIMPLEMENTED"`}},
 }

 for _, tt := range tests {
-got, want := status(tt.in), tt.want
+got, want := TraceStatus(tt.in, ""), tt.want
 if got != want {
 t.Errorf("status(%d) got = (%#v) want = (%#v)", tt.in, got, want)
 }
vendor/go.opencensus.io/stats/benchmark_test.go | 7 (generated, vendored)
@@ -16,7 +16,6 @@ package stats_test

 import (
 "context"
-"log"
 "testing"

 "go.opencensus.io/stats"
@@ -93,9 +92,5 @@ func BenchmarkRecord8_8Tags(b *testing.B) {
 }

 func makeMeasure() *stats.Int64Measure {
-m, err := stats.Int64("m", "test measure", "")
+return stats.Int64("m", "test measure", "")
-if err != nil {
-log.Fatal(err)
-}
-return m
 }
vendor/go.opencensus.io/stats/example_test.go | 10 (generated, vendored)
@@ -16,16 +16,16 @@ package stats_test

 import (
 "context"
-"log"

 "go.opencensus.io/stats"
 )

 func ExampleRecord() {
 ctx := context.Background()
-openConns, err := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitNone)
-if err != nil {
+// Measures are usually declared as package-private global variables.
-log.Fatal(err)
+openConns := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitDimensionless)
-}
+// Instrumented packages call stats.Record() to record measuremens.
 stats.Record(ctx, openConns.M(124)) // Record 124 open connections.
 }
vendor/go.opencensus.io/stats/internal/validation.go | 2 (generated, vendored)
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package internal
+package internal // import "go.opencensus.io/stats/internal"

 const (
 MaxNameLength = 255
74
vendor/go.opencensus.io/stats/measure.go
generated
vendored
74
vendor/go.opencensus.io/stats/measure.go
generated
vendored
@@ -16,12 +16,8 @@
|
|||||||
package stats
|
package stats
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"go.opencensus.io/stats/internal"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Measure represents a type of metric to be tracked and recorded.
|
// Measure represents a type of metric to be tracked and recorded.
|
||||||
@@ -38,12 +34,13 @@ type Measure interface {
|
|||||||
Name() string
|
Name() string
|
||||||
Description() string
|
Description() string
|
||||||
Unit() string
|
Unit() string
|
||||||
|
|
||||||
subscribe()
|
|
||||||
subscribed() bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type measure struct {
|
// measureDescriptor is the untyped descriptor associated with each measure.
|
||||||
|
// Int64Measure and Float64Measure wrap measureDescriptor to provide typed
|
||||||
|
// recording APIs.
|
||||||
|
// Two Measures with the same name will have the same measureDescriptor.
|
||||||
|
type measureDescriptor struct {
|
||||||
subs int32 // access atomically
|
subs int32 // access atomically
|
||||||
|
|
||||||
name string
|
name string
|
||||||
@@ -51,56 +48,33 @@ type measure struct {
|
|||||||
unit string
|
unit string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *measure) subscribe() {
|
func (m *measureDescriptor) subscribe() {
|
||||||
atomic.StoreInt32(&m.subs, 1)
|
atomic.StoreInt32(&m.subs, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *measure) subscribed() bool {
|
func (m *measureDescriptor) subscribed() bool {
|
||||||
return atomic.LoadInt32(&m.subs) == 1
|
return atomic.LoadInt32(&m.subs) == 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name returns the name of the measure.
|
|
||||||
func (m *measure) Name() string {
|
|
||||||
return m.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Description returns the description of the measure.
|
|
||||||
func (m *measure) Description() string {
|
|
||||||
return m.description
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unit returns the unit of the measure.
|
|
||||||
func (m *measure) Unit() string {
|
|
||||||
return m.unit
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
measures = make(map[string]Measure)
|
measures = make(map[string]*measureDescriptor)
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
func registerMeasureHandle(name, desc, unit string) *measureDescriptor {
|
||||||
errDuplicate = errors.New("duplicate measure name")
|
|
||||||
errMeasureNameTooLong = fmt.Errorf("measure name cannot be longer than %v", internal.MaxNameLength)
|
|
||||||
)
|
|
||||||
|
|
||||||
// FindMeasure finds the Measure instance, if any, associated with the given name.
|
|
||||||
func FindMeasure(name string) Measure {
|
|
||||||
mu.RLock()
|
|
||||||
m := measures[name]
|
|
||||||
mu.RUnlock()
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
func register(m Measure) (Measure, error) {
|
|
||||||
key := m.Name()
|
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
defer mu.Unlock()
|
defer mu.Unlock()
|
||||||
if stored, ok := measures[key]; ok {
|
|
||||||
return stored, errDuplicate
|
if stored, ok := measures[name]; ok {
|
||||||
|
return stored
|
||||||
}
|
}
|
||||||
measures[key] = m
|
m := &measureDescriptor{
|
||||||
return m, nil
|
name: name,
|
||||||
|
description: desc,
|
||||||
|
unit: unit,
|
||||||
|
}
|
||||||
|
measures[name] = m
|
||||||
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
// Measurement is the numeric value measured when recording stats. Each measure
|
// Measurement is the numeric value measured when recording stats. Each measure
|
||||||
@@ -120,13 +94,3 @@ func (m Measurement) Value() float64 {
|
|||||||
func (m Measurement) Measure() Measure {
|
func (m Measurement) Measure() Measure {
|
||||||
return m.m
|
return m.m
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkName(name string) error {
|
|
||||||
if len(name) > internal.MaxNameLength {
|
|
||||||
return errMeasureNameTooLong
|
|
||||||
}
|
|
||||||
if !internal.IsPrintable(name) {
|
|
||||||
return errors.New("measure name needs to be an ASCII string")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
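Two consequences of the rewritten registration path above matter to extension authors: creating a measure whose name is already registered now hands back a handle that shares the existing measureDescriptor rather than failing with a duplicate-name error, and FindMeasure is gone, so code that looked measures up by name must keep its own references. A hedged sketch of that pattern; the map and names below are illustrative, not library API.

package main

import "go.opencensus.io/stats"

// With FindMeasure removed in v0.9.0, packages keep their own references to the
// measures they record against; this map is an illustrative pattern only.
var byName = map[string]*stats.Int64Measure{
	"requests": stats.Int64("example.org/requests", "completed requests", stats.UnitDimensionless),
	"bytes_in": stats.Int64("example.org/bytes_in", "bytes received", stats.UnitBytes),
}

func main() {
	// Registering an already-used name no longer returns a duplicate-name error:
	// the second handle shares the first one's measureDescriptor, so both record
	// into the same stream.
	again := stats.Int64("example.org/requests", "completed requests", stats.UnitDimensionless)
	_, _ = again, byName
}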
|||||||
41
vendor/go.opencensus.io/stats/measure_float64.go
generated
vendored
41
vendor/go.opencensus.io/stats/measure_float64.go
generated
vendored
@@ -17,41 +17,36 @@ package stats
|
|||||||
|
|
||||||
// Float64Measure is a measure of type float64.
|
// Float64Measure is a measure of type float64.
|
||||||
type Float64Measure struct {
|
type Float64Measure struct {
|
||||||
measure
|
md *measureDescriptor
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Float64Measure) subscribe() {
|
// Name returns the name of the measure.
|
||||||
m.measure.subscribe()
|
func (m *Float64Measure) Name() string {
|
||||||
|
return m.md.name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Float64Measure) subscribed() bool {
|
// Description returns the description of the measure.
|
||||||
return m.measure.subscribed()
|
func (m *Float64Measure) Description() string {
|
||||||
|
return m.md.description
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unit returns the unit of the measure.
|
||||||
|
func (m *Float64Measure) Unit() string {
|
||||||
|
return m.md.unit
|
||||||
}
|
}
|
||||||
|
|
||||||
// M creates a new float64 measurement.
|
// M creates a new float64 measurement.
|
||||||
// Use Record to record measurements.
|
// Use Record to record measurements.
|
||||||
func (m *Float64Measure) M(v float64) Measurement {
|
func (m *Float64Measure) M(v float64) Measurement {
|
||||||
if !m.subscribed() {
|
if !m.md.subscribed() {
|
||||||
return Measurement{}
|
return Measurement{}
|
||||||
}
|
}
|
||||||
return Measurement{m: m, v: v}
|
return Measurement{m: m, v: v}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Float64 creates a new measure of type Float64Measure. It returns
|
// Float64 creates a new measure of type Float64Measure.
|
||||||
// an error if a measure with the same name already exists.
|
// It never returns an error.
|
||||||
func Float64(name, description, unit string) (*Float64Measure, error) {
|
func Float64(name, description, unit string) *Float64Measure {
|
||||||
if err := checkName(name); err != nil {
|
mi := registerMeasureHandle(name, description, unit)
|
||||||
return nil, err
|
return &Float64Measure{mi}
|
||||||
}
|
|
||||||
m := &Float64Measure{
|
|
||||||
measure: measure{
|
|
||||||
name: name,
|
|
||||||
description: description,
|
|
||||||
unit: unit,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if _, err := register(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
}
|
||||||
|
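As the new M implementation above shows, a measurement is only materialized when the measure's descriptor has at least one subscribed view; otherwise M returns a zero Measurement and the recording is effectively dropped. A small sketch, with measure and view names of our own choosing.

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var latencyMs = stats.Float64("example.org/measure/latency", "request latency", stats.UnitMilliseconds)

func main() {
	// Without a registered view, M returns a zero Measurement and this call is a no-op.
	stats.Record(context.Background(), latencyMs.M(12.5))

	// Registering a view subscribes the measure; subsequent recordings are kept.
	if err := view.Register(&view.View{
		Name:        "example.org/views/latency",
		Measure:     latencyMs,
		Aggregation: view.Distribution(10, 100, 1000),
	}); err != nil {
		log.Fatal(err)
	}
	stats.Record(context.Background(), latencyMs.M(8.2))
}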
|||||||
41
vendor/go.opencensus.io/stats/measure_int64.go
generated
vendored
41
vendor/go.opencensus.io/stats/measure_int64.go
generated
vendored
@@ -17,41 +17,36 @@ package stats
|
|||||||
|
|
||||||
// Int64Measure is a measure of type int64.
|
// Int64Measure is a measure of type int64.
|
||||||
type Int64Measure struct {
|
type Int64Measure struct {
|
||||||
measure
|
md *measureDescriptor
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Int64Measure) subscribe() {
|
// Name returns the name of the measure.
|
||||||
m.measure.subscribe()
|
func (m *Int64Measure) Name() string {
|
||||||
|
return m.md.name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Int64Measure) subscribed() bool {
|
// Description returns the description of the measure.
|
||||||
return m.measure.subscribed()
|
func (m *Int64Measure) Description() string {
|
||||||
|
return m.md.description
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unit returns the unit of the measure.
|
||||||
|
func (m *Int64Measure) Unit() string {
|
||||||
|
return m.md.unit
|
||||||
}
|
}
|
||||||
|
|
||||||
// M creates a new int64 measurement.
|
// M creates a new int64 measurement.
|
||||||
// Use Record to record measurements.
|
// Use Record to record measurements.
|
||||||
func (m *Int64Measure) M(v int64) Measurement {
|
func (m *Int64Measure) M(v int64) Measurement {
|
||||||
if !m.subscribed() {
|
if !m.md.subscribed() {
|
||||||
return Measurement{}
|
return Measurement{}
|
||||||
}
|
}
|
||||||
return Measurement{m: m, v: float64(v)}
|
return Measurement{m: m, v: float64(v)}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Int64 creates a new measure of type Int64Measure. It returns an
|
// Int64 creates a new measure of type Int64Measure.
|
||||||
// error if a measure with the same name already exists.
|
// It never returns an error.
|
||||||
func Int64(name, description, unit string) (*Int64Measure, error) {
|
func Int64(name, description, unit string) *Int64Measure {
|
||||||
if err := checkName(name); err != nil {
|
mi := registerMeasureHandle(name, description, unit)
|
||||||
return nil, err
|
return &Int64Measure{mi}
|
||||||
}
|
|
||||||
m := &Int64Measure{
|
|
||||||
measure: measure{
|
|
||||||
name: name,
|
|
||||||
description: description,
|
|
||||||
unit: unit,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if _, err := register(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
}
|
||||||
|
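Int64 mirrors the Float64 change above. One convenience for call sites, assuming the variadic Record signature the stats package exposes: typed int64 and float64 measures can be reported in a single call. The measure names here are invented.

package main

import (
	"context"

	"go.opencensus.io/stats"
)

var (
	bytesIn = stats.Int64("example.org/bytes_in", "bytes received", stats.UnitBytes)
	latency = stats.Float64("example.org/latency", "request latency", stats.UnitMilliseconds)
)

func main() {
	// Both measurements are recorded against the same context (and tag set) in one call.
	stats.Record(context.Background(), bytesIn.M(512), latency.M(3.7))
}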
|||||||
108
vendor/go.opencensus.io/stats/measure_test.go
generated
vendored
108
vendor/go.opencensus.io/stats/measure_test.go
generated
vendored
@@ -1,108 +0,0 @@
|
|||||||
package stats
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCheckMeasureName(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
view string
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "valid measure name",
|
|
||||||
view: "my.org/measures/response_size",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "long name",
|
|
||||||
view: strings.Repeat("a", 256),
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "name with non-ASCII",
|
|
||||||
view: "my.org/measures/\007",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "no emoji for you!",
|
|
||||||
view: "💩",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if err := checkName(tt.view); (err != nil) != tt.wantErr {
|
|
||||||
t.Errorf("checkName() error = %v, wantErr %v", err, tt.wantErr)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_FindMeasure(t *testing.T) {
|
|
||||||
mf1, err := Float64("MF1", "desc MF1", "unit")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("stats.Float64(\"MF1\", \"desc MF1\") got error %v, want no error", err)
|
|
||||||
}
|
|
||||||
mf2, err := Float64("MF2", "desc MF2", "unit")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("stats.Float64(\"MF2\", \"desc MF2\") got error %v, want no error", err)
|
|
||||||
}
|
|
||||||
mi1, err := Int64("MI1", "desc MI1", "unit")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("stats.Int64(\"MI1\", \"desc MI1\") got error %v, want no error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
type testCase struct {
|
|
||||||
label string
|
|
||||||
name string
|
|
||||||
m Measure
|
|
||||||
}
|
|
||||||
|
|
||||||
tcs := []testCase{
|
|
||||||
{
|
|
||||||
"0",
|
|
||||||
mf1.Name(),
|
|
||||||
mf1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"1",
|
|
||||||
"MF1",
|
|
||||||
mf1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"2",
|
|
||||||
mf2.Name(),
|
|
||||||
mf2,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"3",
|
|
||||||
"MF2",
|
|
||||||
mf2,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"4",
|
|
||||||
mi1.Name(),
|
|
||||||
mi1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"5",
|
|
||||||
"MI1",
|
|
||||||
mi1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"6",
|
|
||||||
"other",
|
|
||||||
nil,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tcs {
|
|
||||||
m := FindMeasure(tc.name)
|
|
||||||
if m != tc.m {
|
|
||||||
t.Errorf("FindMeasure(%q) got measure %v; want %v", tc.label, m, tc.m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
7
vendor/go.opencensus.io/stats/units.go
generated
vendored
7
vendor/go.opencensus.io/stats/units.go
generated
vendored
@@ -18,7 +18,8 @@ package stats
|
|||||||
// Units are encoded according to the case-sensitive abbreviations from the
|
// Units are encoded according to the case-sensitive abbreviations from the
|
||||||
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
|
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
|
||||||
const (
|
const (
|
||||||
UnitNone = "1"
|
UnitNone = "1" // Deprecated: Use UnitDimensionless.
|
||||||
UnitBytes = "By"
|
UnitDimensionless = "1"
|
||||||
UnitMilliseconds = "ms"
|
UnitBytes = "By"
|
||||||
|
UnitMilliseconds = "ms"
|
||||||
)
|
)
|
||||||
|
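UnitNone survives only as a deprecated alias for the new UnitDimensionless, so existing call sites keep compiling while new code moves to the new name. A tiny illustration; measure names are placeholders.

package main

import "go.opencensus.io/stats"

var (
	// Preferred unit spellings in v0.9.0; UnitNone still compiles but is deprecated.
	openConns = stats.Int64("example.org/open_conns", "open connections", stats.UnitDimensionless)
	payload   = stats.Int64("example.org/payload_bytes", "payload size", stats.UnitBytes)
	latency   = stats.Float64("example.org/latency", "request latency", stats.UnitMilliseconds)
)

func main() { _, _, _ = openConns, payload, latency }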
|||||||
48
vendor/go.opencensus.io/stats/view/aggregation.go
generated
vendored
48
vendor/go.opencensus.io/stats/view/aggregation.go
generated
vendored
@@ -15,21 +15,32 @@
|
|||||||
|
|
||||||
package view
|
package view
|
||||||
|
|
||||||
//go:generate stringer -type AggType
|
|
||||||
|
|
||||||
// AggType represents the type of aggregation function used on a View.
|
// AggType represents the type of aggregation function used on a View.
|
||||||
type AggType int
|
type AggType int
|
||||||
|
|
||||||
|
// All available aggregation types.
|
||||||
const (
|
const (
|
||||||
AggTypeNone AggType = iota // no aggregation; reserved for future use.
|
AggTypeNone AggType = iota // no aggregation; reserved for future use.
|
||||||
AggTypeCount // the count aggregation, see Count.
|
AggTypeCount // the count aggregation, see Count.
|
||||||
AggTypeSum // the sum aggregation, see Sum.
|
AggTypeSum // the sum aggregation, see Sum.
|
||||||
AggTypeMean // the mean aggregation, see Mean.
|
|
||||||
AggTypeDistribution // the distribution aggregation, see Distribution.
|
AggTypeDistribution // the distribution aggregation, see Distribution.
|
||||||
|
AggTypeLastValue // the last value aggregation, see LastValue.
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func (t AggType) String() string {
|
||||||
|
return aggTypeName[t]
|
||||||
|
}
|
||||||
|
|
||||||
|
var aggTypeName = map[AggType]string{
|
||||||
|
AggTypeNone: "None",
|
||||||
|
AggTypeCount: "Count",
|
||||||
|
AggTypeSum: "Sum",
|
||||||
|
AggTypeDistribution: "Distribution",
|
||||||
|
AggTypeLastValue: "LastValue",
|
||||||
|
}
|
||||||
|
|
||||||
// Aggregation represents a data aggregation method. Use one of the functions:
|
// Aggregation represents a data aggregation method. Use one of the functions:
|
||||||
// Count, Sum, Mean, or Distribution to construct an Aggregation.
|
// Count, Sum, or Distribution to construct an Aggregation.
|
||||||
type Aggregation struct {
|
type Aggregation struct {
|
||||||
Type AggType // Type is the AggType of this Aggregation.
|
Type AggType // Type is the AggType of this Aggregation.
|
||||||
Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution.
|
Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution.
|
||||||
@@ -41,19 +52,13 @@ var (
|
|||||||
aggCount = &Aggregation{
|
aggCount = &Aggregation{
|
||||||
Type: AggTypeCount,
|
Type: AggTypeCount,
|
||||||
newData: func() AggregationData {
|
newData: func() AggregationData {
|
||||||
return newCountData(0)
|
return &CountData{}
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
aggSum = &Aggregation{
|
aggSum = &Aggregation{
|
||||||
Type: AggTypeSum,
|
Type: AggTypeSum,
|
||||||
newData: func() AggregationData {
|
newData: func() AggregationData {
|
||||||
return newSumData(0)
|
return &SumData{}
|
||||||
},
|
|
||||||
}
|
|
||||||
aggMean = &Aggregation{
|
|
||||||
Type: AggTypeMean,
|
|
||||||
newData: func() AggregationData {
|
|
||||||
return newMeanData(0, 0)
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -74,14 +79,6 @@ func Sum() *Aggregation {
|
|||||||
return aggSum
|
return aggSum
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mean indicates that collect and aggregate data and maintain
|
|
||||||
// the mean value.
|
|
||||||
// For example, average latency in milliseconds can be aggregated by using
|
|
||||||
// Mean, although in most cases it is preferable to use a Distribution.
|
|
||||||
func Mean() *Aggregation {
|
|
||||||
return aggMean
|
|
||||||
}
|
|
||||||
|
|
||||||
// Distribution indicates that the desired aggregation is
|
// Distribution indicates that the desired aggregation is
|
||||||
// a histogram distribution.
|
// a histogram distribution.
|
||||||
//
|
//
|
||||||
@@ -110,3 +107,14 @@ func Distribution(bounds ...float64) *Aggregation {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LastValue only reports the last value recorded using this
|
||||||
|
// aggregation. All other measurements will be dropped.
|
||||||
|
func LastValue() *Aggregation {
|
||||||
|
return &Aggregation{
|
||||||
|
Type: AggTypeLastValue,
|
||||||
|
newData: func() AggregationData {
|
||||||
|
return &LastValueData{}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
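The aggregation set changes here: Mean is dropped (its removed doc comment already steered users toward Distribution), LastValue is added for gauge-style views, and AggType gains a hand-written String method in place of the generated stringer file. A sketch of choosing aggregations under the new API; the view and measure names are invented.

package main

import (
	"fmt"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var queueDepth = stats.Int64("example.org/queue_depth", "queued items", stats.UnitDimensionless)

func main() {
	views := []*view.View{
		{Name: "queue_depth_count", Measure: queueDepth, Aggregation: view.Count()},
		{Name: "queue_depth_sum", Measure: queueDepth, Aggregation: view.Sum()},
		{Name: "queue_depth_dist", Measure: queueDepth, Aggregation: view.Distribution(1, 10, 100)},
		// New in v0.9.0: report only the most recent recording, gauge-style.
		{Name: "queue_depth_last", Measure: queueDepth, Aggregation: view.LastValue()},
	}
	if err := view.Register(views...); err != nil {
		fmt.Println("register:", err)
	}
	// AggType now stringifies via the new map-backed String method.
	fmt.Println(view.Count().Type) // prints "Count"
}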
|||||||
95
vendor/go.opencensus.io/stats/view/aggregation_data.go
generated
vendored
95
vendor/go.opencensus.io/stats/view/aggregation_data.go
generated
vendored
@@ -35,21 +35,18 @@ const epsilon = 1e-9
|
|||||||
// A count aggregation processes data and counts the recordings.
|
// A count aggregation processes data and counts the recordings.
|
||||||
//
|
//
|
||||||
// Most users won't directly access count data.
|
// Most users won't directly access count data.
|
||||||
type CountData int64
|
type CountData struct {
|
||||||
|
Value int64
|
||||||
func newCountData(v int64) *CountData {
|
|
||||||
tmp := CountData(v)
|
|
||||||
return &tmp
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *CountData) isAggregationData() bool { return true }
|
func (a *CountData) isAggregationData() bool { return true }
|
||||||
|
|
||||||
func (a *CountData) addSample(_ float64) {
|
func (a *CountData) addSample(v float64) {
|
||||||
*a = *a + 1
|
a.Value = a.Value + 1
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *CountData) clone() AggregationData {
|
func (a *CountData) clone() AggregationData {
|
||||||
return newCountData(int64(*a))
|
return &CountData{Value: a.Value}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *CountData) equal(other AggregationData) bool {
|
func (a *CountData) equal(other AggregationData) bool {
|
||||||
@@ -58,28 +55,25 @@ func (a *CountData) equal(other AggregationData) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
return int64(*a) == int64(*a2)
|
return a.Value == a2.Value
|
||||||
}
|
}
|
||||||
|
|
||||||
// SumData is the aggregated data for the Sum aggregation.
|
// SumData is the aggregated data for the Sum aggregation.
|
||||||
// A sum aggregation processes data and sums up the recordings.
|
// A sum aggregation processes data and sums up the recordings.
|
||||||
//
|
//
|
||||||
// Most users won't directly access sum data.
|
// Most users won't directly access sum data.
|
||||||
type SumData float64
|
type SumData struct {
|
||||||
|
Value float64
|
||||||
func newSumData(v float64) *SumData {
|
|
||||||
tmp := SumData(v)
|
|
||||||
return &tmp
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *SumData) isAggregationData() bool { return true }
|
func (a *SumData) isAggregationData() bool { return true }
|
||||||
|
|
||||||
func (a *SumData) addSample(f float64) {
|
func (a *SumData) addSample(f float64) {
|
||||||
*a += SumData(f)
|
a.Value += f
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *SumData) clone() AggregationData {
|
func (a *SumData) clone() AggregationData {
|
||||||
return newSumData(float64(*a))
|
return &SumData{Value: a.Value}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *SumData) equal(other AggregationData) bool {
|
func (a *SumData) equal(other AggregationData) bool {
|
||||||
@@ -87,49 +81,7 @@ func (a *SumData) equal(other AggregationData) bool {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return math.Pow(float64(*a)-float64(*a2), 2) < epsilon
|
return math.Pow(a.Value-a2.Value, 2) < epsilon
|
||||||
}
|
|
||||||
|
|
||||||
// MeanData is the aggregated data for the Mean aggregation.
|
|
||||||
// A mean aggregation processes data and maintains the mean value.
|
|
||||||
//
|
|
||||||
// Most users won't directly access mean data.
|
|
||||||
type MeanData struct {
|
|
||||||
Count int64 // number of data points aggregated
|
|
||||||
Mean float64 // mean of all data points
|
|
||||||
}
|
|
||||||
|
|
||||||
func newMeanData(mean float64, count int64) *MeanData {
|
|
||||||
return &MeanData{
|
|
||||||
Mean: mean,
|
|
||||||
Count: count,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum returns the sum of all samples collected.
|
|
||||||
func (a *MeanData) Sum() float64 { return a.Mean * float64(a.Count) }
|
|
||||||
|
|
||||||
func (a *MeanData) isAggregationData() bool { return true }
|
|
||||||
|
|
||||||
func (a *MeanData) addSample(f float64) {
|
|
||||||
a.Count++
|
|
||||||
if a.Count == 1 {
|
|
||||||
a.Mean = f
|
|
||||||
return
|
|
||||||
}
|
|
||||||
a.Mean = a.Mean + (f-a.Mean)/float64(a.Count)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *MeanData) clone() AggregationData {
|
|
||||||
return newMeanData(a.Mean, a.Count)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *MeanData) equal(other AggregationData) bool {
|
|
||||||
a2, ok := other.(*MeanData)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return a.Count == a2.Count && math.Pow(a.Mean-a2.Mean, 2) < epsilon
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DistributionData is the aggregated data for the
|
// DistributionData is the aggregated data for the
|
||||||
@@ -228,3 +180,28 @@ func (a *DistributionData) equal(other AggregationData) bool {
|
|||||||
}
|
}
|
||||||
return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
|
return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LastValueData returns the last value recorded for LastValue aggregation.
|
||||||
|
type LastValueData struct {
|
||||||
|
Value float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LastValueData) isAggregationData() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LastValueData) addSample(v float64) {
|
||||||
|
l.Value = v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LastValueData) clone() AggregationData {
|
||||||
|
return &LastValueData{l.Value}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LastValueData) equal(other AggregationData) bool {
|
||||||
|
a2, ok := other.(*LastValueData)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return l.Value == a2.Value
|
||||||
|
}
|
||||||
|
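Because CountData and SumData are now structs with an exported Value field (and LastValueData joins them), code that inspected rows by converting the old named numeric types needs a small update. A hedged sketch of reading collected rows back; the view name and helper are our own.

package main

import (
	"fmt"
	"log"

	"go.opencensus.io/stats/view"
)

// dumpRows prints the aggregated value of every row of a registered view,
// switching on the v0.9.0 aggregation-data struct types.
func dumpRows(viewName string) {
	rows, err := view.RetrieveData(viewName)
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range rows {
		switch data := row.Data.(type) {
		case *view.CountData:
			fmt.Println(row.Tags, "count =", data.Value)
		case *view.SumData:
			fmt.Println(row.Tags, "sum =", data.Value)
		case *view.LastValueData:
			fmt.Println(row.Tags, "last =", data.Value)
		case *view.DistributionData:
			fmt.Println(row.Tags, "mean =", data.Mean, "count =", data.Count)
		}
	}
}

func main() { dumpRows("example.org/views/latency") }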
|||||||
8
vendor/go.opencensus.io/stats/view/aggregation_data_test.go
generated
vendored
8
vendor/go.opencensus.io/stats/view/aggregation_data_test.go
generated
vendored
@@ -35,19 +35,15 @@ func TestDataClone(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "count data",
|
name: "count data",
|
||||||
src: newCountData(5),
|
src: &CountData{Value: 5},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "distribution data",
|
name: "distribution data",
|
||||||
src: dist,
|
src: dist,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
name: "mean data",
|
|
||||||
src: newMeanData(11.0, 5),
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
name: "sum data",
|
name: "sum data",
|
||||||
src: newSumData(65.7),
|
src: &SumData{Value: 65.7},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
|||||||
16
vendor/go.opencensus.io/stats/view/aggtype_string.go
generated
vendored
16
vendor/go.opencensus.io/stats/view/aggtype_string.go
generated
vendored
@@ -1,16 +0,0 @@
|
|||||||
// Code generated by "stringer -type AggType"; DO NOT EDIT.
|
|
||||||
|
|
||||||
package view
|
|
||||||
|
|
||||||
import "strconv"
|
|
||||||
|
|
||||||
const _AggType_name = "AggTypeNoneAggTypeCountAggTypeSumAggTypeMeanAggTypeDistribution"
|
|
||||||
|
|
||||||
var _AggType_index = [...]uint8{0, 11, 23, 33, 44, 63}
|
|
||||||
|
|
||||||
func (i AggType) String() string {
|
|
||||||
if i < 0 || i >= AggType(len(_AggType_index)-1) {
|
|
||||||
return "AggType(" + strconv.FormatInt(int64(i), 10) + ")"
|
|
||||||
}
|
|
||||||
return _AggType_name[_AggType_index[i]:_AggType_index[i+1]]
|
|
||||||
}
|
|
||||||
8
vendor/go.opencensus.io/stats/view/benchmark_test.go
generated
vendored
8
vendor/go.opencensus.io/stats/view/benchmark_test.go
generated
vendored
@@ -25,7 +25,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
m, _ = stats.Float64("m", "", "")
|
m = stats.Float64("m", "", "")
|
||||||
k1, _ = tag.NewKey("k1")
|
k1, _ = tag.NewKey("k1")
|
||||||
k2, _ = tag.NewKey("k2")
|
k2, _ = tag.NewKey("k2")
|
||||||
k3, _ = tag.NewKey("k3")
|
k3, _ = tag.NewKey("k3")
|
||||||
@@ -46,9 +46,9 @@ var (
|
|||||||
func BenchmarkRecordReqCommand(b *testing.B) {
|
func BenchmarkRecordReqCommand(b *testing.B) {
|
||||||
w := newWorker()
|
w := newWorker()
|
||||||
|
|
||||||
subscribe := &subscribeToViewReq{views: []*View{view}, err: make(chan error, 1)}
|
register := ®isterViewReq{views: []*View{view}, err: make(chan error, 1)}
|
||||||
subscribe.handleCommand(w)
|
register.handleCommand(w)
|
||||||
if err := <-subscribe.err; err != nil {
|
if err := <-register.err; err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
5
vendor/go.opencensus.io/stats/view/doc.go
generated
vendored
5
vendor/go.opencensus.io/stats/view/doc.go
generated
vendored
@@ -21,11 +21,10 @@ A view allows recorded measurements to be filtered and aggregated over a time wi
|
|||||||
|
|
||||||
All recorded measurements can be filtered by a list of tags.
|
All recorded measurements can be filtered by a list of tags.
|
||||||
|
|
||||||
OpenCensus provides several aggregation methods: count, distribution, sum and mean.
|
OpenCensus provides several aggregation methods: count, distribution and sum.
|
||||||
Count aggregation only counts the number of measurement points. Distribution
|
Count aggregation only counts the number of measurement points. Distribution
|
||||||
aggregation provides statistical summary of the aggregated data. Sum aggregation
|
aggregation provides statistical summary of the aggregated data. Sum aggregation
|
||||||
sums up the measurement points. Mean provides the mean of the recorded measurements.
|
sums up the measurement points. Aggregations are cumulative.
|
||||||
Aggregations can either happen cumulatively or over an interval.
|
|
||||||
|
|
||||||
Users can dynamically create and delete views.
|
Users can dynamically create and delete views.
|
||||||
|
|
||||||
|
|||||||
6
vendor/go.opencensus.io/stats/view/example_test.go
generated
vendored
6
vendor/go.opencensus.io/stats/view/example_test.go
generated
vendored
@@ -22,9 +22,11 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func Example() {
|
func Example() {
|
||||||
m, _ := stats.Int64("my.org/measure/openconns", "open connections", "")
|
// Measures are usually declared and used by instrumented packages.
|
||||||
|
m := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitDimensionless)
|
||||||
|
|
||||||
if err := view.Subscribe(&view.View{
|
// Views are usually subscribed in your application main function.
|
||||||
|
if err := view.Register(&view.View{
|
||||||
Name: "my.org/views/openconns",
|
Name: "my.org/views/openconns",
|
||||||
Description: "open connections",
|
Description: "open connections",
|
||||||
Measure: m,
|
Measure: m,
|
||||||
|
|||||||
16
vendor/go.opencensus.io/stats/view/view.go
generated
vendored
16
vendor/go.opencensus.io/stats/view/view.go
generated
vendored
@@ -46,20 +46,6 @@ type View struct {
|
|||||||
Aggregation *Aggregation
|
Aggregation *Aggregation
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: Use &View{}.
|
|
||||||
func New(name, description string, keys []tag.Key, measure stats.Measure, agg *Aggregation) (*View, error) {
|
|
||||||
if measure == nil {
|
|
||||||
panic("measure may not be nil")
|
|
||||||
}
|
|
||||||
return &View{
|
|
||||||
Name: name,
|
|
||||||
Description: description,
|
|
||||||
TagKeys: keys,
|
|
||||||
Measure: measure,
|
|
||||||
Aggregation: agg,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithName returns a copy of the View with a new name. This is useful for
|
// WithName returns a copy of the View with a new name. This is useful for
|
||||||
// renaming views to cope with limitations placed on metric names by various
|
// renaming views to cope with limitations placed on metric names by various
|
||||||
// backends.
|
// backends.
|
||||||
@@ -176,7 +162,7 @@ func (r *Row) String() string {
|
|||||||
return buffer.String()
|
return buffer.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
// same returns true if both Rows are equal. Tags are expected to be ordered
|
// Equal returns true if both rows are equal. Tags are expected to be ordered
|
||||||
// by the key name. Even both rows have the same tags but the tags appear in
|
// by the key name. Even both rows have the same tags but the tags appear in
|
||||||
// different orders it will return false.
|
// different orders it will return false.
|
||||||
func (r *Row) Equal(other *Row) bool {
|
func (r *Row) Equal(other *Row) bool {
|
||||||
|
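With the deprecated view.New constructor deleted above, views are built directly as struct literals, which is also how the tests in this change construct them. A before/after sketch; all names are placeholders.

package main

import (
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

var requestCount = stats.Int64("example.org/requests", "completed requests", stats.UnitDimensionless)

func main() {
	methodKey, _ := tag.NewKey("method")

	// v0.6.0 (old): v, err := view.New("requests_by_method", "requests by method",
	//     []tag.Key{methodKey}, requestCount, view.Count())
	//
	// v0.9.0 (new): build the View directly and register it.
	v := &view.View{
		Name:        "requests_by_method",
		Description: "requests by method",
		TagKeys:     []tag.Key{methodKey},
		Measure:     requestCount,
		Aggregation: view.Count(),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}
}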
|||||||
50
vendor/go.opencensus.io/stats/view/view_measure_test.go
generated
vendored
Normal file
50
vendor/go.opencensus.io/stats/view/view_measure_test.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
package view
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"go.opencensus.io/stats"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMeasureFloat64AndInt64(t *testing.T) {
|
||||||
|
// Recording through both a Float64Measure and Int64Measure with the
|
||||||
|
// same name should work.
|
||||||
|
|
||||||
|
im := stats.Int64("TestMeasureFloat64AndInt64", "", stats.UnitDimensionless)
|
||||||
|
fm := stats.Float64("TestMeasureFloat64AndInt64", "", stats.UnitDimensionless)
|
||||||
|
|
||||||
|
if im == nil || fm == nil {
|
||||||
|
t.Fatal("Error creating Measures")
|
||||||
|
}
|
||||||
|
|
||||||
|
v1 := &View{
|
||||||
|
Name: "TestMeasureFloat64AndInt64/v1",
|
||||||
|
Measure: im,
|
||||||
|
Aggregation: Sum(),
|
||||||
|
}
|
||||||
|
v2 := &View{
|
||||||
|
Name: "TestMeasureFloat64AndInt64/v2",
|
||||||
|
Measure: fm,
|
||||||
|
Aggregation: Sum(),
|
||||||
|
}
|
||||||
|
Register(v1, v2)
|
||||||
|
|
||||||
|
stats.Record(context.Background(), im.M(5))
|
||||||
|
stats.Record(context.Background(), fm.M(2.2))
|
||||||
|
|
||||||
|
d1, _ := RetrieveData(v1.Name)
|
||||||
|
d2, _ := RetrieveData(v2.Name)
|
||||||
|
|
||||||
|
sum1 := d1[0].Data.(*SumData)
|
||||||
|
sum2 := d2[0].Data.(*SumData)
|
||||||
|
|
||||||
|
// We expect both views to return 7.2, as though we recorded on a single measure.
|
||||||
|
|
||||||
|
if got, want := sum1.Value, 7.2; got != want {
|
||||||
|
t.Errorf("sum1 = %v; want %v", got, want)
|
||||||
|
}
|
||||||
|
if got, want := sum2.Value, 7.2; got != want {
|
||||||
|
t.Errorf("sum2 = %v; want %v", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
175
vendor/go.opencensus.io/stats/view/view_test.go
generated
vendored
175
vendor/go.opencensus.io/stats/view/view_test.go
generated
vendored
@@ -28,7 +28,7 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) {
|
|||||||
k2, _ := tag.NewKey("k2")
|
k2, _ := tag.NewKey("k2")
|
||||||
k3, _ := tag.NewKey("k3")
|
k3, _ := tag.NewKey("k3")
|
||||||
agg1 := Distribution(2)
|
agg1 := Distribution(2)
|
||||||
m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationDistribution/m1", "", stats.UnitNone)
|
m := stats.Int64("Test_View_MeasureFloat64_AggregationDistribution/m1", "", stats.UnitDimensionless)
|
||||||
view1 := &View{
|
view1 := &View{
|
||||||
TagKeys: []tag.Key{k1, k2},
|
TagKeys: []tag.Key{k1, k2},
|
||||||
Measure: m,
|
Measure: m,
|
||||||
@@ -197,7 +197,7 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
|
|||||||
k1, _ := tag.NewKey("k1")
|
k1, _ := tag.NewKey("k1")
|
||||||
k2, _ := tag.NewKey("k2")
|
k2, _ := tag.NewKey("k2")
|
||||||
k3, _ := tag.NewKey("k3")
|
k3, _ := tag.NewKey("k3")
|
||||||
m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationSum/m1", "", stats.UnitNone)
|
m := stats.Int64("Test_View_MeasureFloat64_AggregationSum/m1", "", stats.UnitDimensionless)
|
||||||
view, err := newViewInternal(&View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Sum()})
|
view, err := newViewInternal(&View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Sum()})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -226,7 +226,7 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
|
|||||||
[]*Row{
|
[]*Row{
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}},
|
[]tag.Tag{{Key: k1, Value: "v1"}},
|
||||||
newSumData(6),
|
&SumData{Value: 6},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -239,11 +239,11 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
|
|||||||
[]*Row{
|
[]*Row{
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}},
|
[]tag.Tag{{Key: k1, Value: "v1"}},
|
||||||
newSumData(1),
|
&SumData{Value: 1},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k2, Value: "v2"}},
|
[]tag.Tag{{Key: k2, Value: "v2"}},
|
||||||
newSumData(5),
|
&SumData{Value: 5},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -259,19 +259,19 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
|
|||||||
[]*Row{
|
[]*Row{
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}},
|
[]tag.Tag{{Key: k1, Value: "v1"}},
|
||||||
newSumData(6),
|
&SumData{Value: 6},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k1, Value: "v1 other"}},
|
[]tag.Tag{{Key: k1, Value: "v1 other"}},
|
||||||
newSumData(1),
|
&SumData{Value: 1},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k2, Value: "v2"}},
|
[]tag.Tag{{Key: k2, Value: "v2"}},
|
||||||
newSumData(5),
|
&SumData{Value: 5},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
|
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
|
||||||
newSumData(5),
|
&SumData{Value: 5},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -312,8 +312,8 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) {
|
|||||||
func TestCanonicalize(t *testing.T) {
|
func TestCanonicalize(t *testing.T) {
|
||||||
k1, _ := tag.NewKey("k1")
|
k1, _ := tag.NewKey("k1")
|
||||||
k2, _ := tag.NewKey("k2")
|
k2, _ := tag.NewKey("k2")
|
||||||
m, _ := stats.Int64("TestCanonicalize/m1", "desc desc", stats.UnitNone)
|
m := stats.Int64("TestCanonicalize/m1", "desc desc", stats.UnitDimensionless)
|
||||||
v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: Mean()}
|
v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: Sum()}
|
||||||
err := v.canonicalize()
|
err := v.canonicalize()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -332,138 +332,19 @@ func TestCanonicalize(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_View_MeasureFloat64_AggregationMean(t *testing.T) {
|
|
||||||
k1, _ := tag.NewKey("k1")
|
|
||||||
k2, _ := tag.NewKey("k2")
|
|
||||||
k3, _ := tag.NewKey("k3")
|
|
||||||
m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationMean/m1", "", stats.UnitNone)
|
|
||||||
viewDesc := &View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Mean()}
|
|
||||||
view, err := newViewInternal(viewDesc)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
type tagString struct {
|
|
||||||
k tag.Key
|
|
||||||
v string
|
|
||||||
}
|
|
||||||
type record struct {
|
|
||||||
f float64
|
|
||||||
tags []tagString
|
|
||||||
}
|
|
||||||
|
|
||||||
tcs := []struct {
|
|
||||||
label string
|
|
||||||
records []record
|
|
||||||
wantRows []*Row
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
"1",
|
|
||||||
[]record{
|
|
||||||
{1, []tagString{{k1, "v1"}}},
|
|
||||||
{5, []tagString{{k1, "v1"}}},
|
|
||||||
},
|
|
||||||
[]*Row{
|
|
||||||
{
|
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}},
|
|
||||||
newMeanData(3, 2),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"2",
|
|
||||||
[]record{
|
|
||||||
{1, []tagString{{k1, "v1"}}},
|
|
||||||
{5, []tagString{{k2, "v2"}}},
|
|
||||||
{-0.5, []tagString{{k2, "v2"}}},
|
|
||||||
},
|
|
||||||
[]*Row{
|
|
||||||
{
|
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}},
|
|
||||||
newMeanData(1, 1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
[]tag.Tag{{Key: k2, Value: "v2"}},
|
|
||||||
newMeanData(2.25, 2),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"3",
|
|
||||||
[]record{
|
|
||||||
{1, []tagString{{k1, "v1"}}},
|
|
||||||
{5, []tagString{{k1, "v1"}, {k3, "v3"}}},
|
|
||||||
{1, []tagString{{k1, "v1 other"}}},
|
|
||||||
{5, []tagString{{k2, "v2"}}},
|
|
||||||
{5, []tagString{{k1, "v1"}, {k2, "v2"}}},
|
|
||||||
{-4, []tagString{{k1, "v1"}, {k2, "v2"}}},
|
|
||||||
},
|
|
||||||
[]*Row{
|
|
||||||
{
|
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}},
|
|
||||||
newMeanData(3, 2),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
[]tag.Tag{{Key: k1, Value: "v1 other"}},
|
|
||||||
newMeanData(1, 1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
[]tag.Tag{{Key: k2, Value: "v2"}},
|
|
||||||
newMeanData(5, 1),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
|
|
||||||
newMeanData(0.5, 2),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tcs {
|
|
||||||
view.clearRows()
|
|
||||||
view.subscribe()
|
|
||||||
for _, r := range tt.records {
|
|
||||||
mods := []tag.Mutator{}
|
|
||||||
for _, t := range r.tags {
|
|
||||||
mods = append(mods, tag.Insert(t.k, t.v))
|
|
||||||
}
|
|
||||||
ctx, err := tag.New(context.Background(), mods...)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("%v: New = %v", tt.label, err)
|
|
||||||
}
|
|
||||||
view.addSample(tag.FromContext(ctx), r.f)
|
|
||||||
}
|
|
||||||
|
|
||||||
gotRows := view.collectedRows()
|
|
||||||
for i, got := range gotRows {
|
|
||||||
if !containsRow(tt.wantRows, got) {
|
|
||||||
t.Errorf("%v-%d: got row %v; want none", tt.label, i, got)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, want := range tt.wantRows {
|
|
||||||
if !containsRow(gotRows, want) {
|
|
||||||
t.Errorf("%v-%d: got none; want row %v", tt.label, i, want)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestViewSortedKeys(t *testing.T) {
|
func TestViewSortedKeys(t *testing.T) {
|
||||||
k1, _ := tag.NewKey("a")
|
k1, _ := tag.NewKey("a")
|
||||||
k2, _ := tag.NewKey("b")
|
k2, _ := tag.NewKey("b")
|
||||||
k3, _ := tag.NewKey("c")
|
k3, _ := tag.NewKey("c")
|
||||||
ks := []tag.Key{k1, k3, k2}
|
ks := []tag.Key{k1, k3, k2}
|
||||||
|
|
||||||
m, _ := stats.Int64("TestViewSortedKeys/m1", "", stats.UnitNone)
|
m := stats.Int64("TestViewSortedKeys/m1", "", stats.UnitDimensionless)
|
||||||
Subscribe(&View{
|
Register(&View{
|
||||||
Name: "sort_keys",
|
Name: "sort_keys",
|
||||||
Description: "desc sort_keys",
|
Description: "desc sort_keys",
|
||||||
TagKeys: ks,
|
TagKeys: ks,
|
||||||
Measure: m,
|
Measure: m,
|
||||||
Aggregation: Mean(),
|
Aggregation: Sum(),
|
||||||
})
|
})
|
||||||
// Subscribe normalizes the view by sorting the tag keys, retrieve the normalized view
|
// Subscribe normalizes the view by sorting the tag keys, retrieve the normalized view
|
||||||
v := Find("sort_keys")
|
v := Find("sort_keys")
|
||||||
@@ -490,3 +371,31 @@ func containsRow(rows []*Row, r *Row) bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRegisterUnregisterParity(t *testing.T) {
|
||||||
|
measures := []stats.Measure{
|
||||||
|
stats.Int64("ifoo", "iFOO", "iBar"),
|
||||||
|
stats.Float64("ffoo", "fFOO", "fBar"),
|
||||||
|
}
|
||||||
|
aggregations := []*Aggregation{
|
||||||
|
Count(),
|
||||||
|
Sum(),
|
||||||
|
Distribution(1, 2.0, 4.0, 8.0, 16.0),
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
for _, m := range measures {
|
||||||
|
for _, agg := range aggregations {
|
||||||
|
v := &View{
|
||||||
|
Aggregation: agg,
|
||||||
|
Name: "Lookup here",
|
||||||
|
Measure: m,
|
||||||
|
}
|
||||||
|
if err := Register(v); err != nil {
|
||||||
|
t.Errorf("Iteration #%d:\nMeasure: (%#v)\nAggregation (%#v)\nError: %v", i, m, agg, err)
|
||||||
|
}
|
||||||
|
Unregister(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
40
vendor/go.opencensus.io/stats/view/worker.go
generated
vendored
40
vendor/go.opencensus.io/stats/view/worker.go
generated
vendored
@@ -61,30 +61,15 @@ func Find(name string) (v *View) {
|
|||||||
return resp.v
|
return resp.v
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: Registering is a no-op. Use the Subscribe function.
|
// Register begins collecting data for the given views.
|
||||||
func Register(_ *View) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Unregistering is a no-op, see: Unsubscribe.
|
|
||||||
func Unregister(_ *View) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use the Subscribe function.
|
|
||||||
func (v *View) Subscribe() error {
|
|
||||||
return Subscribe(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe begins collecting data for the given views.
|
|
||||||
// Once a view is subscribed, it reports data to the registered exporters.
|
// Once a view is subscribed, it reports data to the registered exporters.
|
||||||
func Subscribe(views ...*View) error {
|
func Register(views ...*View) error {
|
||||||
for _, v := range views {
|
for _, v := range views {
|
||||||
if err := v.canonicalize(); err != nil {
|
if err := v.canonicalize(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
req := &subscribeToViewReq{
|
req := ®isterViewReq{
|
||||||
views: views,
|
views: views,
|
||||||
err: make(chan error),
|
err: make(chan error),
|
||||||
}
|
}
|
||||||
@@ -92,16 +77,16 @@ func Subscribe(views ...*View) error {
|
|||||||
return <-req.err
|
return <-req.err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unsubscribe the given views. Data will no longer be exported for these views
|
// Unregister the given views. Data will no longer be exported for these views
|
||||||
// after Unsubscribe returns.
|
// after Unregister returns.
|
||||||
// It is not necessary to unsubscribe from views you expect to collect for the
|
// It is not necessary to unregister from views you expect to collect for the
|
||||||
// duration of your program execution.
|
// duration of your program execution.
|
||||||
func Unsubscribe(views ...*View) {
|
func Unregister(views ...*View) {
|
||||||
names := make([]string, len(views))
|
names := make([]string, len(views))
|
||||||
for i := range views {
|
for i := range views {
|
||||||
names[i] = views[i].Name
|
names[i] = views[i].Name
|
||||||
}
|
}
|
||||||
req := &unsubscribeFromViewReq{
|
req := &unregisterFromViewReq{
|
||||||
views: names,
|
views: names,
|
||||||
done: make(chan struct{}),
|
done: make(chan struct{}),
|
||||||
}
|
}
|
||||||
@@ -109,15 +94,6 @@ func Unsubscribe(views ...*View) {
|
|||||||
<-req.done
|
<-req.done
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: Use the Unsubscribe function instead.
|
|
||||||
func (v *View) Unsubscribe() error {
|
|
||||||
if v == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
Unsubscribe(v)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func RetrieveData(viewName string) ([]*Row, error) {
|
func RetrieveData(viewName string) ([]*Row, error) {
|
||||||
req := &retrieveDataReq{
|
req := &retrieveDataReq{
|
||||||
now: time.Now(),
|
now: time.Now(),
|
||||||
|
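The worker API renames land here: the package-level Subscribe/Unsubscribe functions become Register/Unregister, and the per-view Subscribe/Unsubscribe methods disappear entirely. A minimal migration sketch for callers, with an invented view name.

package main

import (
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var openConns = stats.Int64("example.org/open_conns", "open connections", stats.UnitDimensionless)

func main() {
	v := &view.View{
		Name:        "example.org/views/open_conns",
		Measure:     openConns,
		Aggregation: view.Count(),
	}

	// v0.6.0 (old): view.Subscribe(v) / view.Unsubscribe(v), or v.Subscribe() / v.Unsubscribe().
	// v0.9.0 (new): Register starts collection; Unregister stops it and returns nothing.
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}
	defer view.Unregister(v)
}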
|||||||
20
vendor/go.opencensus.io/stats/view/worker_commands.go
generated
vendored
20
vendor/go.opencensus.io/stats/view/worker_commands.go
generated
vendored
@@ -41,16 +41,21 @@ type getViewByNameResp struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (cmd *getViewByNameReq) handleCommand(w *worker) {
|
func (cmd *getViewByNameReq) handleCommand(w *worker) {
|
||||||
cmd.c <- &getViewByNameResp{w.views[cmd.name].view}
|
v := w.views[cmd.name]
|
||||||
|
if v == nil {
|
||||||
|
cmd.c <- &getViewByNameResp{nil}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
cmd.c <- &getViewByNameResp{v.view}
|
||||||
}
|
}
|
||||||
|
|
||||||
// subscribeToViewReq is the command to subscribe to a view.
|
// registerViewReq is the command to register a view.
|
||||||
type subscribeToViewReq struct {
|
type registerViewReq struct {
|
||||||
views []*View
|
views []*View
|
||||||
err chan error
|
err chan error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cmd *subscribeToViewReq) handleCommand(w *worker) {
|
func (cmd *registerViewReq) handleCommand(w *worker) {
|
||||||
var errstr []string
|
var errstr []string
|
||||||
for _, view := range cmd.views {
|
for _, view := range cmd.views {
|
||||||
vi, err := w.tryRegisterView(view)
|
vi, err := w.tryRegisterView(view)
|
||||||
@@ -68,15 +73,15 @@ func (cmd *subscribeToViewReq) handleCommand(w *worker) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// unsubscribeFromViewReq is the command to unsubscribe to a view. Has no
|
// unregisterFromViewReq is the command to unsubscribe to a view. Has no
|
||||||
// impact on the data collection for client that are pulling data from the
|
// impact on the data collection for client that are pulling data from the
|
||||||
// library.
|
// library.
|
||||||
type unsubscribeFromViewReq struct {
|
type unregisterFromViewReq struct {
|
||||||
views []string
|
views []string
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cmd *unsubscribeFromViewReq) handleCommand(w *worker) {
|
func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
|
||||||
for _, name := range cmd.views {
|
for _, name := range cmd.views {
|
||||||
vi, ok := w.views[name]
|
vi, ok := w.views[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -89,6 +94,7 @@ func (cmd *unsubscribeFromViewReq) handleCommand(w *worker) {
|
|||||||
// The collected data can be cleared.
|
// The collected data can be cleared.
|
||||||
vi.clearRows()
|
vi.clearRows()
|
||||||
}
|
}
|
||||||
|
delete(w.views, name)
|
||||||
}
|
}
|
||||||
cmd.done <- struct{}{}
|
cmd.done <- struct{}{}
|
||||||
}
|
}
|
||||||
|
|||||||
146
vendor/go.opencensus.io/stats/view/worker_test.go
generated
vendored
146
vendor/go.opencensus.io/stats/view/worker_test.go
generated
vendored
@@ -26,56 +26,24 @@ import (
|
|||||||
"go.opencensus.io/tag"
|
"go.opencensus.io/tag"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_Worker_MeasureCreation(t *testing.T) {
|
func Test_Worker_ViewRegistration(t *testing.T) {
|
||||||
restart()
|
|
||||||
|
|
||||||
if _, err := stats.Float64("MF1", "desc MF1", "unit"); err != nil {
|
|
||||||
t.Errorf("stats.Float64(\"MF1\", \"desc MF1\") got error %v, want no error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := stats.Float64("MF1", "Duplicate measure with same name as MF1.", "unit"); err == nil {
|
|
||||||
t.Error("stats.Float64(\"MF1\", \"Duplicate Float64Measure with same name as MF1.\") got no error, want no error")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := stats.Int64("MF1", "Duplicate measure with same name as MF1.", "unit"); err == nil {
|
|
||||||
t.Error("stats.Int64(\"MF1\", \"Duplicate Int64Measure with same name as MF1.\") got no error, want no error")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := stats.Float64("MF2", "desc MF2", "unit"); err != nil {
|
|
||||||
t.Errorf("stats.Float64(\"MF2\", \"desc MF2\") got error %v, want no error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := stats.Int64("MI1", "desc MI1", "unit"); err != nil {
|
|
||||||
t.Errorf("stats.Int64(\"MI1\", \"desc MI1\") got error %v, want no error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := stats.Int64("MI1", "Duplicate measure with same name as MI1.", "unit"); err == nil {
|
|
||||||
t.Error("stats.Int64(\"MI1\", \"Duplicate Int64 with same name as MI1.\") got no error, want no error")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := stats.Float64("MI1", "Duplicate measure with same name as MI1.", "unit"); err == nil {
|
|
||||||
t.Error("stats.Float64(\"MI1\", \"Duplicate Float64 with same name as MI1.\") got no error, want no error")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_Worker_ViewSubscription(t *testing.T) {
|
|
||||||
someError := errors.New("some error")
|
someError := errors.New("some error")
|
||||||
|
|
||||||
sc1 := make(chan *Data)
|
sc1 := make(chan *Data)
|
||||||
|
|
||||||
type subscription struct {
|
type registration struct {
|
||||||
c chan *Data
|
c chan *Data
|
||||||
vID string
|
vID string
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
type testCase struct {
|
type testCase struct {
|
||||||
label string
|
label string
|
||||||
subscriptions []subscription
|
registrations []registration
|
||||||
}
|
}
|
||||||
tcs := []testCase{
|
tcs := []testCase{
|
||||||
{
|
{
|
||||||
"register and subscribe to v1ID",
|
"register and subscribe to v1ID",
|
||||||
[]subscription{
|
[]registration{
|
||||||
{
|
{
|
||||||
sc1,
|
sc1,
|
||||||
"v1ID",
|
"v1ID",
|
||||||
@@ -85,7 +53,7 @@ func Test_Worker_ViewSubscription(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"register v1ID+v2ID, susbsribe to v1ID",
|
"register v1ID+v2ID, susbsribe to v1ID",
|
||||||
[]subscription{
|
[]registration{
|
||||||
{
|
{
|
||||||
sc1,
|
sc1,
|
||||||
"v1ID",
|
"v1ID",
|
||||||
@@ -95,7 +63,7 @@ func Test_Worker_ViewSubscription(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"register to v1ID; subscribe to v1ID and view with same ID",
|
"register to v1ID; subscribe to v1ID and view with same ID",
|
||||||
[]subscription{
|
[]registration{
|
||||||
{
|
{
|
||||||
sc1,
|
sc1,
|
||||||
"v1ID",
|
"v1ID",
|
||||||
@@ -110,8 +78,8 @@ func Test_Worker_ViewSubscription(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
mf1, _ := stats.Float64("MF1/Test_Worker_ViewSubscription", "desc MF1", "unit")
|
mf1 := stats.Float64("MF1/Test_Worker_ViewSubscription", "desc MF1", "unit")
|
||||||
mf2, _ := stats.Float64("MF2/Test_Worker_ViewSubscription", "desc MF2", "unit")
|
mf2 := stats.Float64("MF2/Test_Worker_ViewSubscription", "desc MF2", "unit")
|
||||||
|
|
||||||
for _, tc := range tcs {
|
for _, tc := range tcs {
|
||||||
t.Run(tc.label, func(t *testing.T) {
|
t.Run(tc.label, func(t *testing.T) {
|
||||||
@@ -137,11 +105,11 @@ func Test_Worker_ViewSubscription(t *testing.T) {
|
|||||||
"vNilID": nil,
|
"vNilID": nil,
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, s := range tc.subscriptions {
|
for _, r := range tc.registrations {
|
||||||
v := views[s.vID]
|
v := views[r.vID]
|
||||||
err := Subscribe(v)
|
err := Register(v)
|
||||||
if (err != nil) != (s.err != nil) {
|
if (err != nil) != (r.err != nil) {
|
||||||
t.Errorf("%v: Subscribe() = %v, want %v", tc.label, err, s.err)
|
t.Errorf("%v: Register() = %v, want %v", tc.label, err, r.err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -152,10 +120,7 @@ func Test_Worker_RecordFloat64(t *testing.T) {
|
|||||||
restart()
|
restart()
|
||||||
|
|
||||||
someError := errors.New("some error")
|
someError := errors.New("some error")
|
||||||
m, err := stats.Float64("Test_Worker_RecordFloat64/MF1", "desc MF1", "unit")
|
m := stats.Float64("Test_Worker_RecordFloat64/MF1", "desc MF1", "unit")
|
||||||
if err != nil {
|
|
||||||
t.Errorf("stats.Float64(\"MF1\", \"desc MF1\") got error '%v', want no error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
k1, _ := tag.NewKey("k1")
|
k1, _ := tag.NewKey("k1")
|
||||||
k2, _ := tag.NewKey("k2")
|
k2, _ := tag.NewKey("k2")
|
||||||
@@ -178,31 +143,28 @@ func Test_Worker_RecordFloat64(t *testing.T) {
|
|||||||
type testCase struct {
|
type testCase struct {
|
||||||
label string
|
label string
|
||||||
registrations []*View
|
registrations []*View
|
||||||
subscriptions []*View
|
|
||||||
records []float64
|
records []float64
|
||||||
wants []want
|
wants []want
|
||||||
}
|
}
|
||||||
|
|
||||||
tcs := []testCase{
|
tcs := []testCase{
|
||||||
{
|
{
|
||||||
"0",
|
label: "0",
|
||||||
[]*View{v1, v2},
|
registrations: []*View{},
|
||||||
[]*View{},
|
records: []float64{1, 1},
|
||||||
[]float64{1, 1},
|
wants: []want{{v1, nil, someError}, {v2, nil, someError}},
|
||||||
[]want{{v1, nil, someError}, {v2, nil, someError}},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"1",
|
label: "1",
|
||||||
[]*View{v1, v2},
|
registrations: []*View{v1},
|
||||||
[]*View{v1},
|
records: []float64{1, 1},
|
||||||
[]float64{1, 1},
|
wants: []want{
|
||||||
[]want{
|
|
||||||
{
|
{
|
||||||
v1,
|
v1,
|
||||||
[]*Row{
|
[]*Row{
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
|
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
|
||||||
newCountData(2),
|
&CountData{Value: 2},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
nil,
|
nil,
|
||||||
@@ -211,17 +173,16 @@ func Test_Worker_RecordFloat64(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"2",
|
label: "2",
|
||||||
[]*View{v1, v2},
|
registrations: []*View{v1, v2},
|
||||||
[]*View{v1, v2},
|
records: []float64{1, 1},
|
||||||
[]float64{1, 1},
|
wants: []want{
|
||||||
[]want{
|
|
||||||
{
|
{
|
||||||
v1,
|
v1,
|
||||||
[]*Row{
|
[]*Row{
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
|
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
|
||||||
newCountData(2),
|
&CountData{Value: 2},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
nil,
|
nil,
|
||||||
@@ -231,7 +192,7 @@ func Test_Worker_RecordFloat64(t *testing.T) {
|
|||||||
[]*Row{
|
[]*Row{
|
||||||
{
|
{
|
||||||
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
|
[]tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}},
|
||||||
newCountData(2),
|
&CountData{Value: 2},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
nil,
|
nil,
|
||||||
@@ -247,12 +208,6 @@ func Test_Worker_RecordFloat64(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, v := range tc.subscriptions {
|
|
||||||
if err := v.Subscribe(); err != nil {
|
|
||||||
t.Fatalf("%v: Subscribe(%v) = %v; want no errors", tc.label, v.Name, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, value := range tc.records {
|
for _, value := range tc.records {
|
||||||
stats.Record(ctx, m.M(value))
|
stats.Record(ctx, m.M(value))
|
||||||
}
|
}
|
||||||
@@ -260,44 +215,31 @@ func Test_Worker_RecordFloat64(t *testing.T) {
|
|||||||
for _, w := range tc.wants {
|
for _, w := range tc.wants {
|
||||||
gotRows, err := RetrieveData(w.v.Name)
|
gotRows, err := RetrieveData(w.v.Name)
|
||||||
if (err != nil) != (w.err != nil) {
|
if (err != nil) != (w.err != nil) {
|
||||||
t.Fatalf("%v: RetrieveData(%v) = %v; want no errors", tc.label, w.v.Name, err)
|
t.Fatalf("%s: RetrieveData(%v) = %v; want error = %v", tc.label, w.v.Name, err, w.err)
|
||||||
}
|
}
|
||||||
for _, got := range gotRows {
|
for _, got := range gotRows {
|
||||||
if !containsRow(w.rows, got) {
|
if !containsRow(w.rows, got) {
|
||||||
t.Errorf("%v: got row %v; want none", tc.label, got)
|
t.Errorf("%s: got row %#v; want none", tc.label, got)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, want := range w.rows {
|
for _, want := range w.rows {
|
||||||
if !containsRow(gotRows, want) {
|
if !containsRow(gotRows, want) {
|
||||||
t.Errorf("%v: got none; want %v'", tc.label, want)
|
t.Errorf("%s: got none; want %#v'", tc.label, want)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// cleaning up
|
// Cleaning up.
|
||||||
for _, v := range tc.subscriptions {
|
Unregister(tc.registrations...)
|
||||||
if err := v.Unsubscribe(); err != nil {
|
|
||||||
t.Fatalf("%v: Unsubscribing from view %v errored with %v; want no error", tc.label, v.Name, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, v := range tc.registrations {
|
|
||||||
if err := Unregister(v); err != nil {
|
|
||||||
t.Fatalf("%v: Unregistering view %v errrored with %v; want no error", tc.label, v.Name, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReportUsage(t *testing.T) {
|
func TestReportUsage(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
m, err := stats.Int64("measure", "desc", "unit")
|
m := stats.Int64("measure", "desc", "unit")
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("stats.Int64() = %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -320,8 +262,7 @@ func TestReportUsage(t *testing.T) {
|
|||||||
restart()
|
restart()
|
||||||
SetReportingPeriod(25 * time.Millisecond)
|
SetReportingPeriod(25 * time.Millisecond)
|
||||||
|
|
||||||
err = Subscribe(tt.view)
|
if err := Register(tt.view); err != nil {
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("%v: cannot subscribe: %v", tt.name, err)
|
t.Fatalf("%v: cannot subscribe: %v", tt.name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -374,15 +315,16 @@ func TestWorkerStarttime(t *testing.T) {
|
|||||||
restart()
|
restart()
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
m, err := stats.Int64("measure/TestWorkerStarttime", "desc", "unit")
|
m := stats.Int64("measure/TestWorkerStarttime", "desc", "unit")
|
||||||
if err != nil {
|
v := &View{
|
||||||
t.Fatalf("stats.Int64() = %v", err)
|
Name: "testview",
|
||||||
|
Measure: m,
|
||||||
|
Aggregation: Count(),
|
||||||
}
|
}
|
||||||
v, _ := New("testview", "", nil, m, Count())
|
|
||||||
|
|
||||||
SetReportingPeriod(25 * time.Millisecond)
|
SetReportingPeriod(25 * time.Millisecond)
|
||||||
if err := v.Subscribe(); err != nil {
|
if err := Register(v); err != nil {
|
||||||
t.Fatalf("cannot subscribe to %v: %v", v.Name, err)
|
t.Fatalf("cannot register to %v: %v", v.Name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
e := &vdExporter{}
|
e := &vdExporter{}
|
||||||
@@ -433,7 +375,7 @@ func (e *countExporter) ExportView(vd *Data) {
|
|||||||
|
|
||||||
e.Lock()
|
e.Lock()
|
||||||
defer e.Unlock()
|
defer e.Unlock()
|
||||||
e.count = int64(*d)
|
e.count = d.Value
|
||||||
}
|
}
|
||||||
|
|
||||||
type vdExporter struct {
|
type vdExporter struct {
|
||||||
|
|||||||
1 vendor/go.opencensus.io/trace/basetypes.go generated vendored
@@ -22,6 +22,7 @@ import (
 type (
 	// TraceID is a 16-byte identifier for a set of spans.
 	TraceID [16]byte
 
 	// SpanID is an 8-byte identifier for a single span.
 	SpanID [8]byte
 )
4 vendor/go.opencensus.io/trace/benchmark_test.go generated vendored
@@ -94,12 +94,12 @@ func BenchmarkSpanID_DotString(b *testing.B) {
 func traceBenchmark(b *testing.B, fn func(*testing.B)) {
 	b.Run("AlwaysSample", func(b *testing.B) {
 		b.ReportAllocs()
-		SetDefaultSampler(AlwaysSample())
+		ApplyConfig(Config{DefaultSampler: AlwaysSample()})
 		fn(b)
 	})
 	b.Run("NeverSample", func(b *testing.B) {
 		b.ReportAllocs()
-		SetDefaultSampler(NeverSample())
+		ApplyConfig(Config{DefaultSampler: NeverSample()})
 		fn(b)
 	})
 }
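The benchmark edit above is the same one-line change every caller of the removed global sampler setter needs to make. A hedged sketch; the 1% sampling rate is only an example:

package main

import "go.opencensus.io/trace"

func main() {
	// v0.6.0: trace.SetDefaultSampler(trace.ProbabilitySampler(0.01))
	// v0.9.0: the default sampler is one field of the global Config instead.
	trace.ApplyConfig(trace.Config{
		DefaultSampler: trace.ProbabilitySampler(0.01), // sample roughly 1% of traces
	})
}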
40 vendor/go.opencensus.io/trace/config.go generated vendored Normal file
@@ -0,0 +1,40 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import "go.opencensus.io/trace/internal"
+
+// Config represents the global tracing configuration.
+type Config struct {
+	// DefaultSampler is the default sampler used when creating new spans.
+	DefaultSampler Sampler
+
+	// IDGenerator is for internal use only.
+	IDGenerator internal.IDGenerator
+}
+
+// ApplyConfig applies changes to the global tracing configuration.
+//
+// Fields not provided in the given config are going to be preserved.
+func ApplyConfig(cfg Config) {
+	c := config.Load().(*Config)
+	if cfg.DefaultSampler != nil {
+		c.DefaultSampler = cfg.DefaultSampler
+	}
+	if cfg.IDGenerator != nil {
+		c.IDGenerator = cfg.IDGenerator
+	}
+	config.Store(c)
+}
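Worth noting from ApplyConfig above: only non-nil fields overwrite the stored configuration, so repeated calls compose rather than reset. A small sketch of that behavior:

package main

import "go.opencensus.io/trace"

func main() {
	// First call: sample everything (useful in tests or while debugging).
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	// Second call with a zero Config changes nothing: nil fields are
	// preserved, so the AlwaysSample sampler from above stays in effect.
	trace.ApplyConfig(trace.Config{})
}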
33 vendor/go.opencensus.io/trace/config_test.go generated vendored Normal file
@@ -0,0 +1,33 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestApplyZeroConfig(t *testing.T) {
+	cfg := config.Load().(*Config)
+	ApplyConfig(Config{})
+	currentCfg := config.Load().(*Config)
+
+	if got, want := reflect.ValueOf(currentCfg.DefaultSampler).Pointer(), reflect.ValueOf(cfg.DefaultSampler).Pointer(); got != want {
+		t.Fatalf("config.DefaultSampler = %#v; want %#v", got, want)
+	}
+	if got, want := currentCfg.IDGenerator, cfg.IDGenerator; got != want {
+		t.Fatalf("config.IDGenerator = %#v; want %#v", got, want)
+	}
+}
20 vendor/go.opencensus.io/trace/doc.go generated vendored
@@ -13,25 +13,24 @@
 // limitations under the License.
 
 /*
-Package trace contains types for representing trace information, and
-functions for global configuration of tracing.
+Package trace contains support for OpenCensus distributed tracing.
 
 The following assumes a basic familiarity with OpenCensus concepts.
-See http://opencensus.io.
+See http://opencensus.io
 
 
-Enabling Tracing for a Program
+Exporting Traces
 
-To use OpenCensus tracing, register at least one Exporter. You can use
+To export collected tracing data, register at least one exporter. You can use
 one of the provided exporters or write your own.
 
-	trace.RegisterExporter(anExporter)
+	trace.RegisterExporter(exporter)
 
 By default, traces will be sampled relatively rarely. To change the sampling
-frequency for your entire program, call SetDefaultSampler. Use a ProbabilitySampler
+frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
 to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
 
-	trace.SetDefaultSampler(trace.AlwaysSample())
+	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
 
 
 Adding Spans to a Trace
@@ -43,13 +42,10 @@ It is common to want to capture all the activity of a function call in a span. F
 this to work, the function must take a context.Context as a parameter. Add these two
 lines to the top of the function:
 
-	ctx, span := trace.StartSpan(ctx, "your choice of name")
+	ctx, span := trace.StartSpan(ctx, "my.org/Run")
 	defer span.End()
 
 StartSpan will create a new top-level span if the context
 doesn't contain another span, otherwise it will create a child span.
 
-As a suggestion, use the fully-qualified function name as the span name, e.g.
-"github.com/me/mypackage.Run".
 */
 package trace // import "go.opencensus.io/trace"
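Taken together, the revised package doc implies a minimal v0.9.0 setup along these lines. This is only a sketch: the exporter below is a stand-in that logs span names, and the function and span names are illustrative; a real program would register one of the provided exporters.

package main

import (
	"context"
	"log"

	"go.opencensus.io/trace"
)

// logExporter is a stand-in exporter that only logs finished span names.
type logExporter struct{}

func (logExporter) ExportSpan(sd *trace.SpanData) {
	log.Printf("span finished: %s", sd.Name)
}

func main() {
	trace.RegisterExporter(logExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
	run(context.Background())
}

func run(ctx context.Context) {
	// StartSpan returns a child span if ctx already carries one,
	// otherwise a new top-level span.
	ctx, span := trace.StartSpan(ctx, "main.run")
	defer span.End()
	doWork(ctx)
}

func doWork(ctx context.Context) {
	_, span := trace.StartSpan(ctx, "main.doWork") // child of main.run
	defer span.End()
}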
21 vendor/go.opencensus.io/trace/internal/internal.go generated vendored Normal file
@@ -0,0 +1,21 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides trace internals.
+package internal
+
+type IDGenerator interface {
+	NewTraceID() [16]byte
+	NewSpanID() [8]byte
+}
Some files were not shown because too many files have changed in this diff.