Docker stats to Prometheus (#486)

* Docker stats to Prometheus

* Fix compilation error in docker_test

* Refactor docker driver Run function to wait for the container to have stopped before stopping the collection of statistics

* Fix go fmt errors

* Updates to sending docker stats to Prometheus

* remove new test TestWritResultImpl because the changes to support multiple waiters have been removed

* Update docker.Run to use channels not contexts to shut down stats collector
This commit is contained in:
Nigel Deakin
2017-11-16 19:02:33 +00:00
committed by Reed Allman
parent 83145db6ba
commit 910612d0b1
8 changed files with 355 additions and 47 deletions

View File

@@ -19,6 +19,7 @@ import (
"github.com/fnproject/fn/api/id"
"github.com/fnproject/fn/api/models"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
)
@@ -537,7 +538,7 @@ func (a *agent) launch(ctx context.Context, slots chan<- slot, call *call, tok T
}
go func() {
err := a.runHot(slots, call, tok)
err := a.runHot(ctx, slots, call, tok)
if err != nil {
ch <- err
}
@@ -574,8 +575,14 @@ func (a *agent) prepCold(ctx context.Context, slots chan<- slot, call *call, tok
return nil
}
// TODO add ctx back but be careful to only use for logs/spans
func (a *agent) runHot(slots chan<- slot, call *call, tok Token) error {
func (a *agent) runHot(ctxArg context.Context, slots chan<- slot, call *call, tok Token) error {
// We must be careful to only use ctxArg for logs/spans
// create a span from ctxArg but ignore the new Context
// instead we will create a new Context below and explicitly set its span
span, _ := opentracing.StartSpanFromContext(ctxArg, "docker_run_hot")
defer span.Finish()
if tok == nil {
// TODO we should panic, probably ;)
return errors.New("no token provided, not giving you a slot")
@@ -596,6 +603,9 @@ func (a *agent) runHot(slots chan<- slot, call *call, tok Token) error {
ctx, shutdownContainer := context.WithCancel(context.Background())
defer shutdownContainer() // close this if our waiter returns
// add the span we created above to the new Context
ctx = opentracing.ContextWithSpan(ctx, span)
cid := id.New().String()
// set up the stderr for the first one to capture any logs before the slot is
@@ -709,11 +719,22 @@ func (c *container) Logger() (io.Writer, io.Writer) { return c.stdout, c.stderr
func (c *container) Volumes() [][2]string { return nil }
func (c *container) WorkDir() string { return "" }
func (c *container) Close() {}
func (c *container) WriteStat(drivers.Stat) {}
func (c *container) Image() string { return c.image }
func (c *container) Timeout() time.Duration { return c.timeout }
func (c *container) EnvVars() map[string]string { return c.env }
func (c *container) Memory() uint64 { return c.memory * 1024 * 1024 } // convert MB
// Log the specified stats to a tracing span.
// Spans are not processed by the collector until the span ends, so to prevent any delay
// in processing the stats when the function is long-lived we create a new span for every call
func (c *container) WriteStat(ctx context.Context, stat drivers.Stat) {
span, ctx := opentracing.StartSpanFromContext(ctx, "docker_stats")
defer span.Finish()
for key, value := range stat.Metrics {
span.LogFields(log.Uint64("fn_"+key, value))
}
}
//func (c *container) DockerAuth() (docker.AuthConfiguration, error) {
// Implementing the docker.AuthConfiguration interface.
// TODO per call could implement this stored somewhere (vs. configured on host)