Mirror of https://github.com/fnproject/fn.git (synced 2022-10-28 21:29:17 +03:00)

commit dc5e67b6d2 (parent 1cc1a5ad49), committed by Travis Reeder

    add opentracing spans for metrics
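Note on the pattern: the diff below removes the statsd-style timers and counters (drv.NewTimer(...)/Measure(), drv.Inc(...), and the MetricLogger plumbing) and wraps the same operations in OpenTracing spans instead. The following is a minimal sketch of that pattern using the opentracing-go API the diff imports; the package name and the withSpan helper are illustrative and not part of the repository:

package trace // illustrative package name, not from the repository

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
	otlog "github.com/opentracing/opentracing-go/log"
)

// withSpan starts a child span from the incoming context, runs fn with the
// derived context, records any error as a span log field, and finishes the
// span when fn returns. This mirrors the span-per-operation wrapping the
// commit applies to get_task, delete_task, docker_create_container, and so on.
func withSpan(ctx context.Context, op string, fn func(context.Context) error) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, op)
	defer span.Finish()

	err := fn(ctx)
	if err != nil {
		span.LogFields(otlog.Error(err))
	}
	return err
}

Where the old code had no natural parent context (for example removing a container after the caller's context may already have timed out), the diff starts the span from the operation's context but re-roots it on a fresh context.Background(), as RemoveContainer does below.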
@@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
@@ -14,28 +15,32 @@ import (
"time"

"github.com/Sirupsen/logrus"
"github.com/opentracing/opentracing-go"
"gitlab-odx.oracle.com/odx/functions/api/models"
"gitlab-odx.oracle.com/odx/functions/api/runner/common"
"gitlab-odx.oracle.com/odx/functions/api/runner/task"
)

func getTask(ctx context.Context, url string) (*models.Task, error) {
// TODO shove this ctx into the request?
span, _ := opentracing.StartSpanFromContext(ctx, "get_task")
defer span.Finish()

// TODO uh, make a better http client :facepalm:
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()

body, err := ioutil.ReadAll(resp.Body)
var task models.Task
err = json.NewDecoder(resp.Body).Decode(&task)
if err != nil {
return nil, err
}

var task models.Task

if err := json.Unmarshal(body, &task); err != nil {
return nil, err
}

if task.ID == "" {
return nil, nil
}
@@ -65,13 +70,17 @@ func getCfg(t *models.Task) *task.Config {
return cfg
}

func deleteTask(url string, task *models.Task) error {
func deleteTask(ctx context.Context, url string, task *models.Task) error {
span, _ := opentracing.StartSpanFromContext(ctx, "delete_task")
defer span.Finish()

// Unmarshal task to be sent over as a json
body, err := json.Marshal(task)
if err != nil {
return err
}

// TODO use a reasonable http client..
// Send out Delete request to delete task from queue
req, err := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(body))
if err != nil {
@@ -106,55 +115,68 @@ func startAsyncRunners(ctx context.Context, url string, rnr *Runner, ds models.D
case <-ctx.Done():
wg.Wait()
return

default:
if !rnr.hasAsyncAvailableMemory() {
log.Debug("memory full")
time.Sleep(1 * time.Second)
continue
}
task, err := getTask(ctx, url)
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
log.WithError(err).Errorln("Could not fetch task, timeout.")
continue
}
log.WithError(err).Error("Could not fetch task")
time.Sleep(1 * time.Second)
continue
}
if task == nil {
time.Sleep(1 * time.Second)
continue
}

ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": task.ID})
log.Info("Running task:", task.ID)
// log.Infof("Task: %+v", task)

wg.Add(1)

go func() {
defer wg.Done()
// Process Task
_, err := rnr.RunTrackedTask(task, ctx, getCfg(task))
if err != nil {
log.WithError(err).Error("Cannot run task")
}
log.Debug("Processed task")
}()

// Delete task from queue
if err := deleteTask(url, task); err != nil {
log.WithError(err).Error("Cannot delete task")
continue
}

log.Info("Task complete")
}

if !rnr.hasAsyncAvailableMemory() { // TODO this should be a channel to subscribe to
log.Debug("memory full")
time.Sleep(1 * time.Second)
continue
}

runAsyncTask(ctx, url, rnr, ds, &wg)
}
}

func runAsyncTask(ctx context.Context, url string, rnr *Runner, ds models.Datastore, wg *sync.WaitGroup) {
// start a new span altogether, unrelated to the shared global context
span := opentracing.GlobalTracer().StartSpan("async_task")
ctx = opentracing.ContextWithSpan(ctx, span)
defer span.Finish()
log := common.Logger(ctx)

task, err := getTask(ctx, url)
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
log.WithError(err).Errorln("Could not fetch task, timeout.")
return
}
log.WithError(err).Error("Could not fetch task")
time.Sleep(1 * time.Second)
return
}
if task == nil {
time.Sleep(1 * time.Second)
return
}

ctx, log = common.LoggerWithFields(ctx, logrus.Fields{"call_id": task.ID})
log.Info("Running task async:", task.ID)

wg.Add(1)

go func() {
defer wg.Done()
// Process Task
_, err := rnr.RunTrackedTask(task, ctx, getCfg(task))
if err != nil {
log.WithError(err).Error("Cannot run task")
}
log.Debug("Processed task")
}()

// TODO this is so wrong... fix later+asap

// Delete task from queue
if err := deleteTask(ctx, url, task); err != nil {
log.WithError(err).Error("Cannot delete task")
return
}

// TODO uh, even if we don't delete it it still runs but w/e
log.Info("Task complete")
}

func tasksrvURL(tasksrv string) string {
parsed, err := url.Parse(tasksrv)
if err != nil {

@@ -120,7 +120,7 @@ func TestGetTaskError(t *testing.T) {
{
"url": "/invalid",
"task": getMockTask(),
"error": "invalid character 'p' after top-level value",
"error": "json: cannot unmarshal number into Go value of type models.Task", // TODO WTF!
},
}

@@ -150,24 +150,25 @@ func TestGetTaskError(t *testing.T) {
func TestDeleteTask(t *testing.T) {
buf := setLogBuffer()
mockTask := getMockTask()
ctx := context.Background()

ts := getTestServer([]*models.Task{&mockTask})
defer ts.Close()

url := ts.URL + "/tasks"
err := deleteTask(url, &mockTask)
err := deleteTask(ctx, url, &mockTask)
if err == nil {
t.Log(buf.String())
t.Error("expected error 'Not reserver', got", err)
}

_, err = getTask(context.Background(), url)
_, err = getTask(ctx, url)
if err != nil {
t.Log(buf.String())
t.Error("expected no error, got", err)
}

err = deleteTask(url, &mockTask)
err = deleteTask(ctx, url, &mockTask)
if err != nil {
t.Log(buf.String())
t.Error("expected no error, got", err)
@@ -196,7 +197,7 @@ func testRunner(t *testing.T) (*Runner, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
ds := datastore.NewMock()
fnl := logs.NewMock()
r, err := New(ctx, NewFuncLogger(fnl), NewMetricLogger(), ds)
r, err := New(ctx, NewFuncLogger(fnl), ds)
if err != nil {
t.Fatal("Test: failed to create new runner")
}

@@ -17,8 +17,8 @@ import (
manifest "github.com/docker/distribution/manifest/schema1"
"github.com/fsouza/go-dockerclient"
"github.com/heroku/docker-registry-client/registry"
"github.com/opentracing/opentracing-go"
"gitlab-odx.oracle.com/odx/functions/api/runner/common"
"gitlab-odx.oracle.com/odx/functions/api/runner/common/stats"
"gitlab-odx.oracle.com/odx/functions/api/runner/drivers"
)

@@ -268,9 +268,7 @@ func (drv *DockerDriver) Prepare(ctx context.Context, task drivers.ContainerTask
return nil, err
}

createTimer := drv.NewTimer("docker", "create_container", 1.0)
_, err = drv.docker.CreateContainer(container)
createTimer.Measure()
if err != nil {
// since we retry under the hood, if the container gets created and retry fails, we can just ignore error
if err != docker.ErrContainerAlreadyExists {
@@ -296,17 +294,15 @@ type cookie struct {
drv *DockerDriver
}

func (c *cookie) Close() error { return c.drv.removeContainer(c.id) }
func (c *cookie) Close(ctx context.Context) error { return c.drv.removeContainer(ctx, c.id) }

func (c *cookie) Run(ctx context.Context) (drivers.RunResult, error) {
return c.drv.run(ctx, c.id, c.task)
}

func (drv *DockerDriver) removeContainer(container string) error {
removeTimer := drv.NewTimer("docker", "remove_container", 1.0)
defer removeTimer.Measure()
func (drv *DockerDriver) removeContainer(ctx context.Context, container string) error {
err := drv.docker.RemoveContainer(docker.RemoveContainerOptions{
ID: container, Force: true, RemoveVolumes: true})
ID: container, Force: true, RemoveVolumes: true, Context: ctx})

if err != nil {
logrus.WithError(err).WithFields(logrus.Fields{"container": container}).Error("error removing container")
@@ -323,7 +319,9 @@ func (drv *DockerDriver) ensureImage(ctx context.Context, task drivers.Container
var config docker.AuthConfiguration // default, tries docker hub w/o user/pass
if task, ok := task.(Auther); ok {
var err error
span, _ := opentracing.StartSpanFromContext(ctx, "docker_auth")
config, err = task.DockerAuth()
span.Finish()
if err != nil {
return err
}
@@ -334,7 +332,7 @@ func (drv *DockerDriver) ensureImage(ctx context.Context, task drivers.Container
}

// see if we already have it, if not, pull it
_, err := drv.docker.InspectImage(task.Image())
_, err := drv.docker.InspectImage(ctx, task.Image())
if err == docker.ErrNoSuchImage {
err = drv.pullImage(ctx, task, config)
}
@@ -344,15 +342,8 @@

func (drv *DockerDriver) pullImage(ctx context.Context, task drivers.ContainerTask, config docker.AuthConfiguration) error {
log := common.Logger(ctx)

reg, repo, tag := drivers.ParseImage(task.Image())
globalRepo := path.Join(reg, repo)

pullTimer := drv.NewTimer("docker", "pull_image", 1.0)
defer pullTimer.Measure()

drv.Inc("docker", "pull_image_count."+stats.AsStatField(task.Image()), 1, 1)

if reg != "" {
config.ServerAddress = reg
}
@@ -367,7 +358,6 @@ func (drv *DockerDriver) pullImage(ctx context.Context, task drivers.ContainerTa

err = drv.docker.PullImage(docker.PullImageOptions{Repository: globalRepo, Tag: tag, Context: ctx}, config)
if err != nil {
drv.Inc("task", "error.pull."+stats.AsStatField(task.Image()), 1, 1)
log.WithFields(logrus.Fields{"registry": config.ServerAddress, "username": config.Username, "image": task.Image()}).WithError(err).Error("Failed to pull image")

// TODO need to inspect for hub or network errors and pick.
@@ -397,12 +387,10 @@ func (drv *DockerDriver) run(ctx context.Context, container string, task drivers

mwOut, mwErr := task.Logger()

timer := drv.NewTimer("docker", "attach_container", 1)
waiter, err := drv.docker.AttachToContainerNonBlocking(docker.AttachToContainerOptions{
waiter, err := drv.docker.AttachToContainerNonBlocking(ctx, docker.AttachToContainerOptions{
Container: container, OutputStream: mwOut, ErrorStream: mwErr,
Stream: true, Logs: true, Stdout: true, Stderr: true,
Stdin: true, InputStream: task.Input()})
timer.Measure()
if err != nil && ctx.Err() == nil {
// ignore if ctx has errored, rewrite status lay below
return nil, err
@@ -416,10 +404,7 @@ func (drv *DockerDriver) run(ctx context.Context, container string, task drivers
return nil, err
}

taskTimer := drv.NewTimer("docker", "container_runtime", 1)

defer func() {
taskTimer.Measure()
waiter.Close()
waiter.Wait() // make sure we gather all logs
}()
@@ -528,10 +513,8 @@ func newContainerID(task drivers.ContainerTask) string {

func (drv *DockerDriver) startTask(ctx context.Context, container string) error {
log := common.Logger(ctx)
startTimer := drv.NewTimer("docker", "start_container", 1.0)
log.WithFields(logrus.Fields{"container": container}).Debug("Starting container execution")
err := drv.docker.StartContainerWithContext(container, nil, ctx)
startTimer.Measure()
if err != nil {
dockerErr, ok := err.(*docker.Error)
_, containerAlreadyRunning := err.(*docker.ContainerAlreadyRunning)

@@ -14,6 +14,8 @@ import (

"github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
"gitlab-odx.oracle.com/odx/functions/api/runner/common"
)

@@ -29,13 +31,13 @@ const (
type dockerClient interface {
// Each of these are github.com/fsouza/go-dockerclient methods

AttachToContainerNonBlocking(opts docker.AttachToContainerOptions) (docker.CloseWaiter, error)
AttachToContainerNonBlocking(ctx context.Context, opts docker.AttachToContainerOptions) (docker.CloseWaiter, error)
WaitContainerWithContext(id string, ctx context.Context) (int, error)
StartContainerWithContext(id string, hostConfig *docker.HostConfig, ctx context.Context) error
CreateContainer(opts docker.CreateContainerOptions) (*docker.Container, error)
RemoveContainer(opts docker.RemoveContainerOptions) error
PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error
InspectImage(name string) (*docker.Image, error)
InspectImage(ctx context.Context, name string) (*docker.Image, error)
Stats(opts docker.StatsOptions) error
}

@@ -95,20 +97,24 @@ type dockerWrap struct {
}

func (d *dockerWrap) retry(ctx context.Context, f func() error) error {
log := common.Logger(ctx)
var i int
span := opentracing.SpanFromContext(ctx)
defer func() { span.LogFields(log.Int("docker_call_retries", i)) }()

logger := common.Logger(ctx)
var b common.Backoff
for {
for ; ; i++ {
select {
case <-ctx.Done():
d.Inc("task", "fail.docker", 1, 1)
log.WithError(ctx.Err()).Warnf("retrying on docker errors timed out, restart docker or rotate this instance?")
logger.WithError(ctx.Err()).Warnf("retrying on docker errors timed out, restart docker or rotate this instance?")
return ctx.Err()
default:
}

err := filter(ctx, f())
if common.IsTemporary(err) || isDocker50x(err) {
log.WithError(err).Warn("docker temporary error, retrying")
logger.WithError(err).Warn("docker temporary error, retrying")
b.Sleep()
d.Inc("task", "error.docker", 1, 1)
continue
@@ -183,24 +189,11 @@ func filterNoSuchContainer(ctx context.Context, err error) error {
return err
}

func filterNotRunning(ctx context.Context, err error) error {
log := common.Logger(ctx)
if err == nil {
return nil
}
func (d *dockerWrap) AttachToContainerNonBlocking(ctx context.Context, opts docker.AttachToContainerOptions) (w docker.CloseWaiter, err error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "docker_attach_container")
defer span.Finish()

_, containerNotRunning := err.(*docker.ContainerNotRunning)
dockerErr, ok := err.(*docker.Error)
if containerNotRunning || (ok && dockerErr.Status == 304) {
log.WithError(err).Error("filtering error")
return nil
}

return err
}

func (d *dockerWrap) AttachToContainerNonBlocking(opts docker.AttachToContainerOptions) (w docker.CloseWaiter, err error) {
ctx, cancel := context.WithTimeout(context.Background(), retryTimeout)
ctx, cancel := context.WithTimeout(ctx, retryTimeout)
defer cancel()
err = d.retry(ctx, func() error {
w, err = d.docker.AttachToContainerNonBlocking(opts)
@@ -214,6 +207,8 @@ func (d *dockerWrap) AttachToContainerNonBlocking(opts docker.AttachToContainerO
}

func (d *dockerWrap) WaitContainerWithContext(id string, ctx context.Context) (code int, err error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "docker_wait_container")
defer span.Finish()
err = d.retry(ctx, func() error {
code, err = d.dockerNoTimeout.WaitContainerWithContext(id, ctx)
return err
@@ -222,6 +217,8 @@ func (d *dockerWrap) WaitContainerWithContext(id string, ctx context.Context) (c
}

func (d *dockerWrap) StartContainerWithContext(id string, hostConfig *docker.HostConfig, ctx context.Context) (err error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "docker_start_container")
defer span.Finish()
err = d.retry(ctx, func() error {
err = d.dockerNoTimeout.StartContainerWithContext(id, hostConfig, ctx)
if _, ok := err.(*docker.NoSuchContainer); ok {
@@ -234,7 +231,9 @@ func (d *dockerWrap) StartContainerWithContext(id string, hostConfig *docker.Hos
}

func (d *dockerWrap) CreateContainer(opts docker.CreateContainerOptions) (c *docker.Container, err error) {
err = d.retry(opts.Context, func() error {
span, ctx := opentracing.StartSpanFromContext(opts.Context, "docker_create_container")
defer span.Finish()
err = d.retry(ctx, func() error {
c, err = d.dockerNoTimeout.CreateContainer(opts)
return err
})
@@ -242,7 +241,9 @@ func (d *dockerWrap) CreateContainer(opts docker.CreateContainerOptions) (c *doc
}

func (d *dockerWrap) PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) (err error) {
err = d.retry(opts.Context, func() error {
span, ctx := opentracing.StartSpanFromContext(opts.Context, "docker_pull_image")
defer span.Finish()
err = d.retry(ctx, func() error {
err = d.dockerNoTimeout.PullImage(opts, auth)
return err
})
@@ -250,7 +251,13 @@ func (d *dockerWrap) PullImage(opts docker.PullImageOptions, auth docker.AuthCon
}

func (d *dockerWrap) RemoveContainer(opts docker.RemoveContainerOptions) (err error) {
ctx, cancel := context.WithTimeout(context.Background(), retryTimeout)
// extract the span, but do not keep the context, since the enclosing context
// may be timed out, and we still want to remove the container. TODO in caller? who cares?
span, _ := opentracing.StartSpanFromContext(opts.Context, "docker_remove_container")
defer span.Finish()
ctx := opentracing.ContextWithSpan(context.Background(), span)

ctx, cancel := context.WithTimeout(ctx, retryTimeout)
defer cancel()
err = d.retry(ctx, func() error {
err = d.docker.RemoveContainer(opts)
@@ -259,8 +266,10 @@ func (d *dockerWrap) RemoveContainer(opts docker.RemoveContainerOptions) (err er
return filterNoSuchContainer(ctx, err)
}

func (d *dockerWrap) InspectImage(name string) (i *docker.Image, err error) {
ctx, cancel := context.WithTimeout(context.Background(), retryTimeout)
func (d *dockerWrap) InspectImage(ctx context.Context, name string) (i *docker.Image, err error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "docker_inspect_image")
defer span.Finish()
ctx, cancel := context.WithTimeout(ctx, retryTimeout)
defer cancel()
err = d.retry(ctx, func() error {
i, err = d.docker.InspectImage(name)

@@ -47,7 +47,7 @@ func TestRunnerDocker(t *testing.T) {
if err != nil {
t.Fatal("Couldn't prepare task test")
}
defer cookie.Close()
defer cookie.Close(ctx)

result, err := cookie.Run(ctx)
if err != nil {
@@ -73,7 +73,7 @@ func TestRunnerDockerStdin(t *testing.T) {
if err != nil {
t.Fatal("Couldn't prepare task test")
}
defer cookie.Close()
defer cookie.Close(ctx)

result, err := cookie.Run(ctx)
if err != nil {

@@ -17,7 +17,8 @@ import (
// Clients should always call Close() on a DriverCookie after they are done
// with it.
type Cookie interface {
io.Closer
// Close should clean up any resources the cookie was using, or was going to use.
Close(ctx context.Context) error

// Run should execute task on the implementation.
// RunResult captures the result of task execution. This means if task

@@ -24,7 +24,7 @@ type cookie struct {
m *Mocker
}

func (c *cookie) Close() error { return nil }
func (c *cookie) Close(context.Context) error { return nil }

func (c *cookie) Run(ctx context.Context) (drivers.RunResult, error) {
c.m.count++

@@ -1,53 +0,0 @@
package runner

import (
"context"
"time"

"github.com/Sirupsen/logrus"
"gitlab-odx.oracle.com/odx/functions/api/runner/common"
)

type MetricLogger interface {
Log(context.Context, map[string]interface{})
LogCount(context.Context, string, int)
LogGauge(context.Context, string, int)
LogTime(context.Context, string, time.Duration)
}

type Metric map[string]interface{}

func NewMetricLogger() MetricLogger {
return &DefaultMetricLogger{}
}

type DefaultMetricLogger struct{}

func (l *DefaultMetricLogger) Log(ctx context.Context, metric map[string]interface{}) {
log := common.Logger(ctx)
log.WithFields(logrus.Fields(metric)).Info()
}

func (l *DefaultMetricLogger) LogCount(ctx context.Context, name string, value int) {
l.Log(ctx, Metric{
"name": name,
"value": value,
"type": "count",
})
}

func (l *DefaultMetricLogger) LogTime(ctx context.Context, name string, value time.Duration) {
l.Log(ctx, Metric{
"name": name,
"value": value,
"type": "time",
})
}

func (l *DefaultMetricLogger) LogGauge(ctx context.Context, name string, value int) {
l.Log(ctx, Metric{
"name": name,
"value": value,
"type": "gauge",
})
}
@@ -14,6 +14,8 @@ import (
"time"

"github.com/Sirupsen/logrus"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
"gitlab-odx.oracle.com/odx/functions/api/models"
"gitlab-odx.oracle.com/odx/functions/api/runner/common"
"gitlab-odx.oracle.com/odx/functions/api/runner/drivers"
@@ -28,7 +30,6 @@ import (
type Runner struct {
driver drivers.Driver
taskQueue chan *containerTask
mlog MetricLogger
flog FuncLogger
availableMem int64
usedMem int64
@@ -50,7 +51,7 @@ const (
DefaultIdleTimeout = 30 * time.Second
)

func New(ctx context.Context, flog FuncLogger, mlog MetricLogger, ds models.Datastore) (*Runner, error) {
func New(ctx context.Context, flog FuncLogger, ds models.Datastore) (*Runner, error) {
// TODO: Is this really required for the container drivers? Can we remove it?
env := common.NewEnvironment(func(e *common.Environment) {})

@@ -64,7 +65,6 @@ func New(ctx context.Context, flog FuncLogger, mlog MetricLogger, ds models.Data
driver: driver,
taskQueue: make(chan *containerTask, 100),
flog: flog,
mlog: mlog,
availableMem: getAvailableMemory(),
usedMem: 0,
datastore: ds,
@@ -112,13 +112,8 @@ func (r *Runner) handleTask(task *containerTask) {
time.Sleep(time.Microsecond)
}

metricBaseName := fmt.Sprintf("run.%s.", task.cfg.AppName)
r.mlog.LogTime(task.ctx, metricBaseName+"wait_time", waitTime)
r.mlog.LogTime(task.ctx, "run.wait_time", waitTime)

if timedOut {
// Send to a signal to this task saying it cannot run
r.mlog.LogCount(task.ctx, metricBaseName+"timeout", 1)
task.canRun <- false
return
}
@@ -164,9 +159,35 @@ func (r *Runner) checkMemAndUse(req uint64) bool {
return true
}

func (r *Runner) awaitSlot(ctask *containerTask) error {
span, _ := opentracing.StartSpanFromContext(ctask.ctx, "wait_mem_slot")
defer span.Finish()
// Check if has enough available memory
// If available, use it
if !r.checkMemAndUse(ctask.cfg.Memory) {
// If not, try add task to the queue
select {
case r.taskQueue <- ctask:
default:
span.LogFields(log.Int("queue full", 1))
// If queue is full, return error
return ErrFullQueue
}

// If task was added to the queue, wait for permission
if ok := <-ctask.canRun; !ok {
span.LogFields(log.Int("memory timeout", 1))
// This task timed out, not available memory
return ErrTimeOutNoMemory
}
}
return nil
}

// run is responsible for running 1 instance of a docker container
func (r *Runner) run(ctx context.Context, cfg *task.Config) (drivers.RunResult, error) {
var err error
span, ctx := opentracing.StartSpanFromContext(ctx, "run_container")
defer span.Finish()

if cfg.Memory == 0 {
cfg.Memory = 128
@@ -183,36 +204,19 @@ func (r *Runner) run(ctx context.Context, cfg *task.Config) (drivers.RunResult,
canRun: make(chan bool),
}

metricBaseName := fmt.Sprintf("run.%s.", cfg.AppName)
r.mlog.LogCount(ctx, metricBaseName+"requests", 1)

// Check if has enough available memory
// If available, use it
if !r.checkMemAndUse(cfg.Memory) {
// If not, try add task to the queue
select {
case r.taskQueue <- ctask:
default:
// If queue is full, return error
r.mlog.LogCount(ctx, "queue.full", 1)
return nil, ErrFullQueue
}

// If task was added to the queue, wait for permission
if ok := <-ctask.canRun; !ok {
// This task timed out, not available memory
return nil, ErrTimeOutNoMemory
}
} else {
r.mlog.LogTime(ctx, metricBaseName+"waittime", 0)
}
defer r.addUsedMem(-1 * int64(cfg.Memory))

cookie, err := r.driver.Prepare(ctx, ctask)
err := r.awaitSlot(ctask)
if err != nil {
return nil, err
}
defer cookie.Close()
defer r.addUsedMem(-1 * int64(cfg.Memory))

span, pctx := opentracing.StartSpanFromContext(ctx, "prepare")
cookie, err := r.driver.Prepare(pctx, ctask)
span.Finish()
if err != nil {
return nil, err
}
defer cookie.Close(ctx)

select {
case <-cfg.Ready:
@@ -220,23 +224,14 @@ func (r *Runner) run(ctx context.Context, cfg *task.Config) (drivers.RunResult,
close(cfg.Ready)
}

metricStart := time.Now()

result, err := cookie.Run(ctx)
span, rctx := opentracing.StartSpanFromContext(ctx, "run")
result, err := cookie.Run(rctx)
span.Finish()
if err != nil {
return nil, err
}

if result.Status() == "success" {
r.mlog.LogCount(ctx, metricBaseName+"succeeded", 1)
} else {
r.mlog.LogCount(ctx, metricBaseName+"error", 1)
}

metricElapsed := time.Since(metricStart)
r.mlog.LogTime(ctx, metricBaseName+"time", metricElapsed)
r.mlog.LogTime(ctx, "run.exec_time", metricElapsed)

span.LogFields(log.String("status", result.Status()))
return result, nil
}

@@ -23,7 +23,7 @@ func TestRunnerHello(t *testing.T) {
ds := datastore.NewMock()
fnl := logs.NewMock()
fLogger := NewFuncLogger(fnl)
runner, err := New(ctx, fLogger, NewMetricLogger(), ds)
runner, err := New(ctx, fLogger, ds)
if err != nil {
t.Fatalf("Test error during New() - %s", err)
}
@@ -82,7 +82,7 @@ func TestRunnerError(t *testing.T) {
ds := datastore.NewMock()
fnl := logs.NewMock()
fLogger := NewFuncLogger(fnl)
runner, err := New(ctx, fLogger, NewMetricLogger(), ds)
runner, err := New(ctx, fLogger, ds)
if err != nil {
t.Fatalf("Test error during New() - %s", err)
}

@@ -10,6 +10,7 @@ import (

"github.com/Sirupsen/logrus"
"github.com/go-openapi/strfmt"
"github.com/opentracing/opentracing-go"
"gitlab-odx.oracle.com/odx/functions/api/id"
"gitlab-odx.oracle.com/odx/functions/api/models"
"gitlab-odx.oracle.com/odx/functions/api/runner/drivers"
@@ -279,6 +280,8 @@ func (g *ghostWriter) Write(b []byte) (int, error) {
func (g *ghostWriter) Close() error { return nil }

func (hc *htfn) serve(ctx context.Context) {
span, ctx := opentracing.StartSpanFromContext(ctx, "run_hot_container")
defer span.Finish()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
cfg := *hc.cfg
@@ -291,6 +294,7 @@ func (hc *htfn) serve(ctx context.Context) {

stderr := &ghostWriter{inner: bwLog}

first := true
go func() {
for {
select {
@@ -308,6 +312,19 @@ func (hc *htfn) serve(ctx context.Context) {
logger.Info("Canceling inactive hot function")
cancel()
case t := <-hc.tasks:
var span opentracing.Span
if first {
// TODO this doesn't work as intended; beyond me atm, but the spans do come up.
// need a way to add the span from starting container to the first execution, basically.
spanHot := opentracing.SpanFromContext(ctx)
spanTask := opentracing.SpanFromContext(t.Ctx)
span = opentracing.StartSpan("dispatch", opentracing.ChildOf(spanTask.Context()), opentracing.FollowsFrom(spanHot.Context()))
ctx = opentracing.ContextWithSpan(t.Ctx, span)
first = false
} else {
span, ctx = opentracing.StartSpanFromContext(t.Ctx, "dispatch")
}

// swap logs to log to the task logger instead of stderr
tlog := hc.rnr.flog.Writer(ctx, cfg.AppName, cfg.Path, cfg.Image, cfg.ID)
stderr.swap(tlog)
@@ -319,6 +336,7 @@ func (hc *htfn) serve(ctx context.Context) {
status = "error"
logrus.WithField("ctx", ctx).Info("task failed")
}
span.Finish()
hc.once()

stderr.swap(bwLog) // swap back out before flush
@@ -338,6 +356,7 @@ func (hc *htfn) serve(ctx context.Context) {
cfg.Stdout = hc.containerOut
cfg.Stderr = stderr

// TODO how to tie a span from the first task into this? yikes
result, err := hc.rnr.run(ctx, &cfg)
if err != nil {
logger.WithError(err).Error("hot function failure detected")