Mirror of https://github.com/fnproject/fn.git
Previously we would retry indefinitely, up to the context deadline, with some backoff in between. For hot functions, since we don't set any deadline on pulling or creating the image, this meant we would retry forever without making any progress if e.g. the registry is inaccessible, or on any other "temporary" error that isn't actually temporary. This adds a hard cap of 10 retries, which comes to approximately 13s if the operations themselves take no time, while still respecting any enclosing context deadline. The case where this was coming up is now tested for; it was otherwise confusing for users to debug. Now it surfaces an ECONNREFUSED with the address of the registry, which should help users debug without having to poke around in the fn logs (not that "check the logs" is a good answer anyway: in the near future not all users will be operators, and this error makes sense to surface directly). Closes #727
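Roughly, the new behavior looks like this (a sketch only; the real implementation is dockerWrap.retry in the file below, and the helper names here are illustrative):

    var b backoff
    for i := 0; i < 10; i++ { // hard cap instead of retrying until the context dies
        select {
        case <-ctx.Done():
            return ctx.Err() // still respect any enclosing deadline
        default:
        }
        err = dockerCall()
        if isTemporaryOr50x(err) {
            b.Sleep(ctx) // ~13s total across 10 attempts if the call itself is instant
            continue
        }
        return err
    }
    return err // attempts exhausted: surface the last error, e.g. ECONNREFUSED from the registry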
329 lines · 10 KiB · Go
// +build go1.7

package docker

import (
    "context"
    "crypto/tls"
    "net"
    "net/http"
    "strings"
    "time"

    "github.com/fnproject/fn/api/common"
    "github.com/fsouza/go-dockerclient"
    "github.com/opentracing/opentracing-go"
    "github.com/opentracing/opentracing-go/log"
    "github.com/sirupsen/logrus"
)

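// retryTimeout bounds most of the retried calls below (via context.WithTimeout);
// pauseTimeout is the much tighter bound used for pause/unpause.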
const (
    retryTimeout = 10 * time.Minute
    pauseTimeout = 5 * time.Second
)

// dockerClient wraps the docker client calls we use so that we can retry 500s.
// It kind of sucks, but fsouza doesn't bake in retries we can use internally;
// we could contribute that upstream at some point. It would be much more
// convenient if we didn't have to do this, but it's better than ad hoc retries.
// It also adds timeouts to many operations, varying by operation.
// TODO could generate this, maybe not worth it, may not change often
type dockerClient interface {
    // Each of these is a github.com/fsouza/go-dockerclient method

    AttachToContainerNonBlocking(ctx context.Context, opts docker.AttachToContainerOptions) (docker.CloseWaiter, error)
    WaitContainerWithContext(id string, ctx context.Context) (int, error)
    StartContainerWithContext(id string, hostConfig *docker.HostConfig, ctx context.Context) error
    CreateContainer(opts docker.CreateContainerOptions) (*docker.Container, error)
    RemoveContainer(opts docker.RemoveContainerOptions) error
    PauseContainer(id string, ctx context.Context) error
    UnpauseContainer(id string, ctx context.Context) error
    PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error
    InspectImage(ctx context.Context, name string) (*docker.Image, error)
    InspectContainerWithContext(container string, ctx context.Context) (*docker.Container, error)
    Stats(opts docker.StatsOptions) error
    Info(ctx context.Context) (*docker.DockerInfo, error)
}

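// newClient builds two docker clients from the environment and fails fast if
// the daemon is unreachable: one carries a hard 120s client timeout for short
// calls, the other has no client-side timeout so per-call contexts govern.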
// TODO: switch to github.com/docker/engine-api
func newClient() dockerClient {
    // TODO this was much easier, don't need special settings at the moment
    // docker, err := docker.NewClient(conf.Docker)
    client, err := docker.NewClientFromEnv()
    if err != nil {
        logrus.WithError(err).Fatal("couldn't create docker client")
    }

    t := &http.Transport{
        Dial: (&net.Dialer{
            Timeout:   10 * time.Second,
            KeepAlive: 1 * time.Minute,
        }).Dial,
        TLSClientConfig: &tls.Config{
            ClientSessionCache: tls.NewLRUClientSessionCache(8192),
        },
        TLSHandshakeTimeout:   10 * time.Second,
        MaxIdleConnsPerHost:   512,
        Proxy:                 http.ProxyFromEnvironment,
        MaxIdleConns:          512,
        IdleConnTimeout:       90 * time.Second,
        ExpectContinueTimeout: 1 * time.Second,
    }

    client.HTTPClient = &http.Client{Transport: t}

    if err := client.Ping(); err != nil {
        logrus.WithError(err).Fatal("couldn't connect to docker daemon")
    }

    client.SetTimeout(120 * time.Second)

    // get 2 clients, one with a small timeout, one with no timeout to use contexts

    clientNoTimeout, err := docker.NewClientFromEnv()
    if err != nil {
        logrus.WithError(err).Fatal("couldn't create other docker client")
    }

    clientNoTimeout.HTTPClient = &http.Client{Transport: t}

    if err := clientNoTimeout.Ping(); err != nil {
        logrus.WithError(err).Fatal("couldn't connect to other docker daemon")
    }

    return &dockerWrap{client, clientNoTimeout}
}

type dockerWrap struct {
    docker          *docker.Client
    dockerNoTimeout *docker.Client
}

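// retry runs f, retrying docker 5xx responses and errors marked as temporary,
// with a backoff between attempts. It gives up after 10 attempts (~13s if the
// calls themselves take no time) or when ctx is done, whichever comes first,
// and returns the last error seen.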
func (d *dockerWrap) retry(ctx context.Context, f func() error) error {
    var i int
    var err error
    span := opentracing.SpanFromContext(ctx)
    defer func() { span.LogFields(log.Int("docker_call_retries", i)) }()

    logger := common.Logger(ctx)
    var b common.Backoff
    // 10 retries w/o change to backoff is ~13s if ops take ~0 time
    for ; i < 10; i++ {
        select {
        case <-ctx.Done():
            span.LogFields(log.String("task", "fail.docker"))
            logger.WithError(ctx.Err()).Warnf("docker call timed out")
            return ctx.Err()
        default:
        }

        err = filter(ctx, f())
        if common.IsTemporary(err) || isDocker50x(err) {
            logger.WithError(err).Warn("docker temporary error, retrying")
            b.Sleep(ctx)
            span.LogFields(log.String("task", "tmperror.docker"))
            continue
        }
        if err != nil {
            span.LogFields(log.String("task", "error.docker"))
        }
        return err
    }
    return err // TODO could return context.DeadlineExceeded which ~makes sense
}

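// isDocker50x reports whether err is a docker API error with a 5xx status.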
func isDocker50x(err error) bool {
    derr, ok := err.(*docker.Error)
    return ok && derr.Status >= 500
}

// implement common.Temporary()
type temporary struct {
    error
}

func (t *temporary) Temporary() bool { return true }

func temp(err error) error {
    return &temporary{err}
}

// some 500s are totally cool
func filter(ctx context.Context, err error) error {
    log := common.Logger(ctx)
    // "API error (500): {\"message\":\"service endpoint with name task-57d722ecdecb9e7be16aff17 already exists\"}\n" -> ok since container exists
    switch {
    default:
        return err
    case err == nil:
        return err
    case strings.Contains(err.Error(), "service endpoint with name"):
    }
    log.WithError(err).Warn("filtering error")
    return nil
}

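// filterNoSuchContainer swallows "no such container" and 404 errors (logging
// them), since callers that use it don't care if the container is already gone.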
func filterNoSuchContainer(ctx context.Context, err error) error {
    log := common.Logger(ctx)
    if err == nil {
        return nil
    }
    _, containerNotFound := err.(*docker.NoSuchContainer)
    dockerErr, ok := err.(*docker.Error)
    if containerNotFound || (ok && dockerErr.Status == 404) {
        log.WithError(err).Error("filtering error")
        return nil
    }
    return err
}

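// The wrappers below all follow the same pattern: start a tracing span, bound
// the call with a timeout where appropriate, and funnel the underlying client
// call through retry.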
func (d *dockerWrap) Info(ctx context.Context) (info *docker.DockerInfo, err error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "docker_server_version")
    defer span.Finish()

    ctx, cancel := context.WithTimeout(ctx, retryTimeout)
    defer cancel()
    err = d.retry(ctx, func() error {
        info, err = d.docker.Info()
        return err
    })
    return info, err
}

func (d *dockerWrap) AttachToContainerNonBlocking(ctx context.Context, opts docker.AttachToContainerOptions) (w docker.CloseWaiter, err error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "docker_attach_container")
    defer span.Finish()

    ctx, cancel := context.WithTimeout(ctx, retryTimeout)
    defer cancel()
    err = d.retry(ctx, func() error {
        w, err = d.docker.AttachToContainerNonBlocking(opts)
        if err != nil {
            // always retry if attach errors, task is running, we want logs!
            err = temp(err)
        }
        return err
    })
    return w, err
}

func (d *dockerWrap) WaitContainerWithContext(id string, ctx context.Context) (code int, err error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "docker_wait_container")
    defer span.Finish()
    err = d.retry(ctx, func() error {
        code, err = d.dockerNoTimeout.WaitContainerWithContext(id, ctx)
        return err
    })
    return code, filterNoSuchContainer(ctx, err)
}

func (d *dockerWrap) StartContainerWithContext(id string, hostConfig *docker.HostConfig, ctx context.Context) (err error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "docker_start_container")
    defer span.Finish()
    err = d.retry(ctx, func() error {
        err = d.dockerNoTimeout.StartContainerWithContext(id, hostConfig, ctx)
        if _, ok := err.(*docker.NoSuchContainer); ok {
            // for some reason create will sometimes return successfully then say no such container here. wtf. so just retry like normal
            return temp(err)
        }
        return err
    })
    return err
}

func (d *dockerWrap) CreateContainer(opts docker.CreateContainerOptions) (c *docker.Container, err error) {
    span, ctx := opentracing.StartSpanFromContext(opts.Context, "docker_create_container")
    defer span.Finish()
    err = d.retry(ctx, func() error {
        c, err = d.dockerNoTimeout.CreateContainer(opts)
        return err
    })
    return c, err
}

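// PullImage goes through the same bounded retry: if the registry is
// unreachable, the error (e.g. ECONNREFUSED with the registry address) is
// surfaced to the caller after the attempts are exhausted rather than
// retrying forever (see the commit message above).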
func (d *dockerWrap) PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) (err error) {
    span, ctx := opentracing.StartSpanFromContext(opts.Context, "docker_pull_image")
    defer span.Finish()
    err = d.retry(ctx, func() error {
        err = d.dockerNoTimeout.PullImage(opts, auth)
        return err
    })
    return err
}

func (d *dockerWrap) RemoveContainer(opts docker.RemoveContainerOptions) (err error) {
    // extract the span, but do not keep the context, since the enclosing context
    // may be timed out, and we still want to remove the container. TODO in caller? who cares?
    span, _ := opentracing.StartSpanFromContext(opts.Context, "docker_remove_container")
    defer span.Finish()
    ctx := opentracing.ContextWithSpan(context.Background(), span)

    ctx, cancel := context.WithTimeout(ctx, retryTimeout)
    defer cancel()
    err = d.retry(ctx, func() error {
        err = d.docker.RemoveContainer(opts)
        return err
    })
    return filterNoSuchContainer(ctx, err)
}

func (d *dockerWrap) PauseContainer(id string, ctx context.Context) (err error) {
    span, _ := opentracing.StartSpanFromContext(ctx, "docker_pause_container")
    defer span.Finish()
    ctx, cancel := context.WithTimeout(ctx, pauseTimeout)
    defer cancel()
    err = d.retry(ctx, func() error {
        err = d.docker.PauseContainer(id)
        return err
    })
    return filterNoSuchContainer(ctx, err)
}

func (d *dockerWrap) UnpauseContainer(id string, ctx context.Context) (err error) {
    span, _ := opentracing.StartSpanFromContext(ctx, "docker_unpause_container")
    defer span.Finish()
    ctx, cancel := context.WithTimeout(ctx, pauseTimeout)
    defer cancel()
    err = d.retry(ctx, func() error {
        err = d.docker.UnpauseContainer(id)
        return err
    })
    return filterNoSuchContainer(ctx, err)
}

func (d *dockerWrap) InspectImage(ctx context.Context, name string) (i *docker.Image, err error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "docker_inspect_image")
    defer span.Finish()
    ctx, cancel := context.WithTimeout(ctx, retryTimeout)
    defer cancel()
    err = d.retry(ctx, func() error {
        i, err = d.docker.InspectImage(name)
        return err
    })
    return i, err
}

func (d *dockerWrap) InspectContainerWithContext(container string, ctx context.Context) (c *docker.Container, err error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "docker_inspect_container")
    defer span.Finish()
    ctx, cancel := context.WithTimeout(ctx, retryTimeout)
    defer cancel()
    err = d.retry(ctx, func() error {
        c, err = d.docker.InspectContainerWithContext(container, ctx)
        return err
    })
    return c, err
}

func (d *dockerWrap) Stats(opts docker.StatsOptions) (err error) {
    // we can't retry this one this way since the callee closes the
    // stats chan, need a fancier retry mechanism where we can swap out
    // channels, but stats isn't crucial so... be lazy for now
    return d.docker.Stats(opts)

    //err = d.retry(func() error {
    //err = d.docker.Stats(opts)
    //return err
    //})
    //return err
}