fn: Call extensions/overriding and a more customization-friendly docker driver (#1065)

In pure-runner and LB agent deployments, service providers might want to set specific driver options.

For example, to add cpu-shares to functions, the LB can attach the information as extensions
to the Call and pass it via gRPC to the runners. Runners then pick these extensions off the
gRPC call and hand them to the driver. Using a custom driver implementation, pure-runners can
process these extensions to modify docker.CreateContainerOptions.

To achieve this, LB agents can now be configured using a call overrider.

Pure-runners can be configured using a custom docker driver.

The RunnerCall and Call interfaces both expose call extensions.

An example demonstrating this is implemented in test/fn-system-tests/system_test.go,
which registers a call overrider for the LB agent as well as a simple custom docker driver.
In this example, the LB agent adds a key-value pair to the extensions, and the runners add
this pair as an environment variable to the container.
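
Sketching the LB half of that example: a call overrider is a hook that receives a call together with its current extensions and returns the extensions to attach before the call travels to a runner over gRPC. The exact hook signature, the agent option used to register it, and the FN_EXT_CUSTOM key below are illustrative assumptions drawn from this description, not verbatim code from the commit.

package lbexample

import (
	"github.com/fnproject/fn/api/models"
)

// customExtKey is a hypothetical extension key; runners would read it back
// through RunnerCall.Extensions() / ContainerTask.Extensions().
const customExtKey = "FN_EXT_CUSTOM"

// overrider annotates every call with a key-value pair. An LB agent would be
// constructed with this hook (e.g. via an option such as WithCallOverrider)
// so the pair rides along with the Call over gRPC to the runners.
func overrider(c *models.Call, exts map[string]string) (map[string]string, error) {
	if exts == nil {
		exts = make(map[string]string)
	}
	exts[customExtKey] = "zoo"
	return exts, nil
}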
Authored by Tolga Ceylan, committed via GitHub on 2018-06-18 14:42:28 -07:00
Parent 199827b319 · Commit e67d0e5f3f
17 changed files with 741 additions and 411 deletions


@@ -0,0 +1,189 @@
package docker

import (
	"context"
	"fmt"
	"strings"

	"github.com/fnproject/fn/api/agent/drivers"
	"github.com/fnproject/fn/api/common"
	"github.com/fsouza/go-dockerclient"
	"github.com/sirupsen/logrus"
)

// A cookie identifies a unique request to run a task.
type cookie struct {
	// namespace id used from prefork pool if applicable
	poolId string
	// network name from docker networks if applicable
	netId string
	// docker container create options created by Driver.CreateCookie, required for Driver.PrepareCookie()
	opts docker.CreateContainerOptions
	// task associated with this cookie
	task drivers.ContainerTask
	// pointer to docker driver
	drv *DockerDriver
}

func (c *cookie) configureFsSize(log logrus.FieldLogger) {
	if c.task.FsSize() == 0 {
		return
	}

	// If defined, impose file system size limit. In MB units.
	if c.opts.HostConfig.StorageOpt == nil {
		c.opts.HostConfig.StorageOpt = make(map[string]string)
	}
	opt := fmt.Sprintf("%vM", c.task.FsSize())
	log.WithFields(logrus.Fields{"size": opt, "call_id": c.task.Id()}).Debug("setting storage option")
	c.opts.HostConfig.StorageOpt["size"] = opt
}

func (c *cookie) configureTmpFs(log logrus.FieldLogger) {
	// If RO Root is NOT enabled and TmpFsSize does not have any limit, then we do not need
	// any tmpfs in the container since the function can freely write wherever it wants.
	if c.task.TmpFsSize() == 0 && !c.drv.conf.EnableReadOnlyRootFs {
		return
	}

	if c.opts.HostConfig.Tmpfs == nil {
		c.opts.HostConfig.Tmpfs = make(map[string]string)
	}

	var tmpFsOption string
	if c.task.TmpFsSize() != 0 {
		if c.drv.conf.MaxTmpFsInodes != 0 {
			tmpFsOption = fmt.Sprintf("size=%dm,nr_inodes=%d", c.task.TmpFsSize(), c.drv.conf.MaxTmpFsInodes)
		} else {
			tmpFsOption = fmt.Sprintf("size=%dm", c.task.TmpFsSize())
		}
	}

	log.WithFields(logrus.Fields{"target": "/tmp", "options": tmpFsOption, "call_id": c.task.Id()}).Debug("setting tmpfs")
	c.opts.HostConfig.Tmpfs["/tmp"] = tmpFsOption
}

func (c *cookie) configureVolumes(log logrus.FieldLogger) {
	if len(c.task.Volumes()) == 0 {
		return
	}

	if c.opts.Config.Volumes == nil {
		c.opts.Config.Volumes = map[string]struct{}{}
	}

	for _, mapping := range c.task.Volumes() {
		hostDir := mapping[0]
		containerDir := mapping[1]
		c.opts.Config.Volumes[containerDir] = struct{}{}
		mapn := fmt.Sprintf("%s:%s", hostDir, containerDir)
		c.opts.HostConfig.Binds = append(c.opts.HostConfig.Binds, mapn)
		log.WithFields(logrus.Fields{"volumes": mapn, "call_id": c.task.Id()}).Debug("setting volumes")
	}
}

func (c *cookie) configureCPU(log logrus.FieldLogger) {
	// Translate milli CPUs into CPUQuota & CPUPeriod (see Linux cgroups CFS cgroup v1 documentation)
	// e.g. a task.CPUs() of 8000 milli CPUs means a CPUQuota of 8 * 100000 usecs in a 100000 usec period,
	// which is approximately 8 CPUs in CFS terms.
	// Also see docker run options --cpu-quota and --cpu-period
	if c.task.CPUs() == 0 {
		return
	}

	quota := int64(c.task.CPUs() * 100)
	period := int64(100000)

	log.WithFields(logrus.Fields{"quota": quota, "period": period, "call_id": c.task.Id()}).Debug("setting CPU")
	c.opts.HostConfig.CPUQuota = quota
	c.opts.HostConfig.CPUPeriod = period
}

func (c *cookie) configureWorkDir(log logrus.FieldLogger) {
	wd := c.task.WorkDir()
	if wd == "" {
		return
	}

	log.WithFields(logrus.Fields{"wd": wd, "call_id": c.task.Id()}).Debug("setting work dir")
	c.opts.Config.WorkingDir = wd
}

func (c *cookie) configureHostname(log logrus.FieldLogger) {
	// hostname and container NetworkMode are not compatible.
	if c.opts.HostConfig.NetworkMode != "" {
		return
	}

	log.WithFields(logrus.Fields{"hostname": c.drv.hostname, "call_id": c.task.Id()}).Debug("setting hostname")
	c.opts.Config.Hostname = c.drv.hostname
}

func (c *cookie) configureCmd(log logrus.FieldLogger) {
	if c.task.Command() == "" {
		return
	}

	// NOTE: this is hyper-sensitive and may not be correct like this even, but it passes old tests
	cmd := strings.Fields(c.task.Command())
	log.WithFields(logrus.Fields{"call_id": c.task.Id(), "cmd": cmd, "len": len(cmd)}).Debug("docker command")
	c.opts.Config.Cmd = cmd
}

func (c *cookie) configureEnv(log logrus.FieldLogger) {
	if len(c.task.EnvVars()) == 0 {
		return
	}

	envvars := make([]string, 0, len(c.task.EnvVars()))
	for name, val := range c.task.EnvVars() {
		envvars = append(envvars, name+"="+val)
	}
	c.opts.Config.Env = envvars
}

// implements Cookie
func (c *cookie) Close(ctx context.Context) error {
	err := c.drv.removeContainer(ctx, c.task.Id())
	c.drv.unpickPool(c)
	c.drv.unpickNetwork(c)
	return err
}

// implements Cookie
func (c *cookie) Run(ctx context.Context) (drivers.WaitResult, error) {
	return c.drv.run(ctx, c.task.Id(), c.task)
}

// implements Cookie
func (c *cookie) ContainerOptions() interface{} {
	return c.opts
}

// implements Cookie
func (c *cookie) Freeze(ctx context.Context) error {
	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "Freeze"})
	log.WithFields(logrus.Fields{"call_id": c.task.Id()}).Debug("docker pause")

	err := c.drv.docker.PauseContainer(c.task.Id(), ctx)
	if err != nil {
		logrus.WithError(err).WithFields(logrus.Fields{"call_id": c.task.Id()}).Error("error pausing container")
	}
	return err
}

// implements Cookie
func (c *cookie) Unfreeze(ctx context.Context) error {
	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "Unfreeze"})
	log.WithFields(logrus.Fields{"call_id": c.task.Id()}).Debug("docker unpause")

	err := c.drv.docker.UnpauseContainer(c.task.Id(), ctx)
	if err != nil {
		logrus.WithError(err).WithFields(logrus.Fields{"call_id": c.task.Id()}).Error("error unpausing container")
	}
	return err
}

var _ drivers.Cookie = &cookie{}
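
The cookie above is half of a two-phase setup: Driver.CreateCookie (in the next file) assembles docker.CreateContainerOptions into one of these cookies without touching docker, while Driver.PrepareCookie pulls the image and creates the container, so the options can still be adjusted in between. A minimal lifecycle sketch under those assumptions, illustrative rather than code from the commit:

package lifecycle

import (
	"context"

	"github.com/fnproject/fn/api/agent/drivers"
	"github.com/fsouza/go-dockerclient"
)

// runOnce sketches the create -> adjust -> prepare -> run -> close lifecycle.
func runOnce(ctx context.Context, drv drivers.Driver, task drivers.ContainerTask) error {
	cookie, err := drv.CreateCookie(ctx, task)
	if err != nil {
		return err
	}
	// Close releases the pool slot and network pick even if we never prepare or run.
	defer cookie.Close(ctx)

	// Before PrepareCookie the container does not exist yet. Config/HostConfig
	// are pointers inside CreateContainerOptions, so edits here reach the cookie.
	if opts, ok := cookie.ContainerOptions().(docker.CreateContainerOptions); ok {
		opts.Config.Env = append(opts.Config.Env, "FN_EXT_CUSTOM=zoo")
	}

	if err := drv.PrepareCookie(ctx, cookie); err != nil {
		return err
	}
	waiter, err := cookie.Run(ctx)
	if err != nil {
		return err
	}
	_ = waiter // block on completion via the returned drivers.WaitResult
	return nil
}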


@@ -144,37 +144,37 @@ func (drv *DockerDriver) Close() error {
return err
}
-func (drv *DockerDriver) pickPool(ctx context.Context, container *docker.CreateContainerOptions) string {
+func (drv *DockerDriver) pickPool(ctx context.Context, c *cookie) {
	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "tryUsePool"})
-	if drv.pool == nil || container.HostConfig.NetworkMode != "" {
-		return ""
+	if drv.pool == nil || c.opts.HostConfig.NetworkMode != "" {
+		return
	}
	id, err := drv.pool.AllocPoolId()
	if err != nil {
		log.WithError(err).Error("Could not fetch pre fork pool container")
-		return ""
+		return
	}
	// We are able to fetch a container from pool. Now, use its
	// network, ipc and pid namespaces.
-	container.HostConfig.NetworkMode = fmt.Sprintf("container:%s", id)
-	//container.HostConfig.IpcMode = linker
-	//container.HostConfig.PidMode = linker
-	return id
+	c.opts.HostConfig.NetworkMode = fmt.Sprintf("container:%s", id)
+	//c.opts.HostConfig.IpcMode = linker
+	//c.opts.HostConfig.PidMode = linker
+	c.poolId = id
}
-func (drv *DockerDriver) unpickPool(poolId string) {
-	if poolId != "" && drv.pool != nil {
-		drv.pool.FreePoolId(poolId)
+func (drv *DockerDriver) unpickPool(c *cookie) {
+	if c.poolId != "" && drv.pool != nil {
+		drv.pool.FreePoolId(c.poolId)
	}
}
-func (drv *DockerDriver) pickNetwork(container *docker.CreateContainerOptions) string {
-	if len(drv.networks) == 0 || container.HostConfig.NetworkMode != "" {
-		return ""
+func (drv *DockerDriver) pickNetwork(c *cookie) {
+	if len(drv.networks) == 0 || c.opts.HostConfig.NetworkMode != "" {
+		return
	}
	var id string
@@ -190,132 +190,23 @@ func (drv *DockerDriver) pickNetwork(container *docker.CreateContainerOptions) string {
	drv.networks[id]++
	drv.networksLock.Unlock()
-	container.HostConfig.NetworkMode = id
-	return id
+	c.opts.HostConfig.NetworkMode = id
+	c.netId = id
}
-func (drv *DockerDriver) unpickNetwork(netId string) {
-	if netId != "" {
-		drv.networksLock.Lock()
-		drv.networks[netId]--
-		drv.networksLock.Unlock()
+func (drv *DockerDriver) unpickNetwork(c *cookie) {
+	if c.netId != "" {
+		c.drv.networksLock.Lock()
+		c.drv.networks[c.netId]--
+		c.drv.networksLock.Unlock()
	}
}
-func (drv *DockerDriver) configureFs(log logrus.FieldLogger, container *docker.CreateContainerOptions, task drivers.ContainerTask) {
-	if task.FsSize() != 0 {
-		// If defined, impose file system size limit. In MB units.
-		if container.HostConfig.StorageOpt == nil {
-			container.HostConfig.StorageOpt = make(map[string]string)
-		}
-		opt := fmt.Sprintf("%vM", task.FsSize())
-		log.WithFields(logrus.Fields{"size": opt, "call_id": task.Id()}).Debug("setting storage option")
-		container.HostConfig.StorageOpt["size"] = opt
-	}
-	container.HostConfig.ReadonlyRootfs = drv.conf.EnableReadOnlyRootFs
-}
-func (drv *DockerDriver) configureTmpFs(log logrus.FieldLogger, container *docker.CreateContainerOptions, task drivers.ContainerTask) {
-	if task.TmpFsSize() == 0 && !drv.conf.EnableReadOnlyRootFs {
-		return
-	}
-	if container.HostConfig.Tmpfs == nil {
-		container.HostConfig.Tmpfs = make(map[string]string)
-	}
-	var tmpFsOption string
-	if task.TmpFsSize() != 0 {
-		if drv.conf.MaxTmpFsInodes != 0 {
-			tmpFsOption = fmt.Sprintf("size=%dm,nr_inodes=%d", task.TmpFsSize(), drv.conf.MaxTmpFsInodes)
-		} else {
-			tmpFsOption = fmt.Sprintf("size=%dm", task.TmpFsSize())
-		}
-	}
-	target := "/tmp"
-	log.WithFields(logrus.Fields{"target": target, "options": tmpFsOption, "call_id": task.Id()}).Debug("setting tmpfs")
-	container.HostConfig.Tmpfs[target] = tmpFsOption
-}
-func (drv *DockerDriver) configureVolumes(log logrus.FieldLogger, container *docker.CreateContainerOptions, task drivers.ContainerTask) {
-	if len(task.Volumes()) == 0 {
-		return
-	}
-	if container.Config.Volumes == nil {
-		container.Config.Volumes = map[string]struct{}{}
-	}
-	for _, mapping := range task.Volumes() {
-		hostDir := mapping[0]
-		containerDir := mapping[1]
-		container.Config.Volumes[containerDir] = struct{}{}
-		mapn := fmt.Sprintf("%s:%s", hostDir, containerDir)
-		container.HostConfig.Binds = append(container.HostConfig.Binds, mapn)
-		log.WithFields(logrus.Fields{"volumes": mapn, "call_id": task.Id()}).Debug("setting volumes")
-	}
-}
-func (drv *DockerDriver) configureCPU(log logrus.FieldLogger, container *docker.CreateContainerOptions, task drivers.ContainerTask) {
-	// Translate milli cpus into CPUQuota & CPUPeriod (see Linux cGroups CFS cgroup v1 documentation)
-	// eg: task.CPUQuota() of 8000 means CPUQuota of 8 * 100000 usecs in 100000 usec period,
-	// which is approx 8 CPUS in CFS world.
-	// Also see docker run options --cpu-quota and --cpu-period
-	if task.CPUs() == 0 {
-		return
-	}
-	quota := int64(task.CPUs() * 100)
-	period := int64(100000)
-	log.WithFields(logrus.Fields{"quota": quota, "period": period, "call_id": task.Id()}).Debug("setting CPU")
-	container.HostConfig.CPUQuota = quota
-	container.HostConfig.CPUPeriod = period
-}
-func (drv *DockerDriver) configureWorkDir(log logrus.FieldLogger, container *docker.CreateContainerOptions, task drivers.ContainerTask) {
-	if wd := task.WorkDir(); wd != "" {
-		log.WithFields(logrus.Fields{"wd": wd, "call_id": task.Id()}).Debug("setting work dir")
-		container.Config.WorkingDir = wd
-	}
-}
-func (drv *DockerDriver) configureHostname(log logrus.FieldLogger, container *docker.CreateContainerOptions, task drivers.ContainerTask) {
-	if container.HostConfig.NetworkMode == "" {
-		// hostname and container NetworkMode is not compatible.
-		log.WithFields(logrus.Fields{"hostname": drv.hostname, "call_id": task.Id()}).Debug("setting hostname")
-		container.Config.Hostname = drv.hostname
-	}
-}
-func (drv *DockerDriver) configureCmd(log logrus.FieldLogger, container *docker.CreateContainerOptions, task drivers.ContainerTask) {
-	if task.Command() == "" {
-		return
-	}
-	// NOTE: this is hyper-sensitive and may not be correct like this even, but it passes old tests
-	cmd := strings.Fields(task.Command())
-	log.WithFields(logrus.Fields{"call_id": task.Id(), "cmd": cmd, "len": len(cmd)}).Debug("docker command")
-	container.Config.Cmd = cmd
-}
-func (drv *DockerDriver) configureEnv(log logrus.FieldLogger, container *docker.CreateContainerOptions, task drivers.ContainerTask) {
-	envvars := make([]string, 0, len(task.EnvVars()))
-	for name, val := range task.EnvVars() {
-		envvars = append(envvars, name+"="+val)
-	}
-	container.Config.Env = envvars
-}
-func (drv *DockerDriver) Prepare(ctx context.Context, task drivers.ContainerTask) (drivers.Cookie, error) {
-	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "Prepare"})
-	container := docker.CreateContainerOptions{
+func (drv *DockerDriver) CreateCookie(ctx context.Context, task drivers.ContainerTask) (drivers.Cookie, error) {
+	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "CreateCookie"})
+	opts := docker.CreateContainerOptions{
Name: task.Id(),
Config: &docker.Config{
Memory: int64(task.Memory()),
@@ -333,91 +224,63 @@ func (drv *DockerDriver) Prepare(ctx context.Context, task drivers.ContainerTask) (drivers.Cookie, error) {
			LogConfig: docker.LogConfig{
				Type: "none",
			},
+			ReadonlyRootfs: drv.conf.EnableReadOnlyRootFs,
		},
		Context: ctx,
	}
-	drv.configureCmd(log, &container, task)
-	drv.configureEnv(log, &container, task)
-	drv.configureCPU(log, &container, task)
-	drv.configureFs(log, &container, task)
-	drv.configureTmpFs(log, &container, task)
-	drv.configureVolumes(log, &container, task)
-	drv.configureWorkDir(log, &container, task)
-	poolId := drv.pickPool(ctx, &container)
-	netId := drv.pickNetwork(&container)
-	drv.configureHostname(log, &container, task)
-	err := drv.ensureImage(ctx, task)
-	if err != nil {
-		drv.unpickPool(poolId)
-		drv.unpickNetwork(netId)
-		return nil, err
-	}
-	_, err = drv.docker.CreateContainer(container)
+	cookie := &cookie{
+		opts: opts,
+		task: task,
+		drv:  drv,
+	}
+	cookie.configureCmd(log)
+	cookie.configureEnv(log)
+	cookie.configureCPU(log)
+	cookie.configureFsSize(log)
+	cookie.configureTmpFs(log)
+	cookie.configureVolumes(log)
+	cookie.configureWorkDir(log)
+	// Order is important, if pool is enabled, it overrides pick network
+	drv.pickPool(ctx, cookie)
+	drv.pickNetwork(cookie)
+	// Order is important, Hostname doesn't play well with Network config
+	cookie.configureHostname(log)
+	return cookie, nil
+}
+func (drv *DockerDriver) PrepareCookie(ctx context.Context, c drivers.Cookie) error {
+	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "PrepareCookie"})
+	cookie, ok := c.(*cookie)
+	if !ok {
+		return errors.New("unknown cookie implementation")
+	}
+	err := drv.ensureImage(ctx, cookie.task)
+	if err != nil {
+		return err
+	}
+	_, err = drv.docker.CreateContainer(cookie.opts)
	if err != nil {
		// since we retry under the hood, if the container gets created and retry fails, we can just ignore error
		if err != docker.ErrContainerAlreadyExists {
-			log.WithFields(logrus.Fields{"call_id": task.Id(), "command": container.Config.Cmd, "memory": container.Config.Memory,
-				"cpu_quota": task.CPUs(), "hostname": container.Config.Hostname, "name": container.Name,
-				"image": container.Config.Image, "volumes": container.Config.Volumes, "binds": container.HostConfig.Binds, "container": container.Name,
+			log.WithFields(logrus.Fields{"call_id": cookie.task.Id(), "command": cookie.opts.Config.Cmd, "memory": cookie.opts.Config.Memory,
+				"cpu_quota": cookie.task.CPUs(), "hostname": cookie.opts.Config.Hostname, "name": cookie.opts.Name,
+				"image": cookie.opts.Config.Image, "volumes": cookie.opts.Config.Volumes, "binds": cookie.opts.HostConfig.Binds, "container": cookie.opts.Name,
			}).WithError(err).Error("Could not create container")
			// NOTE: if the container fails to create we don't really want to show to user since they aren't directly configuring the container
-			drv.unpickPool(poolId)
-			drv.unpickNetwork(netId)
-			return nil, err
+			return err
		}
	}
-	// discard removal error
-	return &cookie{id: task.Id(), task: task, drv: drv, poolId: poolId, netId: netId}, nil
-}
-type cookie struct {
-	id     string
-	poolId string
-	netId  string
-	task   drivers.ContainerTask
-	drv    *DockerDriver
-}
-func (c *cookie) Close(ctx context.Context) error {
-	defer func() {
-		c.drv.unpickPool(c.poolId)
-		c.drv.unpickNetwork(c.netId)
-	}()
-	return c.drv.removeContainer(ctx, c.id)
-}
-func (c *cookie) Run(ctx context.Context) (drivers.WaitResult, error) {
-	return c.drv.run(ctx, c.id, c.task)
-}
-func (c *cookie) Freeze(ctx context.Context) error {
-	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "Freeze"})
-	log.WithFields(logrus.Fields{"call_id": c.id}).Debug("docker pause")
-	err := c.drv.docker.PauseContainer(c.id, ctx)
-	if err != nil {
-		logrus.WithError(err).WithFields(logrus.Fields{"call_id": c.id}).Error("error pausing container")
-	}
-	return err
-}
-func (c *cookie) Unfreeze(ctx context.Context) error {
-	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "Unfreeze"})
-	log.WithFields(logrus.Fields{"call_id": c.id}).Debug("docker unpause")
-	err := c.drv.docker.UnpauseContainer(c.id, ctx)
-	if err != nil {
-		logrus.WithError(err).WithFields(logrus.Fields{"call_id": c.id}).Error("error unpausing container")
-	}
-	return err
-}
+	return nil
+}
func (drv *DockerDriver) removeContainer(ctx context.Context, container string) error {
@@ -782,3 +645,5 @@ func (w *waitResult) wait(ctx context.Context) (status string, err error) {
		return drivers.StatusKilled, models.NewAPIError(http.StatusBadGateway, err)
	}
}
+
+var _ drivers.Driver = &DockerDriver{}


@@ -70,6 +70,7 @@ func (c *poolTask) Memory() uint64 { return 0 }
func (c *poolTask) CPUs() uint64 { return 0 }
func (c *poolTask) FsSize() uint64 { return 0 }
func (c *poolTask) TmpFsSize() uint64 { return 0 }
+func (c *poolTask) Extensions() map[string]string { return nil }
func (c *poolTask) WriteStat(ctx context.Context, stat drivers.Stat) {}
type dockerPoolItem struct {


@@ -22,7 +22,6 @@ func (f *taskDockerTest) Command() string { return "" }
func (f *taskDockerTest) EnvVars() map[string]string {
	return map[string]string{"FN_FORMAT": "default"}
}
-func (f *taskDockerTest) Labels() map[string]string { return nil }
func (f *taskDockerTest) Id() string { return f.id }
func (f *taskDockerTest) Group() string { return "" }
func (f *taskDockerTest) Image() string { return "fnproject/fn-test-utils" }
@@ -37,6 +36,7 @@ func (f *taskDockerTest) TmpFsSize() uint64 { return 0 }
func (f *taskDockerTest) WorkDir() string { return "" }
func (f *taskDockerTest) Close() {}
func (f *taskDockerTest) Input() io.Reader { return f.input }
+func (f *taskDockerTest) Extensions() map[string]string { return nil }
func TestRunnerDocker(t *testing.T) {
dkr := NewDocker(drivers.Config{})
@@ -46,11 +46,17 @@ func TestRunnerDocker(t *testing.T) {
	task := &taskDockerTest{"test-docker", bytes.NewBufferString(`{"isDebug": true}`), &output, &errors}
-	cookie, err := dkr.Prepare(ctx, task)
+	cookie, err := dkr.CreateCookie(ctx, task)
	if err != nil {
		t.Fatal("Couldn't create task cookie")
	}
-	defer cookie.Close(ctx)
+	err = dkr.PrepareCookie(ctx, cookie)
+	if err != nil {
+		t.Fatal("Couldn't prepare task test")
+	}
+	defer cookie.Close(ctx)
waiter, err := cookie.Run(ctx)
if err != nil {
@@ -80,13 +86,26 @@ func TestRunnerDockerNetworks(t *testing.T) {
	task1 := &taskDockerTest{"test-docker1", bytes.NewBufferString(`{"isDebug": true}`), &output, &errors}
	task2 := &taskDockerTest{"test-docker2", bytes.NewBufferString(`{"isDebug": true}`), &output, &errors}
-	cookie1, err := dkr.Prepare(ctx, task1)
+	cookie1, err := dkr.CreateCookie(ctx, task1)
	if err != nil {
		t.Fatal("Couldn't create task1 cookie")
	}
-	defer cookie1.Close(ctx)
+	err = dkr.PrepareCookie(ctx, cookie1)
+	if err != nil {
+		t.Fatal("Couldn't prepare task1 test")
+	}
+	defer cookie1.Close(ctx)
-	cookie2, err := dkr.Prepare(ctx, task2)
+	cookie2, err := dkr.CreateCookie(ctx, task2)
	if err != nil {
		t.Fatal("Couldn't create task2 cookie")
	}
-	defer cookie2.Close(ctx)
+	err = dkr.PrepareCookie(ctx, cookie2)
+	if err != nil {
+		t.Fatal("Couldn't prepare task2 test")
+	}
@@ -141,11 +160,17 @@ func TestRunnerDockerStdin(t *testing.T) {
	task := &taskDockerTest{"test-docker-stdin", bytes.NewBufferString(input), &output, &errors}
-	cookie, err := dkr.Prepare(ctx, task)
+	cookie, err := dkr.CreateCookie(ctx, task)
	if err != nil {
		t.Fatal("Couldn't create task cookie")
	}
-	defer cookie.Close(ctx)
+	err = dkr.PrepareCookie(ctx, cookie)
+	if err != nil {
+		t.Fatal("Couldn't prepare task test")
+	}
+	defer cookie.Close(ctx)
waiter, err := cookie.Run(ctx)
if err != nil {


@@ -40,6 +40,12 @@ type Cookie interface {
// Unfreeze a frozen container to unpause frozen processes
Unfreeze(ctx context.Context) error
+	// Fetch driver specific container configuration. Use this to
+	// access the container create options. If Driver.PrepareCookie() is not
+	// yet called with the cookie, then this can be used to modify container
+	// create options.
+	ContainerOptions() interface{}
}
type WaitResult interface {
@@ -51,7 +57,11 @@ type WaitResult interface {
}
type Driver interface {
-	// Prepare can be used in order to do any preparation that a specific driver
+	// Create a new cookie with defaults and/or settings from container task.
+	// Callers should Close the cookie regardless of whether they prepare or run it.
+	CreateCookie(ctx context.Context, task ContainerTask) (Cookie, error)
+
+	// PrepareCookie can be used in order to do any preparation that a specific driver
// may need to do before running the task, and can be useful to put
// preparation that the task can recover from into (i.e. if pulling an image
// fails because a registry is down, the task doesn't need to be failed). It
@@ -59,7 +69,7 @@ type Driver interface {
// Callers should Close the cookie regardless of whether they run it.
//
// The returned cookie should respect the task's timeout when it is run.
-	Prepare(ctx context.Context, task ContainerTask) (Cookie, error)
+	PrepareCookie(ctx context.Context, cookie Cookie) error
// close & shutdown the driver
Close() error
@@ -129,6 +139,9 @@ type ContainerTask interface {
// Close is used to perform cleanup after task execution.
// Close should be safe to call multiple times.
Close()
+	// Extra Configuration Options
+	Extensions() map[string]string
}
// Stat is a bucket of stats from a driver at a point in time for a certain task.
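
On the pure-runner side, these interfaces make the custom-driver half of the example straightforward: wrap the stock docker driver, and at cookie-creation time fold the task's Extensions() into the container environment through Cookie.ContainerOptions(). The sketch below is a hedged illustration of that shape; the concrete version lives in test/fn-system-tests/system_test.go and may differ in names and details.

package customdriver

import (
	"context"

	"github.com/fnproject/fn/api/agent/drivers"
	"github.com/fsouza/go-dockerclient"
)

// customDriver embeds the wrapped driver and overrides only CreateCookie;
// PrepareCookie, Close, etc. fall through to the embedded implementation.
type customDriver struct {
	drivers.Driver
}

func New(inner drivers.Driver) drivers.Driver {
	return &customDriver{Driver: inner}
}

// CreateCookie lets the wrapped driver build the cookie, then copies the call
// extensions into the container environment. Config is a pointer field inside
// docker.CreateContainerOptions, so the append below is visible to the cookie
// that PrepareCookie will later use to create the container.
func (d *customDriver) CreateCookie(ctx context.Context, task drivers.ContainerTask) (drivers.Cookie, error) {
	cookie, err := d.Driver.CreateCookie(ctx, task)
	if err != nil {
		return nil, err
	}
	if opts, ok := cookie.ContainerOptions().(docker.CreateContainerOptions); ok {
		for k, v := range task.Extensions() {
			opts.Config.Env = append(opts.Config.Env, k+"="+v)
		}
	}
	return cookie, nil
}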


@@ -17,14 +17,20 @@ type Mocker struct {
count int
}
-func (m *Mocker) Prepare(context.Context, drivers.ContainerTask) (drivers.Cookie, error) {
+func (m *Mocker) CreateCookie(context.Context, drivers.ContainerTask) (drivers.Cookie, error) {
	return &cookie{m}, nil
}
+func (m *Mocker) PrepareCookie(context.Context, drivers.Cookie) error {
+	return nil
+}
func (m *Mocker) Close() error {
	return nil
}
+var _ drivers.Driver = &Mocker{}
type cookie struct {
m *Mocker
}
@@ -39,6 +45,8 @@ func (c *cookie) Unfreeze(context.Context) error {
func (c *cookie) Close(context.Context) error { return nil }
+func (c *cookie) ContainerOptions() interface{} { return nil }
func (c *cookie) Run(ctx context.Context) (drivers.WaitResult, error) {
c.m.count++
if c.m.count%100 == 0 {
@@ -51,6 +59,8 @@ func (c *cookie) Run(ctx context.Context) (drivers.WaitResult, error) {
}, nil
}
+var _ drivers.Cookie = &cookie{}
type runResult struct {
err error
status string