Files
fn-serverless/api/agent/drivers/docker/cookie.go
Reed Allman 3a9c48b8a3 http-stream format (#1202)
* POC code for inotify UDS-io-socket

* http-stream format

introducing the `http-stream` format support in fn. there are many details for
this, none of which can be linked from github :( -- docs are coming (I could
even try to add some here?). this is kinda MVP-ish level, but does not
implement the remaining spec, ie 'headers' fixing up / invoke fixing up. the
thinking being we can land this to test fdks / cli with and start splitting
work up on top of this. all other formats work the same as previous (no
breakage, only new stuff)

with the cli you can set `format: http-stream` and deploy, and then invoke a
function via the `http-stream` format. this uses unix domain socket (uds) on
the container instead of previous stdin/stdout, and fdks will have to support
this in a new fashion (will see about getting docs on here). fdk-go works,
which is here: https://github.com/fnproject/fdk-go/pull/30 . the output looks
the same as an http format function when invoking a function. wahoo.

there's some amount of stuff we can clean up here, enumerated:

* the cleanup of the sock files is iffy, high pri here

* permissions are a pain in the ass and i punted on dealing with them. you can
run `sudo ./fnserver` if running locally, it may/may not work in dind(?) ootb

* no pipe usage at all (yay), still could reduce buffer usage around the pipe
behavior, we could clean this up potentially before removal (and tests)

* my brain can’t figure out if dispatchOldFormats changes pipe behavior, but
tests work

* i marked XXX to do some clean up which will follow soon… need this to test fdk
tho so meh, any thoughts on those marked would be appreciated however (1 less
decision for me). mostly happy w/ general shape/plumbing tho

* there are no tests atm, this is a tricky dance indeed. attempts were made.
need to futz with the permission stuff before committing to adding any tests
here, which I don't like either. also, need to get the fdk-go based test image
updated according to the fdk-go, and there's a dance there too. rumba time..

* delaying the big big cleanup until we have good enough fdk support to kill
all the other formats.

open to ideas on how to maneuver landing stuff...

* fix unmount

* see if the tests work on ci...

* add call id header

* fix up makefile

* add configurable iofs opts

* add format file describing http-stream contract

* rm some cruft

* default iofs to /tmp, remove mounting

Out of the box, fn can't mount. /tmp will provide a memory-backed fs for us
on most systems, this will be fine for local developing and this can be
configured to be wherever for anyone that wants to make things more difficult
for themselves.

also removes the mounting, this has to be done as root. we can't do this in
the oss fn (short of requesting root, but no). in the future, we may want to
have a knob here to have a function that can be configured in fn that allows
further configuration here. since we don't know what we need in this dept
really, not doing that yet (it may be the case that it could be done
operationally outside of fn, eg, but not if each directory needs to be
configured itself, which seems likely, anyway...)

* add WIP note just in case...
2018-09-14 10:59:12 +01:00

243 lines
6.7 KiB
Go

package docker
import (
"context"
"fmt"
"strings"
"github.com/fnproject/fn/api/agent/drivers"
"github.com/fnproject/fn/api/common"
"github.com/fsouza/go-dockerclient"
"github.com/sirupsen/logrus"
)
// A cookie identifies a unique request to run a task.
//
// It bundles the docker container-create options for one task together with
// the pool/network resources reserved for it, and implements drivers.Cookie
// (see the assertion at the bottom of this file).
type cookie struct {
// namespace id used from prefork pool if applicable
poolId string
// network name from docker networks if applicable
netId string
// docker container create options created by Driver.CreateCookie, required for Driver.Prepare()
opts docker.CreateContainerOptions
// task associated with this cookie; source of memory/cpu/env/etc. settings
task drivers.ContainerTask
// pointer to docker driver; used for container lifecycle and pool/network release
drv *DockerDriver
}
// configureLogger points the container's docker log driver at the task's
// syslog endpoint, or disables container logging entirely when the task has
// no log URL configured.
func (c *cookie) configureLogger(log logrus.FieldLogger) {
	cfg := c.task.LoggerConfig()

	// No syslog endpoint: silence the container's logs.
	if cfg.URL == "" {
		c.opts.HostConfig.LogConfig = docker.LogConfig{Type: "none"}
		return
	}

	logOpts := map[string]string{
		"syslog-address":  cfg.URL,
		"syslog-facility": "user",
		"syslog-format":   "rfc5424",
	}

	// Flatten name=value tag pairs into docker's single comma-separated
	// "tag" option; omit the option when there are no tags.
	pairs := make([]string, 0, len(cfg.Tags))
	for _, t := range cfg.Tags {
		pairs = append(pairs, fmt.Sprintf("%s=%s", t.Name, t.Value))
	}
	if len(pairs) > 0 {
		logOpts["tag"] = strings.Join(pairs, ",")
	}

	c.opts.HostConfig.LogConfig = docker.LogConfig{
		Type:   "syslog",
		Config: logOpts,
	}
}
// configureMem applies the task's memory limit to the container config.
// Setting MemorySwap equal to Memory disables swap for the container.
// A zero Memory() means "no limit requested" and leaves docker defaults.
func (c *cookie) configureMem(log logrus.FieldLogger) {
	limit := c.task.Memory()
	if limit == 0 {
		return
	}

	m := int64(limit)
	c.opts.Config.Memory = m
	c.opts.Config.MemorySwap = m // equal to Memory => swap disabled
	c.opts.Config.KernelMemory = m
}
// configureFsSize imposes the task's file system size limit (in MB) via the
// docker storage-opt "size" option. A zero FsSize() means no limit.
func (c *cookie) configureFsSize(log logrus.FieldLogger) {
	size := c.task.FsSize()
	if size == 0 {
		return
	}

	// Lazily create the storage options map on first use.
	if c.opts.HostConfig.StorageOpt == nil {
		c.opts.HostConfig.StorageOpt = make(map[string]string)
	}

	sizeOpt := fmt.Sprintf("%vM", size)
	log.WithFields(logrus.Fields{"size": sizeOpt, "call_id": c.task.Id()}).Debug("setting storage option")
	c.opts.HostConfig.StorageOpt["size"] = sizeOpt
}
// configureTmpFs mounts a tmpfs at /tmp when the task requests a size-limited
// tmpfs or when the driver enforces a read-only root fs (in which case the
// function still needs somewhere writable).
func (c *cookie) configureTmpFs(log logrus.FieldLogger) {
	sizeMB := c.task.TmpFsSize()

	// With a writable root fs and no tmpfs size limit there is nothing to
	// do: the function can already write wherever it wants.
	if sizeMB == 0 && !c.drv.conf.EnableReadOnlyRootFs {
		return
	}

	if c.opts.HostConfig.Tmpfs == nil {
		c.opts.HostConfig.Tmpfs = make(map[string]string)
	}

	// An empty option string mounts an unlimited tmpfs (the RO-root case).
	var mountOpts string
	if sizeMB != 0 {
		if inodes := c.drv.conf.MaxTmpFsInodes; inodes != 0 {
			mountOpts = fmt.Sprintf("size=%dm,nr_inodes=%d", sizeMB, inodes)
		} else {
			mountOpts = fmt.Sprintf("size=%dm", sizeMB)
		}
	}

	log.WithFields(logrus.Fields{"target": "/tmp", "options": mountOpts, "call_id": c.task.Id()}).Debug("setting tmpfs")
	c.opts.HostConfig.Tmpfs["/tmp"] = mountOpts
}
// configureIOFs bind-mounts the task's unix-domain-socket directory into the
// container at /iofs, used by the http-stream format for request I/O.
func (c *cookie) configureIOFs(log logrus.FieldLogger) {
	sockDir := c.task.UDSPath()
	if sockDir == "" {
		// TODO this should be required soon-ish
		return
	}

	c.opts.HostConfig.Binds = append(c.opts.HostConfig.Binds, sockDir+":/iofs")
}
// configureVolumes registers each task volume as a container volume and adds
// the corresponding host:container bind mount. Each Volumes() entry is a
// [hostDir, containerDir] pair.
func (c *cookie) configureVolumes(log logrus.FieldLogger) {
	vols := c.task.Volumes()
	if len(vols) == 0 {
		return
	}

	if c.opts.Config.Volumes == nil {
		c.opts.Config.Volumes = map[string]struct{}{}
	}

	for _, pair := range vols {
		host, cont := pair[0], pair[1]
		c.opts.Config.Volumes[cont] = struct{}{}
		bind := fmt.Sprintf("%s:%s", host, cont)
		c.opts.HostConfig.Binds = append(c.opts.HostConfig.Binds, bind)
		log.WithFields(logrus.Fields{"volumes": bind, "call_id": c.task.Id()}).Debug("setting volumes")
	}
}
// configureCPU translates the task's milli-CPU allotment into a CFS
// CPUQuota/CPUPeriod pair (see Linux cgroups CFS v1 documentation and the
// docker run --cpu-quota / --cpu-period options). For example, CPUs() of
// 8000 milli-CPUs becomes a quota of 800000 usec per 100000 usec period,
// i.e. roughly 8 CPUs. A zero CPUs() leaves docker defaults.
func (c *cookie) configureCPU(log logrus.FieldLogger) {
	milliCPUs := c.task.CPUs()
	if milliCPUs == 0 {
		return
	}

	const period = int64(100000) // standard CFS period, in usec
	quota := int64(milliCPUs * 100)

	log.WithFields(logrus.Fields{"quota": quota, "period": period, "call_id": c.task.Id()}).Debug("setting CPU")
	c.opts.HostConfig.CPUQuota = quota
	c.opts.HostConfig.CPUPeriod = period
}
// configureWorkDir sets the container's working directory from the task,
// leaving the image default in place when the task specifies none.
func (c *cookie) configureWorkDir(log logrus.FieldLogger) {
	dir := c.task.WorkDir()
	if dir == "" {
		return
	}

	log.WithFields(logrus.Fields{"wd": dir, "call_id": c.task.Id()}).Debug("setting work dir")
	c.opts.Config.WorkingDir = dir
}
// configureHostname sets the container hostname to the driver's hostname,
// unless a container NetworkMode is set (docker rejects a custom hostname
// combined with container network mode).
func (c *cookie) configureHostname(log logrus.FieldLogger) {
	if c.opts.HostConfig.NetworkMode != "" {
		return
	}

	host := c.drv.hostname
	log.WithFields(logrus.Fields{"hostname": host, "call_id": c.task.Id()}).Debug("setting hostname")
	c.opts.Config.Hostname = host
}
// configureCmd splits the task's command on whitespace and uses the result
// as the container command, leaving the image entrypoint/cmd alone when the
// task specifies no command.
func (c *cookie) configureCmd(log logrus.FieldLogger) {
	command := c.task.Command()
	if command == "" {
		return
	}

	// NOTE: this is hyper-sensitive and may not be correct like this even, but it passes old tests
	args := strings.Fields(command)
	log.WithFields(logrus.Fields{"call_id": c.task.Id(), "cmd": args, "len": len(args)}).Debug("docker command")
	c.opts.Config.Cmd = args
}
// configureEnv appends the task's environment variables to the container
// env as "NAME=value" entries. Map iteration order is unspecified, so the
// resulting order is arbitrary (env lookup by the process is by name).
func (c *cookie) configureEnv(log logrus.FieldLogger) {
	vars := c.task.EnvVars()
	if len(vars) == 0 {
		return
	}

	if c.opts.Config.Env == nil {
		c.opts.Config.Env = make([]string, 0, len(vars))
	}
	for name, value := range vars {
		c.opts.Config.Env = append(c.opts.Config.Env, name+"="+value)
	}
}
// Close implements Cookie. It removes the task's container, then releases
// the cookie's prefork-pool and network reservations. The pool/network
// release runs even when container removal fails; the removal error is
// returned to the caller.
func (c *cookie) Close(ctx context.Context) error {
err := c.drv.removeContainer(ctx, c.task.Id())
c.drv.unpickPool(c)
c.drv.unpickNetwork(c)
return err
}
// Run implements Cookie. It delegates to the driver to start the task's
// container and returns the driver's WaitResult for the caller to wait on.
func (c *cookie) Run(ctx context.Context) (drivers.WaitResult, error) {
return c.drv.run(ctx, c.task.Id(), c.task)
}
// ContainerOptions implements Cookie. It exposes the underlying
// docker.CreateContainerOptions (as interface{} to satisfy the
// driver-agnostic Cookie interface).
func (c *cookie) ContainerOptions() interface{} {
return c.opts
}
// Freeze implements Cookie. It pauses the task's container via the docker
// API; any pause error is logged and returned.
func (c *cookie) Freeze(ctx context.Context) error {
	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "Freeze"})
	log.WithFields(logrus.Fields{"call_id": c.task.Id()}).Debug("docker pause")

	err := c.drv.docker.PauseContainer(c.task.Id(), ctx)
	if err != nil {
		// Log through the context logger (not the global logrus logger)
		// so the "stack" and other request-scoped fields are preserved.
		log.WithError(err).WithFields(logrus.Fields{"call_id": c.task.Id()}).Error("error pausing container")
	}
	return err
}
// Unfreeze implements Cookie. It unpauses the task's container via the
// docker API; any unpause error is logged and returned.
func (c *cookie) Unfreeze(ctx context.Context) error {
	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"stack": "Unfreeze"})
	log.WithFields(logrus.Fields{"call_id": c.task.Id()}).Debug("docker unpause")

	err := c.drv.docker.UnpauseContainer(c.task.Id(), ctx)
	if err != nil {
		// Log through the context logger (not the global logrus logger)
		// so the "stack" and other request-scoped fields are preserved.
		log.WithError(err).WithFields(logrus.Fields{"call_id": c.task.Id()}).Error("error unpausing container")
	}
	return err
}
var _ drivers.Cookie = &cookie{}