http-stream format (#1202)
* POC code for inotify UDS-io-socket

* http-stream format

introducing the `http-stream` format support in fn. there are many details for this, none of which can be linked from github :( -- docs are coming (I could even try to add some here?). this is kinda MVP-ish level, but does not implement the remaining spec, ie 'headers' fixing up / invoke fixing up. the thinking being we can land this to test fdks / cli with and start splitting work up on top of this. all other formats work the same as previous (no breakage, only new stuff).

with the cli you can set `format: http-stream` and deploy, and then invoke a function via the `http-stream` format. this uses a unix domain socket (uds) on the container instead of the previous stdin/stdout, and fdks will have to support this in a new fashion (will see about getting docs on here). fdk-go works, which is here: https://github.com/fnproject/fdk-go/pull/30 . the output looks the same as an http format function when invoking a function. wahoo.

there's some amount of stuff we can clean up here, enumerated:

* the cleanup of the sock files is iffy, high pri here
* permissions are a pain in the ass and i punted on dealing with them. you can run `sudo ./fnserver` if running locally; it may/may not work in dind(?) ootb
* no pipe usage at all (yay), still could reduce buffer usage around the pipe behavior, we could clean this up potentially before removal (and tests)
* my brain can't figure out if dispatchOldFormats changes pipe behavior, but tests work
* i marked XXX to do some clean up which will follow soon… need this to test fdk tho so meh, any thoughts on those marked would be appreciated however (one less decision for me). mostly happy w/ general shape/plumbing tho
* there are no tests atm, this is a tricky dance indeed. attempts were made. need to futz with the permission stuff before committing to adding any tests here, which I don't like either. also, need to get the fdk-go based test image updated according to the fdk-go, and there's a dance there too. rumba time..
* delaying the big big cleanup until we have good enough fdk support to kill all the other formats. open to ideas on how to maneuver landing stuff...

* fix unmount

* see if the tests work on ci...

* add call id header

* fix up makefile

* add configurable iofs opts

* add format file describing http-stream contract

* rm some cruft

* default iofs to /tmp, remove mounting

out of the box fn can't mount. /tmp will provide a memory backed fs for us on most systems, this will be fine for local developing and this can be configured to be wherever for anyone that wants to make things more difficult for themselves. also removes the mounting, this has to be done as root. we can't do this in the oss fn (short of requesting root, but no). in the future, we may want to have a knob here to have a function that can be configured in fn that allows further configuration here. since we don't know what we need in this dept really, not doing that yet (it may be the case that it could be done operationally outside of fn, eg, but not if each directory needs to be configured itself, which seems likely, anyway...)

* add WIP note just in case...
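for anyone poking at fdks against this, the contract boils down to: fn sets `FN_FORMAT=http-stream` and `FN_LISTENER=unix:/iofs/lsnr.sock` in the function env, the function listens on that socket, and every invoke arrives as one plain HTTP request over it (carrying the `FN_CALL_ID` header). a rough sketch of the function side -- this is not the fdk-go implementation linked above, and the 0666 chmod is just one blunt way to satisfy the sock permission notes in createIOFS below:

package main

import (
	"fmt"
	"net"
	"net/http"
	"os"
	"strings"
)

func main() {
	// FN_LISTENER looks like "unix:/iofs/lsnr.sock"; strip the scheme to get the path.
	addr := strings.TrimPrefix(os.Getenv("FN_LISTENER"), "unix:")

	// creating lsnr.sock is what unblocks fn's inotify watcher (inotifyAwait below).
	ln, err := net.Listen("unix", addr)
	if err != nil {
		panic(err)
	}

	// assumption: make the socket writable to the agent's user (see the
	// /tmp vs /var/run permission notes in createIOFS).
	if err := os.Chmod(addr, 0666); err != nil {
		panic(err)
	}

	// each invocation is one HTTP request over the socket.
	http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "hello, call %s\n", r.Header.Get("FN_CALL_ID"))
	}))
}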
@@ -4,7 +4,12 @@ import (
	"bytes"
	"context"
+	"errors"
	"fmt"
	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
	"strings"
	"sync"
	"time"
@@ -15,6 +20,7 @@ import (
	"github.com/fnproject/fn/api/id"
	"github.com/fnproject/fn/api/models"
	"github.com/fnproject/fn/fnext"
+	"github.com/fsnotify/fsnotify"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/stats"
	"go.opencensus.io/trace"
@@ -646,6 +652,7 @@ type hotSlot struct {
	errC      <-chan error // container error
	container *container   // TODO mask this
	cfg       *Config
+	udsClient http.Client
	fatalErr  error
	containerSpan trace.SpanContext
}
@@ -681,26 +688,14 @@ func (s *hotSlot) exec(ctx context.Context, call *call) error {
		Type: trace.LinkTypeChild,
	})

-	// swap in fresh pipes & stat accumulator to not interlace with other calls that used this slot [and timed out]
-	stdinRead, stdinWrite := io.Pipe()
-	stdoutRead, stdoutWritePipe := io.Pipe()
-	defer stdinRead.Close()
-	defer stdoutWritePipe.Close()
	call.req = call.req.WithContext(ctx) // TODO this is funny biz reed is bad

-	// NOTE: stderr is limited separately (though line writer is vulnerable to attack?)
-	// limit the bytes allowed to be written to the stdout pipe, which handles any
-	// buffering overflows (json to a string, http to a buffer, etc)
-	stdoutWrite := common.NewClampWriter(stdoutWritePipe, s.cfg.MaxResponseSize, models.ErrFunctionResponseTooBig)
-
-	proto := protocol.New(protocol.Protocol(call.Format), stdinWrite, stdoutRead)
-	swapBack := s.container.swap(stdinRead, stdoutWrite, call.stderr, &call.Stats)
-	defer swapBack() // NOTE: it's important this runs before the pipes are closed.
-
-	errApp := make(chan error, 1)
-	go func() {
-		ci := protocol.NewCallInfo(call.IsCloudEvent, call.Call, call.req.WithContext(ctx))
-		errApp <- proto.Dispatch(ctx, ci, call.w)
-	}()
+	var errApp chan error
+	if call.Format == models.FormatHTTPStream {
+		errApp = s.dispatch(ctx, call)
+	} else { // TODO remove this block one glorious day
+		errApp = s.dispatchOldFormats(ctx, call)
+	}

	select {
	case err := <-s.errC: // error from container
@@ -723,6 +718,93 @@ func (s *hotSlot) exec(ctx context.Context, call *call) error {
	}
}

+func (s *hotSlot) dispatch(ctx context.Context, call *call) chan error {
+	ctx, span := trace.StartSpan(ctx, "agent_dispatch_httpstream")
+	defer span.End()
+
+	// TODO we can't trust that resp.Write doesn't timeout, even if the http
+	// client should respect the request context (right?) so we still need this (right?)
+	errApp := make(chan error, 1)
+
+	req := call.req
+	req.RequestURI = "" // we have to clear this before using it as a client request, see https://golang.org/pkg/net/http/#Request
+
+	//req.Header.Set("FN_DEADLINE", ci.Deadline().String())
+	req.Header.Set("FN_CALL_ID", call.ID)
+
+	go func() {
+		resp, err := s.udsClient.Do(req)
+		if err != nil {
+			errApp <- err
+			return
+		}
+		defer resp.Body.Close()
+
+		select {
+		case errApp <- writeResp(resp, call.w):
+		case <-ctx.Done():
+			errApp <- ctx.Err()
+		}
+	}()
+	return errApp
+}
+
+// XXX(reed): dupe code in http proto (which will die...)
+func writeResp(resp *http.Response, w io.Writer) error {
+	rw, ok := w.(http.ResponseWriter)
+	if !ok {
+		return resp.Write(w)
+	}
+
+	// if we're writing directly to the response writer, we need to set headers
+	// and status code, and only copy the body. resp.Write would copy a full
+	// http request into the response body (not what we want).
+
+	for k, vs := range resp.Header {
+		for _, v := range vs {
+			rw.Header().Add(k, v)
+		}
+	}
+	if resp.StatusCode > 0 {
+		rw.WriteHeader(resp.StatusCode)
+	}
+	_, err := io.Copy(rw, resp.Body)
+	return err
+}
+
+// TODO remove
+func (s *hotSlot) dispatchOldFormats(ctx context.Context, call *call) chan error {
+
+	errApp := make(chan error, 1)
+	go func() {
+		// XXX(reed): this may be liable to leave the pipes fucked up if dispatch times out, eg
+		// we may need ye ole close() func to put the Close()/swapBack() in from the caller
+
+		// swap in fresh pipes & stat accumulator to not interlace with other calls that used this slot [and timed out]
+		stdinRead, stdinWrite := io.Pipe()
+		stdoutRead, stdoutWritePipe := io.Pipe()
+		defer stdinRead.Close()
+		defer stdoutWritePipe.Close()
+
+		// NOTE: stderr is limited separately (though line writer is vulnerable to attack?)
+		// limit the bytes allowed to be written to the stdout pipe, which handles any
+		// buffering overflows (json to a string, http to a buffer, etc)
+		stdoutWrite := common.NewClampWriter(stdoutWritePipe, s.cfg.MaxResponseSize, models.ErrFunctionResponseTooBig)
+
+		swapBack := s.container.swap(stdinRead, stdoutWrite, call.stderr, &call.Stats)
+		defer swapBack() // NOTE: it's important this runs before the pipes are closed.
+
+		// TODO this should get killed completely
+		// TODO we could alternatively dial in and use the conn as stdin/stdout for an interim solution
+		// XXX(reed): ^^^ do we need that for the cloud event dance ????
+		proto := protocol.New(protocol.Protocol(call.Format), stdinWrite, stdoutRead)
+		ci := protocol.NewCallInfo(call.IsCloudEvent, call.Call, call.req)
+		errApp <- proto.Dispatch(ctx, ci, call.w)
+	}()
+
+	return errApp
+}
+
func (a *agent) prepCold(ctx context.Context, call *call, tok ResourceToken, ch chan Slot) {
	ctx, span := trace.StartSpan(ctx, "agent_prep_cold")
	defer span.End()
@@ -792,8 +874,33 @@ func (a *agent) runHot(ctx context.Context, call *call, tok ResourceToken, state
	state.UpdateState(ctx, ContainerStateStart, call.slots)
	defer state.UpdateState(ctx, ContainerStateDone, call.slots)

-	container, closer := newHotContainer(ctx, call, &a.cfg)
-	defer closer()
+	container, err := newHotContainer(ctx, call, &a.cfg)
+	if err != nil {
+		call.slots.queueSlot(&hotSlot{done: make(chan struct{}), fatalErr: err})
+		return
+	}
+	defer container.Close()
+
+	// NOTE: soon this isn't assigned in a branch...
+	var udsClient http.Client
+	udsAwait := make(chan error)
+	if call.Format == models.FormatHTTPStream {
+		// start our listener before starting the container, so we don't miss the pretty things whispered in our ears
+		// XXX(reed): figure out cleaner way to carry around the directory and expose the lsnr.sock file
+		go inotifyUDS(ctx, container.UDSPath(), udsAwait)
+
+		udsClient = http.Client{
+			Transport: &http.Transport{
+				// XXX(reed): other settings ?
+				DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+					var d net.Dialer
+					return d.DialContext(ctx, "unix", container.UDSPath()+"/lsnr.sock") // XXX(reed): hardcoded lsnr.sock
+				},
+			},
+		}
+	} else {
+		close(udsAwait) // XXX(reed): short case first / kill this
+	}

	logger := logrus.WithFields(logrus.Fields{"id": container.id, "app_id": call.AppID, "route": call.Path, "image": call.Image, "memory": call.Memory, "cpus": call.CPUs, "format": call.Format, "idle_timeout": call.IdleTimeout})
	ctx = common.WithLogger(ctx, logger)
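one subtlety in the client above that's easy to miss: the Transport's DialContext ignores the host/port it's handed and always dials the container's lsnr.sock, so the URL host on requests made through udsClient is purely a placeholder. a hypothetical helper (not part of this change) to make that explicit:

// invokeOverUDS sends one request to the function. "localhost" is never
// resolved; the custom DialContext above routes every request to lsnr.sock.
func invokeOverUDS(ctx context.Context, udsClient *http.Client, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest("POST", "http://localhost/", body)
	if err != nil {
		return nil, err
	}
	return udsClient.Do(req.WithContext(ctx))
}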
@@ -804,7 +911,7 @@ func (a *agent) runHot(ctx context.Context, call *call, tok ResourceToken, state
		return
	}

-	defer cookie.Close(ctx) // NOTE ensure this ctx doesn't time out
+	defer cookie.Close(ctx)

	err = a.driver.PrepareCookie(ctx, cookie)
	if err != nil {
@@ -818,6 +925,19 @@ func (a *agent) runHot(ctx context.Context, call *call, tok ResourceToken, state
		return
	}

+	// now we wait for the socket to be created before handing out any slots
+	select {
+	case err := <-udsAwait: // XXX(reed): need to leave a note about pairing ctx here?
+		// sends a nil error if all is good, we can proceed...
+		if err != nil {
+			call.slots.queueSlot(&hotSlot{done: make(chan struct{}), fatalErr: err})
+			return
+		}
+	case <-ctx.Done():
+		call.slots.queueSlot(&hotSlot{done: make(chan struct{}), fatalErr: ctx.Err()})
+		return
+	}

	// container is running
	state.UpdateState(ctx, ContainerStateIdle, call.slots)

@@ -843,6 +963,7 @@ func (a *agent) runHot(ctx context.Context, call *call, tok ResourceToken, state
		errC:          errC,
		container:     container,
		cfg:           &a.cfg,
+		udsClient:     udsClient,
		containerSpan: trace.FromContext(ctx).SpanContext(),
	}
	if !a.runHotReq(ctx, call, state, logger, cookie, slot) {
@@ -867,6 +988,82 @@ func (a *agent) runHot(ctx context.Context, call *call, tok ResourceToken, state
	logger.WithError(res.Error()).Info("hot function terminated")
}

+func createIOFS(cfg *Config) (string, error) {
+	// XXX(reed): need to ensure these are cleaned up if any of these ops in here fail...
+
+	dir := cfg.IOFSPath
+	if dir == "" {
+		// /tmp should be a memory backed filesystem, where we can get user perms
+		// on the socket file (fdks must give write permissions to users on sock).
+		// /var/run is root only, hence this...
+		dir = "/tmp"
+	}
+
+	// create a tmpdir
+	iofsDir, err := ioutil.TempDir(dir, "iofs")
+	if err != nil {
+		return "", fmt.Errorf("cannot create tmpdir for iofs: %v", err)
+	}
+
+	opts := cfg.IOFSOpts
+	if opts == "" {
+		// opts = "size=1k,nr_inodes=8,mode=0777"
+	}
+
+	// under tmpdir, create tmpfs
+	// TODO uh, yea, idk
+	//if cfg.IOFSPath != "" {
+	//err = syscall.Mount("tmpfs", iofsDir, "tmpfs", uintptr( /*syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV*/ 0), opts)
+	//if err != nil {
+	//return "", fmt.Errorf("cannot mount/create tmpfs=%s", iofsDir)
+	//}
+	//}
+
+	return iofsDir, nil
+}
+
+func inotifyUDS(ctx context.Context, iofsDir string, awaitUDS chan<- error) {
+	// XXX(reed): I forgot how to plumb channels temporarily forgive me for this sin (inotify will timeout, this is just bad programming)
+	err := inotifyAwait(ctx, iofsDir)
+	select {
+	case awaitUDS <- err:
+	case <-ctx.Done():
+	}
+}
+
+func inotifyAwait(ctx context.Context, iofsDir string) error {
+	ctx, span := trace.StartSpan(ctx, "inotify_await")
+	defer span.End()
+
+	fsWatcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		return fmt.Errorf("error getting fsnotify watcher: %v", err)
+	}
+	defer fsWatcher.Close()
+
+	err = fsWatcher.Add(iofsDir)
+	if err != nil {
+		return fmt.Errorf("error adding iofs dir to fswatcher: %v", err)
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			// XXX(reed): damn it would sure be nice to tell users they didn't make a uds and that's why it timed out
+			return ctx.Err()
+		case err := <-fsWatcher.Errors:
+			return fmt.Errorf("error watching for iofs: %v", err)
+		case event := <-fsWatcher.Events:
+			common.Logger(ctx).WithField("event", event).Debug("fsnotify event")
+			if event.Op&fsnotify.Create == fsnotify.Create && event.Name == iofsDir+"/lsnr.sock" {
+				// XXX(reed): hardcoded /lsnr.sock path
+				// wait until the socket file is created by the container
+				return nil
+			}
+		}
+	}
+}
+
// runHotReq enqueues a free slot to slot queue manager and watches various timers and the consumer until
// the slot is consumed. A return value of false means the container should shut down and no subsequent
// calls should be made to this function.
@@ -980,8 +1177,10 @@ type container struct {
	cpus      uint64
	fsSize    uint64
	tmpFsSize uint64
+	iofs      string
	timeout   time.Duration // cold only (superfluous, but in case)
	logCfg    drivers.LoggerConfig
+	close     func()

	stdin  io.Reader
	stdout io.Writer
@@ -993,7 +1192,7 @@ type container struct {
}

//newHotContainer creates a container that can be used for multiple sequential events
-func newHotContainer(ctx context.Context, call *call, cfg *Config) (*container, func()) {
+func newHotContainer(ctx context.Context, call *call, cfg *Config) (*container, error) {
	// if freezer is enabled, be consistent with freezer behavior and
	// block stdout and stderr between calls.
	isBlockIdleIO := MaxMsDisabled != cfg.FreezeIdle
@@ -1038,33 +1237,61 @@ func newHotContainer(ctx context.Context, call *call, cfg *Config) (*container,
		stderr.Swap(newLineWriterWithBuffer(buf2, sec))
	}

+	var iofs string
+	var err error
+	closer := func() {} // XXX(reed):
+	if call.Format == models.FormatHTTPStream {
+		// XXX(reed): we should also point stdout to stderr, and not have stdin
+
+		iofs, err = createIOFS(cfg)
+		if err != nil {
+			return nil, err
+		}
+
+		// XXX(reed): futz with this, we have to make sure shit gets cleaned up properly
+		closer = func() {
+			//err := syscall.Unmount(iofs, 0)
+			//if err != nil {
+			//common.Logger(ctx).WithError(err).Error("error unmounting iofs")
+			//}
+
+			err = os.RemoveAll(iofs)
+			if err != nil {
+				common.Logger(ctx).WithError(err).Error("error removing iofs")
+			}
+		}
+	}
+
	return &container{
-		id:         id, // XXX we could just let docker generate ids...
-		image:      call.Image,
-		env:        map[string]string(call.Config),
-		extensions: call.extensions,
-		memory:     call.Memory,
-		cpus:       uint64(call.CPUs),
-		fsSize:     cfg.MaxFsSize,
-		tmpFsSize:  uint64(call.TmpFsSize),
-		logCfg: drivers.LoggerConfig{
-			URL: strings.TrimSpace(call.SyslogURL),
-			Tags: []drivers.LoggerTag{
-				{Name: "app_name", Value: call.AppName},
-				{Name: "func_name", Value: call.Path},
-			},
-		},
-		stdin:  stdin,
-		stdout: stdout,
-		stderr: stderr,
-	}, func() {
-		stdin.Close()
-		stderr.Close()
-		stdout.Close()
-		for _, b := range bufs {
-			bufPool.Put(b)
-		}
-	}
+		id:         id, // XXX we could just let docker generate ids...
+		image:      call.Image,
+		env:        map[string]string(call.Config),
+		extensions: call.extensions,
+		memory:     call.Memory,
+		cpus:       uint64(call.CPUs),
+		fsSize:     cfg.MaxFsSize,
+		tmpFsSize:  uint64(call.TmpFsSize),
+		iofs:       iofs,
+		logCfg: drivers.LoggerConfig{
+			URL: strings.TrimSpace(call.SyslogURL),
+			Tags: []drivers.LoggerTag{
+				{Name: "app_name", Value: call.AppName},
+				{Name: "func_name", Value: call.Path},
+			},
+		},
+		stdin:  stdin,
+		stdout: stdout,
+		stderr: stderr,
+		close: func() {
+			stdin.Close()
+			stderr.Close()
+			stdout.Close()
+			for _, b := range bufs {
+				bufPool.Put(b)
+			}
+			closer() // XXX(reed): clean up
+		},
+	}, nil
}

func (c *container) swap(stdin io.Reader, stdout, stderr io.Writer, cs *drivers.Stats) func() {
@@ -1094,7 +1321,7 @@ func (c *container) Input() io.Reader { return c.stdin }
func (c *container) Logger() (io.Writer, io.Writer) { return c.stdout, c.stderr }
func (c *container) Volumes() [][2]string { return nil }
func (c *container) WorkDir() string { return "" }
-func (c *container) Close() {}
+func (c *container) Close() { c.close() }
func (c *container) Image() string { return c.image }
func (c *container) Timeout() time.Duration { return c.timeout }
func (c *container) EnvVars() map[string]string { return c.env }
@@ -1104,6 +1331,7 @@ func (c *container) FsSize() uint64 { return c.fsSize }
func (c *container) TmpFsSize() uint64 { return c.tmpFsSize }
func (c *container) Extensions() map[string]string { return c.extensions }
func (c *container) LoggerConfig() drivers.LoggerConfig { return c.logCfg }
+func (c *container) UDSPath() string { return c.iofs }

// WriteStat publishes each metric in the specified Stats structure as a histogram metric
func (c *container) WriteStat(ctx context.Context, stat drivers.Stat) {

@@ -311,6 +311,9 @@ func buildConfigWithPath(app *models.App, fn *models.Fn, path string) models.Con
	}

	conf["FN_FORMAT"] = fn.Format
+	if fn.Format == models.FormatHTTPStream { // TODO should be always soon...
+		conf["FN_LISTENER"] = "unix:/iofs/lsnr.sock" // XXX(reed): hardcoding this is ok right? it's a contract
+	}
	conf["FN_APP_NAME"] = app.Name
	conf["FN_PATH"] = path
	// TODO: might be a good idea to pass in: "FN_BASE_PATH" = fmt.Sprintf("/r/%s", appName) || "/" if using DNS entries per app
@@ -433,12 +436,15 @@ func (a *agent) GetCall(opts ...CallOpt) (Call, error) {
	c.handler = a.da
	c.ct = a
+	// TODO(reed): is line writer vulnerable to attack?
	c.stderr = setupLogger(c.req.Context(), a.cfg.MaxLogSize, !a.cfg.DisableDebugUserLogs, c.Call)
	if c.w == nil {
		// send STDOUT to logs if no writer given (async...)
		// TODO we could/should probably make this explicit to GetCall, ala 'WithLogger', but it's dupe code (who cares?)
		c.w = c.stderr
	}
+	// NOTE: we need to limit the output size(?) since users may not use fdk we can't limit it there
+	// c.w = common.NewClampWriter(c.w, a.cfg.MaxResponseSize, models.ErrFunctionResponseTooBig)

	return &c, nil
}

@@ -33,6 +33,8 @@ type Config struct {
	DisableReadOnlyRootFs bool `json:"disable_readonly_rootfs"`
	DisableTini bool `json:"disable_tini"`
	DisableDebugUserLogs bool `json:"disable_debug_user_logs"`
+	IOFSPath string `json:"iofs_path"`
+	IOFSOpts string `json:"iofs_opts"`
}

const (
@@ -83,6 +85,10 @@ const (
	EnvDisableTini = "FN_DISABLE_TINI"
	// EnvDisableDebugUserLogs disables user function logs being logged at level debug. wise to enable for production.
	EnvDisableDebugUserLogs = "FN_DISABLE_DEBUG_USER_LOGS"
+	// EnvIOFSPath is the path of a directory to configure for unix socket files for each container
+	EnvIOFSPath = "FN_IOFS_PATH"
+	// EnvIOFSOpts are the options to set when mounting the iofs directory for unix socket files
+	EnvIOFSOpts = "FN_IOFS_OPTS"

	// MaxMsDisabled is used to determine whether mr freeze is lying in wait. TODO remove this maneuver
	MaxMsDisabled = time.Duration(math.MaxInt64)
@@ -123,6 +129,8 @@ func NewConfig() (*Config, error) {
	err = setEnvStr(err, EnvDockerNetworks, &cfg.DockerNetworks)
	err = setEnvStr(err, EnvDockerLoadFile, &cfg.DockerLoadFile)
	err = setEnvUint(err, EnvMaxTmpFsInodes, &cfg.MaxTmpFsInodes)
+	err = setEnvStr(err, EnvIOFSPath, &cfg.IOFSPath)
+	err = setEnvStr(err, EnvIOFSOpts, &cfg.IOFSOpts)

	if err != nil {
		return cfg, err

@@ -17,6 +17,7 @@ type cookie struct {
	poolId string
	// network name from docker networks if applicable
	netId string

+	// docker container create options created by Driver.CreateCookie, required for Driver.Prepare()
	opts docker.CreateContainerOptions
	// task associated with this cookie
@@ -104,6 +105,17 @@ func (c *cookie) configureTmpFs(log logrus.FieldLogger) {
	c.opts.HostConfig.Tmpfs["/tmp"] = tmpFsOption
}

+func (c *cookie) configureIOFs(log logrus.FieldLogger) {
+	path := c.task.UDSPath()
+	if path == "" {
+		// TODO this should be required soon-ish
+		return
+	}
+
+	bind := fmt.Sprintf("%s:/iofs", path)
+	c.opts.HostConfig.Binds = append(c.opts.HostConfig.Binds, bind)
+}
+
func (c *cookie) configureVolumes(log logrus.FieldLogger) {
	if len(c.task.Volumes()) == 0 {
		return
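this bind is what joins the two halves of the contract: the agent's per-container iofs dir on the host shows up as /iofs inside the container, so the lsnr.sock an fdk creates under FN_LISTENER is the very file inotifyAwait is watching on the host. with FN_IOFS_PATH unset it looks roughly like this (host path hypothetical):

// host (agent) side:      /tmp/iofs042/lsnr.sock   <- watched by inotifyAwait
// container (fdk) side:   /iofs/lsnr.sock          <- from FN_LISTENER=unix:/iofs/lsnr.sock
// i.e. bind == "/tmp/iofs042:/iofs"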
@@ -176,12 +188,13 @@ func (c *cookie) configureEnv(log logrus.FieldLogger) {
		return
	}

-	envvars := make([]string, 0, len(c.task.EnvVars()))
-	for name, val := range c.task.EnvVars() {
-		envvars = append(envvars, name+"="+val)
-	}
-
-	c.opts.Config.Env = envvars
+	if c.opts.Config.Env == nil {
+		c.opts.Config.Env = make([]string, 0, len(c.task.EnvVars()))
+	}
+
+	for name, val := range c.task.EnvVars() {
+		c.opts.Config.Env = append(c.opts.Config.Env, name+"="+val)
+	}
}

// implements Cookie

@@ -242,6 +242,7 @@ func (drv *DockerDriver) CreateCookie(ctx context.Context, task drivers.Containe
	cookie.configureTmpFs(log)
	cookie.configureVolumes(log)
	cookie.configureWorkDir(log)
+	cookie.configureIOFs(log)

	// Order is important, if pool is enabled, it overrides pick network
	drv.pickPool(ctx, cookie)

@@ -73,6 +73,7 @@ func (c *poolTask) TmpFsSize() uint64 { return 0
func (c *poolTask) Extensions() map[string]string { return nil }
func (c *poolTask) LoggerConfig() drivers.LoggerConfig { return drivers.LoggerConfig{} }
func (c *poolTask) WriteStat(ctx context.Context, stat drivers.Stat) {}
+func (c *poolTask) UDSPath() string { return "" }

type dockerPoolItem struct {
	id string

@@ -38,6 +38,7 @@ func (f *taskDockerTest) Close() {}
func (f *taskDockerTest) Input() io.Reader { return f.input }
func (f *taskDockerTest) Extensions() map[string]string { return nil }
func (f *taskDockerTest) LoggerConfig() drivers.LoggerConfig { return drivers.LoggerConfig{} }
+func (f *taskDockerTest) UDSPath() string { return "" }

func TestRunnerDocker(t *testing.T) {
	dkr := NewDocker(drivers.Config{})

@@ -158,8 +158,13 @@ type ContainerTask interface {
	// Close should be safe to call multiple times.
	Close()

	// Extra Configuration Options
	// Extensions are extra driver specific configuration options. They should be
	// more specific but it's easier to be lazy.
	Extensions() map[string]string

+	// UDSPath to use to configure the unix domain socket. the drivers
+	// abstractions have leaked so bad at this point it's a monsoon.
+	UDSPath() string
}

// Stat is a bucket of stats from a driver at a point in time for a certain task.

@@ -125,6 +125,7 @@ type Protocol string
const (
	Default Protocol = models.FormatDefault
	HTTP Protocol = models.FormatHTTP
+	HTTPStream Protocol = models.FormatHTTPStream
	JSON Protocol = models.FormatJSON
	CloudEventP Protocol = models.FormatCloudEvent
	Empty Protocol = ""
@@ -160,7 +161,7 @@ func (p Protocol) MarshalJSON() ([]byte, error) {
// stdin/stdout.
func New(p Protocol, in io.Writer, out io.Reader) ContainerIO {
	switch p {
-	case HTTP:
+	case HTTP, HTTPStream:
		return &HTTPProtocol{in, out}
	case JSON:
		return &JSONProtocol{in, out}

@@ -21,6 +21,8 @@ const (
	FormatDefault = "default"
	// FormatHTTP ...
	FormatHTTP = "http"
+	// FormatHTTPStream ...
+	FormatHTTPStream = "http-stream"
	// FormatJSON ...
	FormatJSON = "json"
	// FormatCloudEvent ...
@@ -171,7 +171,7 @@ func (f *Fn) Validate() error {
	}

	switch f.Format {
-	case FormatDefault, FormatHTTP, FormatJSON, FormatCloudEvent:
+	case FormatDefault, FormatHTTP, FormatHTTPStream, FormatJSON, FormatCloudEvent:
	default:
		return ErrFnsInvalidFormat
	}