Response size clamp (#786)

*) Limit the HTTP response body or JSON response size to FN_MAX_RESPONSE_SIZE (default: unlimited)
*) If the limit is exceeded, a 502 is returned with 'body too large' in the error message
Tolga Ceylan
2018-03-01 17:14:50 -08:00
committed by GitHub
parent a30c2c8f1b
commit 89a1fc7c72
7 changed files with 92 additions and 18 deletions
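
The clamp itself is a small io.Writer wrapper, common.NewClampWriter, used on both the hot and cold paths below. The following is a sketch reconstructed from its call sites in this diff (NewClampWriter(w, max, err), with max == 0 meaning unlimited and err surfaced once the limit is crossed), not the repository's exact implementation:

package common

import "io"

// clampWriter wraps an io.Writer and refuses to write more than `remaining`
// bytes, surfacing overErr once the limit is exceeded.
type clampWriter struct {
	w         io.Writer
	remaining uint64
	overErr   error
}

// NewClampWriter returns w unchanged when max is 0 (unlimited, the default);
// otherwise it returns a writer that errors with overErr after max bytes.
func NewClampWriter(w io.Writer, max uint64, overErr error) io.Writer {
	if max == 0 {
		return w
	}
	return &clampWriter{w: w, remaining: max, overErr: overErr}
}

func (c *clampWriter) Write(p []byte) (int, error) {
	if c.remaining == 0 {
		return 0, c.overErr
	}
	if uint64(len(p)) > c.remaining {
		// write what still fits, then report the overflow
		n, err := c.w.Write(p[:c.remaining])
		c.remaining -= uint64(n)
		if err == nil {
			err = c.overErr
		}
		return n, err
	}
	n, err := c.w.Write(p)
	c.remaining -= uint64(n)
	return n, err
}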

@@ -124,7 +124,7 @@ func New(da DataAccess) Agent {
 	// TODO: Create drivers.New(runnerConfig)
 	driver := docker.NewDocker(drivers.Config{
-		ServerVersion: "17.06.0-ce",
+		ServerVersion: cfg.MinDockerVersion,
 	})
 	a := &agent{
@@ -518,10 +518,11 @@ func (s *coldSlot) Close(ctx context.Context) error {
 // implements Slot
 type hotSlot struct {
-	done      chan struct{} // signal we are done with slot
-	errC      <-chan error  // container error
-	container *container    // TODO mask this
-	err       error
+	done        chan struct{} // signal we are done with slot
+	errC        <-chan error  // container error
+	container   *container    // TODO mask this
+	maxRespSize uint64        // TODO boo.
+	err         error
 }

 func (s *hotSlot) Close(ctx context.Context) error {
@@ -544,12 +545,16 @@ func (s *hotSlot) exec(ctx context.Context, call *call) error {
 	// swap in fresh pipes & stat accumulator to not interlace with other calls that used this slot [and timed out]
 	stdinRead, stdinWrite := io.Pipe()
-	stdoutRead, stdoutWrite := io.Pipe()
+	stdoutRead, stdoutWritePipe := io.Pipe()
 	defer stdinRead.Close()
-	defer stdoutWrite.Close()
+	defer stdoutWritePipe.Close()
+
+	// NOTE: stderr is limited separately (though line writer is vulnerable to attack?)
+	// limit the bytes allowed to be written to the stdout pipe, which handles any
+	// buffering overflows (json to a string, http to a buffer, etc)
+	stdoutWrite := common.NewClampWriter(stdoutWritePipe, s.maxRespSize, models.ErrFunctionResponseTooBig)

 	proto := protocol.New(protocol.Protocol(call.Format), stdinWrite, stdoutRead)
 	swapBack := s.container.swap(stdinRead, stdoutWrite, call.stderr, &call.Stats)
 	defer swapBack() // NOTE: it's important this runs before the pipes are closed.
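
For illustration, a hypothetical snippet (not part of the commit) that exercises the clamp the same way the hot path wires it above; the buffer, the 16-byte limit, and the import paths are assumptions about the repo layout:

package agent

import (
	"bytes"

	"github.com/fnproject/fn/api/common"
	"github.com/fnproject/fn/api/models"
)

func clampExample() error {
	var buf bytes.Buffer
	w := common.NewClampWriter(&buf, 16, models.ErrFunctionResponseTooBig)

	// anything written past the 16-byte clamp surfaces the sentinel error,
	// which the API layer can then translate into the 502 from the commit message
	_, err := w.Write([]byte("a response body longer than sixteen bytes"))
	return err // models.ErrFunctionResponseTooBig
}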
@@ -595,7 +600,7 @@ func (a *agent) prepCold(ctx context.Context, call *call, tok ResourceToken, ch
 		cpus:    uint64(call.CPUs),
 		timeout: time.Duration(call.Timeout) * time.Second, // this is unnecessary, but in case removal fails...
 		stdin:   call.req.Body,
-		stdout:  call.w,
+		stdout:  common.NewClampWriter(call.w, a.cfg.MaxResponseSize, models.ErrFunctionResponseTooBig),
 		stderr:  call.stderr,
 		stats:   &call.Stats,
 	}
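
The cold path uses a.cfg.MaxResponseSize, which per the commit message comes from the FN_MAX_RESPONSE_SIZE environment variable with an unlimited (zero) default. A sketch of that plumbing follows; the helper name and error handling here are invented for illustration:

package agent

import (
	"os"
	"strconv"
)

// maxResponseSizeFromEnv is a hypothetical helper showing how FN_MAX_RESPONSE_SIZE
// could feed cfg.MaxResponseSize; a zero value leaves the clamp disabled (unlimited).
func maxResponseSizeFromEnv() (uint64, error) {
	v := os.Getenv("FN_MAX_RESPONSE_SIZE")
	if v == "" {
		return 0, nil // unset: unlimited, the documented default
	}
	return strconv.ParseUint(v, 10, 64)
}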
@@ -684,7 +689,7 @@ func (a *agent) runHot(ctx context.Context, call *call, tok ResourceToken, state
 	default: // ok
 	}

-	slot := &hotSlot{make(chan struct{}), errC, container, nil}
+	slot := &hotSlot{done: make(chan struct{}), errC: errC, container: container, maxRespSize: a.cfg.MaxResponseSize}
 	if !a.runHotReq(ctx, call, state, logger, cookie, slot) {
 		return
 	}
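
models.ErrFunctionResponseTooBig is the sentinel handed to the clamp writers above; per the commit message it surfaces as a 502 with 'body too large'. A sketch of how such an API error value might look, where the apiError type and the exact message are assumptions rather than the repository's definition:

package models

import (
	"errors"
	"net/http"
)

// apiError pairs an HTTP status code with an error message (assumed shape).
type apiError struct {
	code int
	error
}

func (e apiError) Code() int { return e.code }

// ErrFunctionResponseTooBig maps to a 502 whose message reports the body as too large.
var ErrFunctionResponseTooBig = apiError{
	code:  http.StatusBadGateway,
	error: errors.New("function response body too large"),
}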