fn: agent MaxRequestSize limit (#998)

* fn: agent MaxRequestSize limit

Currently, LimitRequestBody() exists to install an HTTP request body
size limit in the HTTP/Gin server. For production environments, this
is expected to be used. However, the agent itself may also need to
verify/enforce these size limits, and being able to assert when such
limits are missing is valuable. With this change, operators can set
an agent environment variable to enforce the limit in addition to
installing the Gin/HTTP handler.
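
For illustration, a minimal sketch (not part of this commit) of how an
operator might enable the agent-side limit, assuming the agent package
import path github.com/fnproject/fn/api/agent; the value and error
handling are illustrative only:

package main

import (
    "fmt"
    "os"

    "github.com/fnproject/fn/api/agent"
)

func main() {
    // Cap agent-side request bodies at 5 MiB via the new env variable.
    os.Setenv("FN_MAX_REQUEST_SIZE", "5242880")

    cfg, err := agent.NewAgentConfig()
    if err != nil {
        panic(err)
    }
    fmt.Println("max request size:", cfg.MaxRequestSize) // 5242880
}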

http.MaxBytesReader is superior in some cases, as it sets HTTP
headers (Connection: close) to guard against subsequent requests on
the same connection.
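
As a sketch of that server-side approach (illustrative only; this is
not the body of LimitRequestBody(), and the middleware name here is
hypothetical):

package main

import (
    "net/http"

    "github.com/gin-gonic/gin"
)

// limitBody wraps the request body with http.MaxBytesReader; reads past
// maxSize fail, and the server is told to close the connection afterwards.
func limitBody(maxSize int64) gin.HandlerFunc {
    return func(c *gin.Context) {
        if maxSize > 0 && c.Request.Body != nil {
            c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, maxSize)
        }
        c.Next()
    }
}

func main() {
    r := gin.Default()
    r.Use(limitBody(64 * 1024)) // 64 KiB cap on request bodies
    r.POST("/echo", func(c *gin.Context) {
        c.String(http.StatusOK, "ok")
    })
    _ = r.Run(":8080")
}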

However, NewClampReadCloser() is superior in other cases, since it can
cleanly return a dedicated API error for the overflow case alone.
(http.MaxBytesReader() does not return a distinct error type on
overflow, which makes it difficult to use without peeking into its
implementation.)
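
For example, a sketch of the check that the dedicated error enables,
using the NewClampReadCloser and ErrRequestContentTooBig added in this
change (the import paths api/common and api/models are assumed):

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"

    "github.com/fnproject/fn/api/common"
    "github.com/fnproject/fn/api/models"
)

func main() {
    body := ioutil.NopCloser(strings.NewReader("this payload exceeds the limit"))

    // Clamp reads at 16 bytes; overflow surfaces as the exact API error value.
    rc := common.NewClampReadCloser(body, 16, models.ErrRequestContentTooBig)

    if _, err := io.Copy(ioutil.Discard, rc); err == models.ErrRequestContentTooBig {
        // A clean, comparable error value, easy to map to a 413 response.
        fmt.Println("request content too large")
    }
}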

For the LB agent, upcoming changes rely on such limits being enabled,
and using the Gin/HTTP handler (http.MaxBytesReader) makes such
checks/safety validations difficult.

* fn: read/write clamp code adjustment

In case of overflow, opt for a simple implementation: a partial write
followed by an error return.
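
A sketch of what that policy can look like on the writer side. The
clampWriter type mirrors the fields shown in the diff below, but the
Write body here is an assumption for illustration, not the upstream
source:

package main

import (
    "bytes"
    "fmt"
    "io"
)

// clampWriter is a local stand-in used only to illustrate the
// "partial write, then error" policy.
type clampWriter struct {
    w           io.Writer
    remaining   int64
    overflowErr error
}

func (g *clampWriter) Write(p []byte) (int, error) {
    if g.remaining <= 0 {
        return 0, g.overflowErr
    }
    if int64(len(p)) > g.remaining {
        p = p[0:g.remaining] // partial write: only what still fits
    }
    n, err := g.w.Write(p)
    g.remaining -= int64(n)
    return n, err
}

func main() {
    var buf bytes.Buffer
    w := &clampWriter{w: &buf, remaining: 5, overflowErr: fmt.Errorf("overflow")}

    n, err := w.Write([]byte("hello world")) // only "hello" fits
    fmt.Println(n, err, buf.String())        // 5 <nil> hello

    _, err = w.Write([]byte("!")) // budget exhausted
    fmt.Println(err)              // overflow
}
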
Author: Tolga Ceylan
Date:   2018-05-16 11:45:57 -07:00 (committed by GitHub)
Parent: 7a64ec9db5
Commit: eab85dfab0

6 changed files with 96 additions and 3 deletions


@@ -403,6 +403,44 @@ func (l testListener) BeforeCall(context.Context, *models.Call) error {
     return nil
 }
 
+func TestReqTooLarge(t *testing.T) {
+    app := &models.App{Name: "myapp"}
+    app.SetDefaults()
+    cm := &models.Call{
+        AppID:       app.ID,
+        Config:      map[string]string{},
+        Path:        "/",
+        Image:       "fnproject/fn-test-utils",
+        Type:        "sync",
+        Format:      "json",
+        Timeout:     10,
+        IdleTimeout: 20,
+        Memory:      64,
+        CPUs:        models.MilliCPUs(200),
+        Payload:     `{"sleepTime": 0, "isDebug": true, "isCrash": true}`,
+        URL:         "http://127.0.0.1:8080/r/" + app.Name + "/",
+        Method:      "GET",
+    }
+
+    // FromModel doesn't need a datastore, for now...
+    ds := datastore.NewMockInit()
+
+    cfg, err := NewAgentConfig()
+    if err != nil {
+        t.Fatal(err)
+    }
+    cfg.MaxRequestSize = 5
+
+    a := New(NewDirectDataAccess(ds, ds, new(mqs.Mock)), WithConfig(cfg))
+    defer a.Close()
+
+    _, err = a.GetCall(FromModel(cm))
+    if err != models.ErrRequestContentTooBig {
+        t.Fatal(err)
+    }
+}
+
 func TestSubmitError(t *testing.T) {
     app := &models.App{Name: "myapp"}
     app.SetDefaults()


@@ -256,6 +256,10 @@ func (a *agent) GetCall(opts ...CallOpt) (Call, error) {
         // if we're not going to be able to run this call on this machine, bail here.
         return nil, models.ErrCallTimeoutServerBusy
     }
+    err := setMaxBodyLimit(&a.cfg, &c)
+    if err != nil {
+        return nil, err
+    }
 
     c.da = a.da
     c.ct = a
@@ -281,6 +285,16 @@ func (a *agent) GetCall(opts ...CallOpt) (Call, error) {
     return &c, nil
 }
 
+func setMaxBodyLimit(cfg *AgentConfig, c *call) error {
+    if cfg.MaxRequestSize > 0 && c.req.ContentLength > 0 && uint64(c.req.ContentLength) > cfg.MaxRequestSize {
+        return models.ErrRequestContentTooBig
+    }
+    if c.req.Body != nil {
+        c.req.Body = common.NewClampReadCloser(c.req.Body, cfg.MaxRequestSize, models.ErrRequestContentTooBig)
+    }
+    return nil
+}
+
 type call struct {
     *models.Call


@@ -19,6 +19,7 @@ type AgentConfig struct {
     CallEndTimeout     time.Duration `json:"call_end_timeout"`
     MaxCallEndStacking uint64        `json:"max_call_end_stacking"`
     MaxResponseSize    uint64        `json:"max_response_size_bytes"`
+    MaxRequestSize     uint64        `json:"max_request_size_bytes"`
     MaxLogSize         uint64        `json:"max_log_size_bytes"`
     MaxTotalCPU        uint64        `json:"max_total_cpu_mcpus"`
     MaxTotalMemory     uint64        `json:"max_total_memory_bytes"`
@@ -41,6 +42,7 @@ const (
     EnvCallEndTimeout     = "FN_CALL_END_TIMEOUT_MSECS"
     EnvMaxCallEndStacking = "FN_MAX_CALL_END_STACKING"
     EnvMaxResponseSize    = "FN_MAX_RESPONSE_SIZE"
+    EnvMaxRequestSize     = "FN_MAX_REQUEST_SIZE"
     EnvMaxLogSize         = "FN_MAX_LOG_SIZE_BYTES"
     EnvMaxTotalCPU        = "FN_MAX_TOTAL_CPU_MCPUS"
     EnvMaxTotalMemory     = "FN_MAX_TOTAL_MEMORY_BYTES"
@@ -74,6 +76,7 @@ func NewAgentConfig() (*AgentConfig, error) {
     err = setEnvMsecs(err, EnvAsyncChewPoll, &cfg.AsyncChewPoll, time.Duration(60)*time.Second)
     err = setEnvMsecs(err, EnvCallEndTimeout, &cfg.CallEndTimeout, time.Duration(10)*time.Minute)
     err = setEnvUint(err, EnvMaxResponseSize, &cfg.MaxResponseSize)
+    err = setEnvUint(err, EnvMaxRequestSize, &cfg.MaxRequestSize)
     err = setEnvUint(err, EnvMaxLogSize, &cfg.MaxLogSize)
     err = setEnvUint(err, EnvMaxTotalCPU, &cfg.MaxTotalCPU)
     err = setEnvUint(err, EnvMaxTotalMemory, &cfg.MaxTotalMemory)


@@ -97,6 +97,10 @@ func (a *lbAgent) GetCall(opts ...CallOpt) (Call, error) {
     if c.req == nil || c.Call == nil {
         return nil, errors.New("no model or request provided for call")
     }
+    err := setMaxBodyLimit(&a.cfg, &c)
+    if err != nil {
+        return nil, err
+    }
 
     c.da = a.da
     c.ct = a


@@ -11,9 +11,9 @@ type clampWriter struct {
     overflowErr error
 }
 
-func NewClampWriter(buf io.Writer, maxResponseSize uint64, overflowErr error) io.Writer {
-    if maxResponseSize != 0 {
-        return &clampWriter{w: buf, remaining: int64(maxResponseSize), overflowErr: overflowErr}
+func NewClampWriter(buf io.Writer, max uint64, overflowErr error) io.Writer {
+    if max != 0 {
+        return &clampWriter{w: buf, remaining: int64(max), overflowErr: overflowErr}
     }
     return buf
 }
@@ -34,6 +34,36 @@ func (g *clampWriter) Write(p []byte) (int, error) {
     return n, err
 }
 
+type clampReadCloser struct {
+    r           io.ReadCloser
+    remaining   int64
+    overflowErr error
+}
+
+func NewClampReadCloser(buf io.ReadCloser, max uint64, overflowErr error) io.ReadCloser {
+    if max != 0 {
+        return &clampReadCloser{r: buf, remaining: int64(max), overflowErr: overflowErr}
+    }
+    return buf
+}
+
+func (g *clampReadCloser) Close() error {
+    return g.r.Close()
+}
+
+func (g *clampReadCloser) Read(p []byte) (int, error) {
+    if g.remaining <= 0 {
+        return 0, g.overflowErr
+    }
+    if int64(len(p)) > g.remaining {
+        p = p[0:g.remaining]
+    }
+    n, err := g.r.Read(p)
+    g.remaining -= int64(n)
+    return n, err
+}
+
 type GhostWriter interface {
     io.Writer
     io.Closer


@@ -185,6 +185,10 @@ var (
         code:  http.StatusBadGateway,
         error: fmt.Errorf("function response too large"),
     }
+    ErrRequestContentTooBig = err{
+        code:  http.StatusRequestEntityTooLarge,
+        error: fmt.Errorf("Request content too large"),
+    }
     ErrInvalidAnnotationKey = err{
         code:  http.StatusBadRequest,
         error: errors.New("Invalid annotation key, annotation keys must be non-empty ascii strings excluding whitespace"),