move max_request_size from agent to server (#1145)

Moves the config option for max request size up to the front end, adds the env
var for it there, adds a server test for it, and removes it from the agent. A
request comes in either through the LB (before gRPC) or directly to the server,
so we can enforce the limit there, which is simpler than having multiple layers
of request body checking. This also aligns with making the agent less
responsible for HTTP behaviors (and eventually not responsible at all, once
routes are fully deprecated).
Author: Reed Allman
Date: 2018-07-31 08:58:47 -07:00 (committed by GitHub)
Parent: 0cde57bdab
Commit: af94f3f8ac
7 changed files with 54 additions and 81 deletions
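For illustration only (not code from this commit): a minimal sketch of how a front-end HTTP server can enforce a request size cap before anything reaches an agent, using plain net/http rather than the actual fn server wiring. The limitRequestSize name, the echo handler, and the 5 MB cap are assumptions made up for the example.

package main

import (
	"io"
	"net/http"
)

// limitRequestSize rejects requests whose declared Content-Length exceeds max
// and clamps chunked/unknown-length bodies so handlers can never read past max.
func limitRequestSize(max int64, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ContentLength > max {
			http.Error(w, "request entity too large", http.StatusRequestEntityTooLarge)
			return
		}
		// MaxBytesReader makes reads past max bytes return an error.
		r.Body = http.MaxBytesReader(w, r.Body, max)
		next.ServeHTTP(w, r)
	})
}

func main() {
	echo := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		if err != nil {
			// The clamped body returns an error once the cap is exceeded.
			http.Error(w, "request entity too large", http.StatusRequestEntityTooLarge)
			return
		}
		w.Write(body)
	})
	http.ListenAndServe(":8080", limitRequestSize(5*1024*1024, echo)) // illustrative 5 MB cap
}

With a single check at the HTTP boundary like this, neither the LB agent nor the regular agent needs its own body-size logic, which is the point of the commit.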


@@ -406,41 +406,6 @@ func (l testListener) BeforeCall(context.Context, *models.Call) error {
	return nil
}

func TestReqTooLarge(t *testing.T) {
	app := &models.App{ID: "app_id", Name: "myapp"}

	cm := &models.Call{
		AppID:       app.ID,
		Config:      map[string]string{},
		Path:        "/",
		Image:       "fnproject/fn-test-utils",
		Type:        "sync",
		Format:      "json",
		Timeout:     10,
		IdleTimeout: 20,
		Memory:      64,
		CPUs:        models.MilliCPUs(200),
		Payload:     `{"sleepTime": 0, "isDebug": true, "isCrash": true}`,
		URL:         "http://127.0.0.1:8080/r/" + app.Name + "/",
		Method:      "GET",
	}

	cfg, err := NewConfig()
	if err != nil {
		t.Fatal(err)
	}
	cfg.MaxRequestSize = 5

	ls := logs.NewMock()
	a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)), WithConfig(cfg))
	defer checkClose(t, a)

	_, err = a.GetCall(FromModel(cm))
	if err != models.ErrRequestContentTooBig {
		t.Fatal(err)
	}
}

func TestSubmitError(t *testing.T) {
	app := &models.App{Name: "myapp"}
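The server test added by this commit is not shown in this excerpt. As a rough, hypothetical stand-in, a test at the HTTP layer could look like the following; it uses httptest against an illustrative handler and limit, not fn's actual server test harness.

package main

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

func TestRequestTooLargeRejected(t *testing.T) {
	// Illustrative handler: reject any request declaring a body larger than 5 bytes.
	const maxRequestSize = 5
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ContentLength > maxRequestSize {
			http.Error(w, "request entity too large", http.StatusRequestEntityTooLarge)
			return
		}
		w.WriteHeader(http.StatusOK)
	})

	srv := httptest.NewServer(h)
	defer srv.Close()

	// The payload is well over the 5-byte cap, so the server should refuse it.
	resp, err := http.Post(srv.URL, "application/json", strings.NewReader(`{"sleepTime": 0, "isDebug": true}`))
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusRequestEntityTooLarge {
		t.Fatalf("expected 413 Request Entity Too Large, got %d", resp.StatusCode)
	}
}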


@@ -354,11 +354,6 @@ func (a *agent) GetCall(opts ...CallOpt) (Call, error) {
		return nil, models.ErrCallTimeoutServerBusy
	}

	err := setMaxBodyLimit(&a.cfg, &c)
	if err != nil {
		return nil, err
	}

	setupCtx(&c)

	c.handler = a.da
@@ -379,16 +374,6 @@ func setupCtx(c *call) {
	c.req = c.req.WithContext(ctx)
}

func setMaxBodyLimit(cfg *Config, c *call) error {
	if cfg.MaxRequestSize > 0 && c.req.ContentLength > 0 && uint64(c.req.ContentLength) > cfg.MaxRequestSize {
		return models.ErrRequestContentTooBig
	}
	if c.req.Body != nil {
		c.req.Body = common.NewClampReadCloser(c.req.Body, cfg.MaxRequestSize, models.ErrRequestContentTooBig)
	}
	return nil
}

type call struct {
	*models.Call
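The removed setMaxBodyLimit pairs a Content-Length pre-check with common.NewClampReadCloser, which guards bodies that arrive without a declared length. As a self-contained illustration of that clamp pattern (not fn's actual common.NewClampReadCloser implementation), such a reader might look like this; errTooBig and the type names are invented for the sketch.

package main

import (
	"errors"
	"io"
)

// errTooBig stands in for models.ErrRequestContentTooBig in this sketch.
var errTooBig = errors.New("request content too big")

// clampReadCloser wraps a body and returns errTooBig once more than max
// bytes have been read, catching oversized chunked uploads.
type clampReadCloser struct {
	rc   io.ReadCloser
	read uint64
	max  uint64
}

func newClampReadCloser(rc io.ReadCloser, max uint64) io.ReadCloser {
	return &clampReadCloser{rc: rc, max: max}
}

func (c *clampReadCloser) Read(p []byte) (int, error) {
	n, err := c.rc.Read(p)
	c.read += uint64(n)
	if c.read > c.max {
		return n, errTooBig
	}
	return n, err
}

func (c *clampReadCloser) Close() error { return c.rc.Close() }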


@@ -21,7 +21,6 @@ type Config struct {
	CallEndTimeout     time.Duration `json:"call_end_timeout"`
	MaxCallEndStacking uint64        `json:"max_call_end_stacking"`
	MaxResponseSize    uint64        `json:"max_response_size_bytes"`
	MaxRequestSize     uint64        `json:"max_request_size_bytes"`
	MaxLogSize         uint64        `json:"max_log_size_bytes"`
	MaxTotalCPU        uint64        `json:"max_total_cpu_mcpus"`
	MaxTotalMemory     uint64        `json:"max_total_memory_bytes"`
@@ -59,8 +58,6 @@ const (
	EnvMaxCallEndStacking = "FN_MAX_CALL_END_STACKING"
	// EnvMaxResponseSize is the maximum number of bytes that a function may return from an invocation
	EnvMaxResponseSize = "FN_MAX_RESPONSE_SIZE"
	// EnvMaxRequestSize is the maximum request size that may be passed to an agent TODO kill me from here
	EnvMaxRequestSize = "FN_MAX_REQUEST_SIZE"
	// EnvMaxLogSize is the maximum size that a function's log may reach
	EnvMaxLogSize = "FN_MAX_LOG_SIZE_BYTES"
	// EnvMaxTotalCPU is the maximum CPU that will be reserved across all containers
@@ -116,7 +113,6 @@ func NewConfig() (*Config, error) {
	err = setEnvMsecs(err, EnvAsyncChewPoll, &cfg.AsyncChewPoll, time.Duration(60)*time.Second)
	err = setEnvMsecs(err, EnvCallEndTimeout, &cfg.CallEndTimeout, time.Duration(10)*time.Minute)
	err = setEnvUint(err, EnvMaxResponseSize, &cfg.MaxResponseSize)
	err = setEnvUint(err, EnvMaxRequestSize, &cfg.MaxRequestSize)
	err = setEnvUint(err, EnvMaxLogSize, &cfg.MaxLogSize)
	err = setEnvUint(err, EnvMaxTotalCPU, &cfg.MaxTotalCPU)
	err = setEnvUint(err, EnvMaxTotalMemory, &cfg.MaxTotalMemory)
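With FN_MAX_REQUEST_SIZE gone from the agent config, the front end needs its own equivalent of the setEnvUint wiring above. A generic sketch of that parsing, using a hypothetical loadMaxRequestSize helper rather than the server's real config code:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// loadMaxRequestSize reads FN_MAX_REQUEST_SIZE and falls back to def when unset.
// Hypothetical helper for illustration; the fn server has its own config plumbing.
func loadMaxRequestSize(def uint64) (uint64, error) {
	v := os.Getenv("FN_MAX_REQUEST_SIZE")
	if v == "" {
		return def, nil
	}
	n, err := strconv.ParseUint(v, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid FN_MAX_REQUEST_SIZE %q: %v", v, err)
	}
	return n, nil
}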


@@ -119,11 +119,6 @@ func (a *lbAgent) GetCall(opts ...CallOpt) (Call, error) {
		c.extensions = ext
	}

	err := setMaxBodyLimit(&a.cfg, &c)
	if err != nil {
		return nil, err
	}

	setupCtx(&c)

	c.isLB = true