fn: introducing 503 responses for out of capacity case (#518)

* fn: introducing 503 responses for out of capacity case

*) Adding a 503 response with a Retry-After header when a request
fails while waiting for slots (see the sketch after this list).
*) TODO: return 503 without Retry-After if the request can
never be met by this fn server.
*) fn: runner test docker pull fixup
*) fn: MaxMemory for routes is now a variable so it can be tested
and adjusted according to fleet memory sizes.
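
As a rough illustration of the intended behavior (not code from this commit), below is a minimal sketch of how a front end could translate the two timeout errors into HTTP responses. The handler path, the errServerBusy/errCallTimeout stand-ins, and the 15-second Retry-After value are assumptions for illustration only; the real errors live in the fn models package.

package main

import (
	"errors"
	"net/http"
	"strconv"
)

// Stand-ins for models.ErrCallTimeoutServerBusy and models.ErrCallTimeout.
var (
	errServerBusy  = errors.New("timed out - server too busy")
	errCallTimeout = errors.New("call timed out")
)

// writeCallError sketches the response mapping: an out-of-capacity timeout
// becomes a 503 plus a Retry-After hint, a plain call timeout does not.
func writeCallError(w http.ResponseWriter, err error) {
	switch err {
	case errServerBusy:
		// Retriable: the fleet may free up memory/slots shortly.
		w.Header().Set("Retry-After", strconv.Itoa(15)) // assumed back-off, in seconds
		http.Error(w, "service unavailable", http.StatusServiceUnavailable)
	case errCallTimeout:
		http.Error(w, "function call timed out", http.StatusGatewayTimeout)
	default:
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func main() {
	http.HandleFunc("/r/myapp/myroute", func(w http.ResponseWriter, r *http.Request) {
		// Pretend the agent could not find a slot before the call's deadline.
		writeCallError(w, errServerBusy)
	})
	http.ListenAndServe(":8080", nil)
}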
Tolga Ceylan
2017-11-21 12:42:02 -08:00
committed by GitHub
parent 460e9d2dea
commit 2551be446a
6 changed files with 38 additions and 13 deletions


@@ -170,7 +170,18 @@ func (a *agent) Close() error {
 	return nil
 }
+func transformTimeout(e error, isRetriable bool) error {
+	if e == context.DeadlineExceeded {
+		if isRetriable {
+			return models.ErrCallTimeoutServerBusy
+		}
+		return models.ErrCallTimeout
+	}
+	return e
+}
 func (a *agent) Submit(callI Call) error {
 	a.wg.Add(1)
 	defer a.wg.Done()
@@ -199,7 +210,7 @@ func (a *agent) Submit(callI Call) error {
 	slot, err := a.getSlot(ctx, call) // find ram available / running
 	if err != nil {
 		a.stats.Dequeue(callI.Model().Path)
-		return err
+		return transformTimeout(err, true)
 	}
 	// TODO if the call times out & container is created, we need
 	// to make this remove the container asynchronously?
@@ -209,7 +220,7 @@ func (a *agent) Submit(callI Call) error {
 	err = call.Start(ctx, a)
 	if err != nil {
 		a.stats.Dequeue(callI.Model().Path)
-		return err
+		return transformTimeout(err, true)
 	}
 	// decrement queued count, increment running count
@@ -231,7 +242,7 @@ func (a *agent) Submit(callI Call) error {
 	// but this could put us over the timeout if the call did not reply yet (need better policy).
 	ctx = opentracing.ContextWithSpan(context.Background(), span)
 	err = call.End(ctx, err, a)
-	return err
+	return transformTimeout(err, false)
 }
 // getSlot must ensure that if it receives a slot, it will be returned, otherwise
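
For context, a self-contained sketch of why the two earlier Submit call sites pass isRetriable=true while the post-execution call passes false. The errServerBusy/errCallTimeout values and waitForSlot are stand-ins for the fn models errors and agent.getSlot, not the real code.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// Stand-ins for models.ErrCallTimeoutServerBusy and models.ErrCallTimeout.
var (
	errServerBusy  = errors.New("timed out - server too busy")
	errCallTimeout = errors.New("call timed out")
)

// transformTimeout mirrors the helper added in this commit: a deadline hit
// while still waiting for capacity (retriable) maps to "server busy", while
// a deadline hit after the call has run does not.
func transformTimeout(e error, isRetriable bool) error {
	if e == context.DeadlineExceeded {
		if isRetriable {
			return errServerBusy
		}
		return errCallTimeout
	}
	return e
}

// waitForSlot is a hypothetical stand-in for agent.getSlot: it blocks until
// a slot frees up or the call's deadline expires.
func waitForSlot(ctx context.Context) error {
	select {
	case <-time.After(100 * time.Millisecond): // pretend no slot ever frees up
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	if err := waitForSlot(ctx); err != nil {
		// Before the call runs, a timeout is retriable: report "server busy"
		// so the router can answer 503 with Retry-After.
		fmt.Println(transformTimeout(err, true)) // timed out - server too busy
	}

	// After call.End, the same deadline error is no longer retriable.
	fmt.Println(transformTimeout(context.DeadlineExceeded, false)) // call timed out
}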