fn-serverless/api/runner/async_runner.go
Reed Allman 9edacae928 clean up hotf(x) concurrency, rm max c
this patch gets rid of max concurrency for functions altogether, as discussed,
since it will be challenging to support across functions nodes. as a result,
the previous version of functions would fall over when offered 1000 functions,
so some work was needed in order to push this through. further work is still
necessary: docker basically falls over when asked to start enough containers at
the same time, and with this patch essentially every function can scale
infinitely. it seems like we could add some kind of adaptive restriction based
on task run length and configured wait time, so that fast-running functions
line up to run in an existing hot container instead of each creating a new one
(a rough sketch of the idea follows).
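purely illustrative, not code from this patch -- every name below (shouldWaitForHot, avgRunTime, waitBudget) is made up. the idea is just to compare the expected wait on an existing hot container against the configured wait budget:

package runner

import "time"

// Hypothetical sketch only: decide whether a new invocation should queue for
// an existing hot container or trigger a new one, based on the observed
// average run length of the function versus how long the caller is willing
// to wait. None of these names exist in this patch.
func shouldWaitForHot(avgRunTime, waitBudget time.Duration, queued int) bool {
	// Expected wait is roughly the queue depth times the average run length.
	expectedWait := time.Duration(queued) * avgRunTime
	// Fast functions with a tolerable expected wait should line up on a hot
	// container; otherwise it is worth paying for a new container.
	return expectedWait <= waitBudget
}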

this patch takes a first cut at whacking out some of the insanity that was the
previous concurrency model. it was problematic in that it limited concurrency
significantly across all functions, since every task went through the same
unbuffered channel, which could block all functions if the channel wasn't
picked off fast enough (it's not apparent that this was impossible in the
previous implementation). in any event, each request already has a goroutine,
so there's no reason not to use it; it's not too hard to wrap a map in a lock,
and it's not clear what the benefits of the channel approach were (added
insanity?). in effect this is marginally easier to understand and marginally
less insane. after getting rid of max concurrency this adds a blocking
mechanism for the first invocation of any function, so that all other hot
invocations of that function wait on the first one to finish, to avoid a herd
issue (it was making docker die...) -- this could be slightly improved, but
works in a pinch; a rough sketch of the idea follows this paragraph. also
reduced some memory usage by removing the redundant maps of htfnsvr's and
task.Requests (by a factor of 2!), and cleaned up some of the protocol stuff,
which needs further cleanup. anyway, it's a first cut. i have another patch
that rewrites all of it, but that was getting into rabbit hole territory;
happy to oblige if anybody else has problems understanding this rat's nest of
channels. there is a good bit of work left to make this prod ready (regardless
of removing max concurrency).
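a rough sketch of that first-invocation gate, for illustration only -- none of these names are from this patch, and the real code lives elsewhere in the runner package. the shape is a map of per-function state behind a mutex, where the first invocation runs alone and concurrent invocations block until it finishes:

package runner

import "sync"

// hotGate is a hypothetical sketch: per-function channels behind a lock so
// that a cold function is not started by a herd of containers at once.
type hotGate struct {
	mu    sync.Mutex
	first map[string]chan struct{} // keyed per function, closed after the first run
}

func newHotGate() *hotGate {
	return &hotGate{first: make(map[string]chan struct{})}
}

// run invokes fn for the given function key. The first caller for a key runs
// immediately; concurrent callers wait until that first run has finished, at
// which point a hot container should already exist for them to reuse.
func (g *hotGate) run(key string, fn func() error) error {
	g.mu.Lock()
	done, ok := g.first[key]
	if !ok {
		done = make(chan struct{})
		g.first[key] = done
		g.mu.Unlock()
		defer close(done) // release the waiters once the first run finishes
		return fn()
	}
	g.mu.Unlock()
	<-done // block on the first invocation of this function
	return fn()
}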

a warning: this will break the db schemas. i didn't put the effort in to add
migration stuff since this isn't deployed anywhere in prod...

TODO need to clean out the htfnmgr bucket with LRU (a rough sketch follows this list)
TODO need to clean up runner interface
TODO need to unify the task running paths across protocols
TODO need to move the ram checking stuff into worker for noted reasons
TODO need better elasticity of hot f(x) containers
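for the LRU TODO above, a purely hypothetical sketch -- none of these names exist in this patch, and the evict callback is an assumption -- of how the htfnmgr bucket could be bounded:

package runner

import "container/list"

// hotLRU is a sketch of an LRU policy over hot function entries: once the
// bucket grows past a fixed size, the least recently used entry is evicted.
type hotLRU struct {
	max   int
	order *list.List               // front = most recently used
	items map[string]*list.Element // function key -> element whose Value is the key
	evict func(key string)         // e.g. tear down that function's hot containers
}

func newHotLRU(max int, evict func(string)) *hotLRU {
	return &hotLRU{
		max:   max,
		order: list.New(),
		items: make(map[string]*list.Element),
		evict: evict,
	}
}

// touch marks a function as recently used, evicting the oldest entry if the
// bucket is over capacity.
func (l *hotLRU) touch(key string) {
	if el, ok := l.items[key]; ok {
		l.order.MoveToFront(el)
		return
	}
	l.items[key] = l.order.PushFront(key)
	if l.order.Len() > l.max {
		oldest := l.order.Back()
		k := oldest.Value.(string)
		l.order.Remove(oldest)
		delete(l.items, k)
		if l.evict != nil {
			l.evict(k)
		}
	}
}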
2017-06-05 20:04:13 -07:00


package runner

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"

	"gitlab-odx.oracle.com/odx/functions/api/models"
	"gitlab-odx.oracle.com/odx/functions/api/runner/common"
	"gitlab-odx.oracle.com/odx/functions/api/runner/task"
)
// getTask fetches the next task from the task server. A task with an empty ID
// means the queue is currently empty.
// TODO: ctx is currently unused; the request does not honor cancellation.
func getTask(ctx context.Context, url string) (*models.Task, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var task models.Task
	if err := json.Unmarshal(body, &task); err != nil {
		return nil, err
	}
	if task.ID == "" {
		return nil, nil
	}
	return &task, nil
}

// getCfg builds a task.Config from a queued task, falling back to the default
// timeouts when none are set.
func getCfg(t *models.Task) *task.Config {
	cfg := &task.Config{
		Image:   *t.Image,
		ID:      t.ID,
		AppName: t.AppName,
		Env:     t.EnvVars,
	}

	if t.Timeout == nil || *t.Timeout <= 0 {
		cfg.Timeout = DefaultTimeout
	} else {
		cfg.Timeout = time.Duration(*t.Timeout) * time.Second
	}
	if t.IdleTimeout == nil || *t.IdleTimeout <= 0 {
		cfg.IdleTimeout = DefaultIdleTimeout
	} else {
		cfg.IdleTimeout = time.Duration(*t.IdleTimeout) * time.Second
	}

	return cfg
}
// deleteTask removes a claimed task from the queue. Anything other than a
// 202 Accepted response is treated as an error.
func deleteTask(url string, task *models.Task) error {
	// Marshal the task so it can be sent over as JSON.
	body, err := json.Marshal(task)
	if err != nil {
		return err
	}

	// Send a DELETE request to remove the task from the queue.
	req, err := http.NewRequest(http.MethodDelete, url, bytes.NewBuffer(body))
	if err != nil {
		return err
	}

	c := &http.Client{}
	resp, err := c.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusAccepted {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		return errors.New(string(body))
	}
	return nil
}
// RunAsyncRunner pulls tasks off a queue and processes them until the context
// is cancelled.
func RunAsyncRunner(ctx context.Context, tasksrv string, rnr *Runner, ds models.Datastore) {
	u := tasksrvURL(tasksrv)

	startAsyncRunners(ctx, u, rnr, ds)
	<-ctx.Done()
}

// startAsyncRunners polls the task server and runs each task it receives in
// its own goroutine, backing off while the runner has no memory available for
// async work.
func startAsyncRunners(ctx context.Context, url string, rnr *Runner, ds models.Datastore) {
	var wg sync.WaitGroup
	ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"runner": "async"})
	for {
		select {
		case <-ctx.Done():
			wg.Wait()
			return
		default:
			if !rnr.hasAsyncAvailableMemory() {
				log.Debug("memory full")
				time.Sleep(1 * time.Second)
				continue
			}

			task, err := getTask(ctx, url)
			if err != nil {
				if err, ok := err.(net.Error); ok && err.Timeout() {
					log.WithError(err).Errorln("Could not fetch task, timeout.")
					continue
				}
				log.WithError(err).Error("Could not fetch task")
				time.Sleep(1 * time.Second)
				continue
			}
			if task == nil {
				// Nothing queued; wait before polling again.
				time.Sleep(1 * time.Second)
				continue
			}

			ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": task.ID})
			log.Debug("Running task:", task.ID)

			wg.Add(1)
			go func() {
				defer wg.Done()
				// Process the task.
				if _, err := rnr.RunTrackedTask(task, ctx, getCfg(task), ds); err != nil {
					log.WithError(err).Error("Cannot run task")
				}
				log.Debug("Processed task")
			}()

			// Delete the task from the queue now that it has been claimed; the
			// goroutine above may still be running it.
			if err := deleteTask(url, task); err != nil {
				log.WithError(err).Error("Cannot delete task")
				continue
			}
			log.Info("Task complete")
		}
	}
}
// tasksrvURL normalizes the configured task server address, defaulting to the
// http scheme and the /tasks path.
func tasksrvURL(tasksrv string) string {
	parsed, err := url.Parse(tasksrv)
	if err != nil {
		logrus.WithError(err).Fatalln("cannot parse API_URL endpoint")
	}
	if parsed.Scheme == "" {
		parsed.Scheme = "http"
	}
	if parsed.Path == "" || parsed.Path == "/" {
		parsed.Path = "/tasks"
	}
	return parsed.String()
}
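for reference, a hedged sketch (not part of this file) of a minimal fake task server speaking the protocol the runner above expects: GET on /tasks returns a JSON task, with an empty id meaning the queue is empty, and DELETE on /tasks acknowledges with 202 Accepted. The "id" json tag is an assumption about models.Task, not confirmed here.

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// fakeTask mirrors just enough of models.Task for this sketch; the "id" json
// tag is an assumption, not taken from the real models package.
type fakeTask struct {
	ID string `json:"id"`
}

func main() {
	queue := make(chan fakeTask, 16)
	queue <- fakeTask{ID: "example-task"} // hypothetical seed task

	http.HandleFunc("/tasks", func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case http.MethodGet:
			// Hand out the next task, or an empty one when the queue is empty
			// (the runner treats an empty ID as "nothing to do").
			var t fakeTask
			select {
			case t = <-queue:
			default:
			}
			json.NewEncoder(w).Encode(t)
		case http.MethodDelete:
			// The runner expects 202 Accepted when a task is removed.
			w.WriteHeader(http.StatusAccepted)
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
		}
	})

	log.Fatal(http.ListenAndServe(":8081", nil))
}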