Mirror of https://github.com/fnproject/fn.git, synced 2022-10-28 21:29:17 +03:00
Possible breakages:

* `FN_HEADER` on cold is no longer `s/-/_/` -- this is so that cold functions can rebuild the headers exactly as they came in on the request (FDKs, specifically); there's no guarantee that reversing with `s/_/-/` recovers the original header from the request.
* App and route config are no longer `s/-/_/` -- it seemed really weird to rewrite the user's config vars here. We should just pass them to the env exactly as they are.
* Headers no longer contain the environment vars (previously: base config, app config, route config, `FN_PATH`, etc.); these are still available in the environment.

This gets rid of a lot of the code around headers, specifically the stuff that shoved everything into headers when constructing a call. Now we store the headers separately, add a few things like FN_CALL_ID to them, and build a separate 'config' to store on the call. I thought 'config' was more aptly named; 'env' was confusing. 'config' is now exactly what 'base_vars' was, i.e. only the things being put into the env. We weren't storing this field in the db, so this only breaks if there are messages in a queue from another version; I don't think we're there, and I don't expect any breakage for anybody from the field name changes.

This makes the configuration stuff pretty straightforward: there are just two separate buckets of things. Cold just needs to mash them together into the env; hot containers just need to put 'config' in the env, and then the hot format can shove 'headers' in however it'd like. This seems better than my last idea about making this easier but worse (RIP).

This means:

* Headers no longer contain all vars; the set of base vars can only be found in the environment.
* Headers are only the headers from the request, plus call_id, deadline, method, and url.
* For cold, we simply add the headers to the environment, prepending `FN_HEADER_` to them, but NOT upper-casing or `s/-/_/` (see the sketch below).
* Fixes the issue where async hot functions would end up with `Fn_header_`-prefixed headers.
* Removes the idea of 'base' vars and 'env'. This was a strange concept. Now we just have 'config', which was base vars, and headers, which was base_env + headers; i.e. they are disjoint now.
* Casing for all headers will lean toward `My-Header` style, which should help with consistency. Notable exceptions, for cold only, are FN_CALL_ID, FN_METHOD, and FN_REQUEST_URL -- this is simply to avoid breakage; in either hot format they still appear as `Fn_call_id`.
* Removes the FN_PARAM stuff.
* Updated the docs with this behavior.

Weird things left: `Fn_call_id`, for example, isn't a correctly formatted http header; it should likely be `Fn-Call-Id`, but I wanted to live to fight another day on this one, as it would add some breakage.

Examples of each format to be posted below.

Closes #329
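To make the cold-path behavior above concrete, here is a minimal sketch (not code from this PR; `buildColdEnv` and its arguments are illustrative names) of how a cold container's environment can be assembled from the two buckets: `config` passes through verbatim, and each request header is added with an `FN_HEADER_` prefix, with no upper-casing and no `s/-/_/`:

```go
package main

import (
	"fmt"
	"net/http"
)

// buildColdEnv mashes the two disjoint buckets together for a cold container:
// 'config' (app config + route config + FN_APP_NAME, FN_PATH, ...) goes in as-is,
// and each header is prefixed with FN_HEADER_ without any case or dash munging.
func buildColdEnv(config map[string]string, headers http.Header) []string {
	env := make([]string, 0, len(config)+len(headers))
	for k, v := range config {
		env = append(env, fmt.Sprintf("%s=%s", k, v))
	}
	for k, vs := range headers {
		for _, v := range vs {
			// e.g. "Content-Type: text/plain" -> FN_HEADER_Content-Type=text/plain
			env = append(env, fmt.Sprintf("FN_HEADER_%s=%s", k, v))
		}
	}
	return env
}

func main() {
	h := http.Header{}
	h.Set("Content-Type", "text/plain")
	fmt.Println(buildColdEnv(map[string]string{"FN_APP_NAME": "myapp"}, h))
}
```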
package agent

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/fnproject/fn/api/agent/drivers"
	"github.com/fnproject/fn/api/common"
	"github.com/fnproject/fn/api/id"
	"github.com/fnproject/fn/api/models"
	"github.com/go-openapi/strfmt"
	"github.com/opentracing/opentracing-go"
	"github.com/sirupsen/logrus"
)

type Call interface {
	// Model will return the underlying models.Call configuration for this call.
	// TODO we could respond to async correctly from agent but layering, this
	// is only because the front end has different responses based on call type.
	// try to discourage use elsewhere until this gets pushed down more...
	Model() *models.Call

	// Start will be called before this call is executed, it may be used to
	// guarantee mutual exclusion, check docker permissions, update timestamps,
	// etc.
	// TODO Start and End can likely be unexported as they are only used in the agent,
	// and on a type which is constructed in a specific agent. meh.
	Start(ctx context.Context) error

	// End will be called immediately after attempting a call execution,
	// regardless of whether the execution failed or not. An error will be passed
	// to End, which if nil indicates a successful execution. Any error returned
	// from End will be returned as the error from Submit.
	End(ctx context.Context, err error) error
}

// TODO build w/o closures... lazy
type CallOpt func(a *agent, c *call) error

type Param struct {
	Key   string
	Value string
}
type Params []Param

func fixupRequestURL(req *http.Request) string {
	if req.URL.Scheme == "" {
		if req.TLS == nil {
			req.URL.Scheme = "http"
		} else {
			req.URL.Scheme = "https"
		}
	}
	if req.URL.Host == "" {
		req.URL.Host = req.Host
	}
	return req.URL.String()
}

func FromRequest(appName, path string, req *http.Request) CallOpt {
	return func(a *agent, c *call) error {
		app, err := a.da.GetApp(req.Context(), appName)
		if err != nil {
			return err
		}

		route, err := a.da.GetRoute(req.Context(), appName, path)
		if err != nil {
			return err
		}

		if route.Format == "" {
			route.Format = models.FormatDefault
		}

		id := id.New().String()

		// TODO this relies on ordering of opts, but tests make sure it works, probably re-plumb/destroy headers
		// TODO async should probably supply an http.ResponseWriter that records the logs, to attach response headers to
		if rw, ok := c.w.(http.ResponseWriter); ok {
			rw.Header().Add("FN_CALL_ID", id)
			for k, vs := range route.Headers {
				for _, v := range vs {
					// pre-write in these headers to response
					rw.Header().Add(k, v)
				}
			}
		}

		// add our per call headers in here
		req.Header.Set("FN_METHOD", req.Method)
		req.Header.Set("FN_REQUEST_URL", reqURL(req))
		req.Header.Set("FN_CALL_ID", id)

		// this ensures that there is an image, path, timeouts, memory, etc are valid.
		// NOTE: this means assign any changes above into route's fields
		err = route.Validate()
		if err != nil {
			return err
		}

		c.Call = &models.Call{
			ID:      id,
			AppName: appName,
			Path:    route.Path,
			Image:   route.Image,
			// Delay: 0,
			Type:   route.Type,
			Format: route.Format,
			// Payload: TODO,
			Priority:    new(int32), // TODO this is crucial, apparently
			Timeout:     route.Timeout,
			IdleTimeout: route.IdleTimeout,
			Memory:      route.Memory,
			Config:      buildConfig(app, route),
			Headers:     req.Header,
			CreatedAt:   strfmt.DateTime(time.Now()),
			URL:         reqURL(req),
			Method:      req.Method,
		}

		c.req = req
		return nil
	}
}
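
// buildConfig merges app config and route config (route values win on key
// collisions since they are written last), then adds the FN_* runtime values.
// For example, a (hypothetical) app config of {"DB_URL": "postgres://..."} on a
// route at "/hello" with format "http" yields a config containing DB_URL,
// FN_FORMAT=http, FN_APP_NAME, FN_PATH=/hello, FN_MEMORY and FN_TYPE.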
func buildConfig(app *models.App, route *models.Route) models.Config {
	conf := make(models.Config, 8+len(app.Config)+len(route.Config))
	for k, v := range app.Config {
		conf[k] = v
	}
	for k, v := range route.Config {
		conf[k] = v
	}

	conf["FN_FORMAT"] = route.Format
	conf["FN_APP_NAME"] = app.Name
	conf["FN_PATH"] = route.Path
	// TODO: might be a good idea to pass in: "FN_BASE_PATH" = fmt.Sprintf("/r/%s", appName) || "/" if using DNS entries per app
	conf["FN_MEMORY"] = fmt.Sprintf("%d", route.Memory)
	conf["FN_TYPE"] = route.Type
	return conf
}

func reqURL(req *http.Request) string {
	if req.URL.Scheme == "" {
		if req.TLS == nil {
			req.URL.Scheme = "http"
		} else {
			req.URL.Scheme = "https"
		}
	}
	if req.URL.Host == "" {
		req.URL.Host = req.Host
	}
	return req.URL.String()
}

// TODO this currently relies on FromRequest having happened before to create the model
// here, to be a fully qualified model. We probably should double check but having a way
// to bypass will likely be what's used anyway unless forced.
func FromModel(mCall *models.Call) CallOpt {
	return func(a *agent, c *call) error {
		c.Call = mCall

		req, err := http.NewRequest(c.Method, c.URL, strings.NewReader(c.Payload))
		if err != nil {
			return err
		}
		req.Header = c.Headers

		c.req = req
		// TODO anything else really?
		return nil
	}
}

// TODO this should be required
func WithWriter(w io.Writer) CallOpt {
	return func(a *agent, c *call) error {
		c.w = w
		return nil
	}
}

// GetCall builds a Call that can be used to submit jobs to the agent.
//
// TODO where to put this? async and sync both call this
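//
// A typical invocation (illustrative, not taken from this file) looks roughly
// like a.GetCall(FromRequest(appName, path, req), WithWriter(w)); async replay
// would use FromModel with the dequeued models.Call instead of FromRequest.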
func (a *agent) GetCall(opts ...CallOpt) (Call, error) {
	var c call

	for _, o := range opts {
		err := o(a, &c)
		if err != nil {
			return nil, err
		}
	}

	// TODO typed errors to test
	if c.req == nil || c.Call == nil {
		return nil, errors.New("no model or request provided for call")
	}

	c.da = a.da
	c.ct = a

	ctx, _ := common.LoggerWithFields(c.req.Context(),
		logrus.Fields{"id": c.ID, "app": c.AppName, "route": c.Path})
	c.req = c.req.WithContext(ctx)

	// setup stderr logger separate (don't inherit ctx vars)
	logger := logrus.WithFields(logrus.Fields{"user_log": true, "app_name": c.AppName, "path": c.Path, "image": c.Image, "call_id": c.ID})
	c.stderr = setupLogger(logger)
	if c.w == nil {
		// send STDOUT to logs if no writer given (async...)
		// TODO we could/should probably make this explicit to GetCall, ala 'WithLogger', but it's dupe code (who cares?)
		c.w = c.stderr
	}

	now := time.Now()
	slotDeadline := now.Add(time.Duration(c.Call.Timeout) * time.Second / 2)
	execDeadline := now.Add(time.Duration(c.Call.Timeout) * time.Second)

	c.slotDeadline = slotDeadline
	c.execDeadline = execDeadline

	execDeadlineStr := strfmt.DateTime(execDeadline).String()

	// these 2 headers buckets are the same but for posterity!
	if c.Headers == nil {
		c.Headers = make(http.Header)
		c.req.Header = c.Headers
	}
	c.Headers.Set("FN_DEADLINE", execDeadlineStr)
	c.req.Header.Set("FN_DEADLINE", execDeadlineStr)

	return &c, nil
}

type call struct {
	*models.Call

	da           DataAccess
	w            io.Writer
	req          *http.Request
	stderr       io.ReadWriteCloser
	ct           callTrigger
	slots        *slotQueue
	slotDeadline time.Time
	execDeadline time.Time
}

func (c *call) Model() *models.Call { return c.Call }

func (c *call) Start(ctx context.Context) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, "agent_call_start")
	defer span.Finish()

	// Check context timeouts, errors
	if ctx.Err() != nil {
		return ctx.Err()
	}

	c.StartedAt = strfmt.DateTime(time.Now())
	c.Status = "running"

	if rw, ok := c.w.(http.ResponseWriter); ok { // TODO need to figure out better way to wire response headers in
		rw.Header().Set("XXX-FXLB-WAIT", time.Time(c.StartedAt).Sub(time.Time(c.CreatedAt)).String())
	}

	if c.Type == models.TypeAsync {
		// XXX (reed): make sure MQ reservation is lengthy. to skirt MQ semantics,
		// we could add a new message to MQ w/ delay of call.Timeout and delete the
		// old one (in that order), after marking the call as running in the db
		// (see below)

		// XXX (reed): should we store the updated started_at + status? we could
		// use this so that if we pick up a call from mq and find its status is
		// running to avoid running the call twice and potentially mark it as
		// errored (built in long running task detector, so to speak...)

		err := c.da.Start(ctx, c.Model())
		if err != nil {
			return err // let another thread try this
		}
	}

	err := c.ct.fireBeforeCall(ctx, c.Model())
	if err != nil {
		return fmt.Errorf("BeforeCall: %v", err)
	}

	return nil
}

func (c *call) End(ctx context.Context, errIn error) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, "agent_call_end")
	defer span.Finish()

	c.CompletedAt = strfmt.DateTime(time.Now())

	switch errIn {
	case nil:
		c.Status = "success"
	case context.DeadlineExceeded:
		c.Status = "timeout"
	default:
		c.Status = "error"
		c.Error = errIn.Error()
	}

	// ensure stats histogram is reasonably bounded
	c.Call.Stats = drivers.Decimate(240, c.Call.Stats)

	if err := c.da.Finish(ctx, c.Model(), c.stderr, c.Type == models.TypeAsync); err != nil {
		common.Logger(ctx).WithError(err).Error("error finalizing call on datastore/mq")
		// note: Not returning err here since the job could have already finished successfully.
	}

	// NOTE call this after InsertLog or the buffer will get reset
	c.stderr.Close()

	if err := c.ct.fireAfterCall(ctx, c.Model()); err != nil {
		return fmt.Errorf("AfterCall: %v", err)
	}

	return errIn // original error, important for use in sync call returns
}