HTTP Triggers hookup (#1086)

* Initial support for invoking triggers

* dupe method

* tighten server constraints

* runner tests not working yet

* basic route tests passing

* post rebase fixes

* add hybrid support for trigger invoke and tests

* consolidate all hybrid evil into one place

* cleanup and make triggers unique by source

* fix oops with Agent

* linting

* review fixes
Owen Cliffe
2018-07-05 18:56:07 +01:00
committed by Reed Allman
parent b07a000a18
commit b8b544ed25
38 changed files with 2208 additions and 865 deletions

View File

@@ -88,24 +88,11 @@ type Agent interface {
io.Closer
AddCallListener(fnext.CallListener)
// Enqueue is to use the agent's sweet sweet client bindings to remotely
// queue async tasks and should be removed from Agent interface ASAP.
Enqueue(context.Context, *models.Call) error
// GetAppID is to get the match of an app name to its ID
GetAppID(ctx context.Context, appName string) (string, error)
// GetAppByID is to get the app by ID
GetAppByID(ctx context.Context, appID string) (*models.App, error)
// GetRoute is to get the route by appId and path
GetRoute(ctx context.Context, appID string, path string) (*models.Route, error)
}
type agent struct {
cfg AgentConfig
da DataAccess
da CallHandler
callListeners []fnext.CallListener
driver drivers.Driver
@@ -121,12 +108,15 @@ type agent struct {
disableAsyncDequeue bool
callOverrider CallOverrider
// deferred actions to call at end of initialisation
onStartup []func()
}
// AgentOption configures an agent at startup
type AgentOption func(*agent) error
// New creates an Agent that executes functions locally as Docker containers.
func New(da DataAccess, options ...AgentOption) Agent {
func New(da CallHandler, options ...AgentOption) Agent {
cfg, err := NewAgentConfig()
if err != nil {
@@ -137,6 +127,10 @@ func New(da DataAccess, options ...AgentOption) Agent {
cfg: *cfg,
}
a.shutWg = common.NewWaitGroup()
a.da = da
a.slotMgr = NewSlotQueueMgr()
// Allow overriding config
for _, option := range options {
err = option(a)
@@ -151,24 +145,31 @@ func New(da DataAccess, options ...AgentOption) Agent {
a.driver = NewDockerDriver(&a.cfg)
}
a.da = da
a.slotMgr = NewSlotQueueMgr()
a.resources = NewResourceTracker(&a.cfg)
a.shutWg = common.NewWaitGroup()
// TODO assert that agent doesn't get started for API nodes up above ?
if a.disableAsyncDequeue {
return a
for _, sup := range a.onStartup {
sup()
}
if !a.shutWg.AddSession(1) {
logrus.Fatalf("cannot start agent, unable to add session")
}
go a.asyncDequeue() // safe shutdown can nanny this fine
return a
}
func (a *agent) addStartup(sup func()) {
a.onStartup = append(a.onStartup, sup)
}
// WithAsync enables async operations on the agent
func WithAsync(dqda DequeueDataAccess) AgentOption {
return func(a *agent) error {
if !a.shutWg.AddSession(1) {
logrus.Fatalf("cannot start agent, unable to add session")
}
a.addStartup(func() {
go a.asyncDequeue(dqda) // safe shutdown can nanny this fine
})
return nil
}
}
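The deferred-startup hooks above mean the async dequeuer no longer starts inside New; WithAsync registers it via addStartup and New fires all hooks only after every option has been applied. A minimal wiring sketch, assuming the in-repo logs and mqs mock packages that the agent tests use:
mq := new(mqs.Mock)
ls := logs.NewMock()
// The agent handles call start/finish directly and dequeues async work
// from the same message queue; both options run before any onStartup
// hook (such as the async dequeuer goroutine) is invoked.
a := New(
	NewDirectCallDataAccess(ls, mq),
	WithAsync(NewDirectDequeueAccess(mq)),
)
defer a.Close()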
func WithConfig(cfg *AgentConfig) AgentOption {
return func(a *agent) error {
a.cfg = *cfg
@@ -176,7 +177,7 @@ func WithConfig(cfg *AgentConfig) AgentOption {
}
}
// Provide a customer driver to agent
// WithDockerDriver provides a custom Docker driver to the agent
func WithDockerDriver(drv drivers.Driver) AgentOption {
return func(a *agent) error {
if a.driver != nil {
@@ -188,14 +189,7 @@ func WithDockerDriver(drv drivers.Driver) AgentOption {
}
}
func WithoutAsyncDequeue() AgentOption {
return func(a *agent) error {
a.disableAsyncDequeue = true
return nil
}
}
// Agents can use this to register a CallOverrider to modify a Call and extensions
// WithCallOverrider registers a CallOverrider to modify a Call and its extensions on call construction
func WithCallOverrider(fn CallOverrider) AgentOption {
return func(a *agent) error {
if a.callOverrider != nil {
@@ -206,7 +200,7 @@ func WithCallOverrider(fn CallOverrider) AgentOption {
}
}
// Create a default docker driver from agent config
// NewDockerDriver creates a default docker driver from agent config
func NewDockerDriver(cfg *AgentConfig) *docker.DockerDriver {
return docker.NewDocker(drivers.Config{
DockerNetworks: cfg.DockerNetworks,
@@ -221,23 +215,6 @@ func NewDockerDriver(cfg *AgentConfig) *docker.DockerDriver {
})
}
func (a *agent) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
return a.da.GetAppByID(ctx, appID)
}
func (a *agent) GetAppID(ctx context.Context, appName string) (string, error) {
return a.da.GetAppID(ctx, appName)
}
func (a *agent) GetRoute(ctx context.Context, appID string, path string) (*models.Route, error) {
return a.da.GetRoute(ctx, appID, path)
}
// TODO shuffle this around somewhere else (maybe)
func (a *agent) Enqueue(ctx context.Context, call *models.Call) error {
return a.da.Enqueue(ctx, call)
}
func (a *agent) Close() error {
var err error
@@ -251,12 +228,6 @@ func (a *agent) Close() error {
}
})
// shutdown any db/queue resources
// associated with DataAccess
daErr := a.da.Close()
if daErr != nil {
return daErr
}
return err
}
@@ -856,7 +827,7 @@ func (a *agent) runHot(ctx context.Context, call *call, tok ResourceToken, state
state.UpdateState(ctx, ContainerStateStart, call.slots)
defer state.UpdateState(ctx, ContainerStateDone, call.slots)
container, closer := NewHotContainer(ctx, call, &a.cfg)
container, closer := newHotContainer(ctx, call, &a.cfg)
defer closer()
logger := logrus.WithFields(logrus.Fields{"id": container.id, "app_id": call.AppID, "route": call.Path, "image": call.Image, "memory": call.Memory, "cpus": call.CPUs, "format": call.Format, "idle_timeout": call.IdleTimeout})
@@ -1041,7 +1012,8 @@ type container struct {
stats *drivers.Stats
}
func NewHotContainer(ctx context.Context, call *call, cfg *AgentConfig) (*container, func()) {
// newHotContainer creates a container that can be used for multiple sequential events
func newHotContainer(ctx context.Context, call *call, cfg *AgentConfig) (*container, func()) {
// if freezer is enabled, be consistent with freezer behavior and
// block stdout and stderr between calls.
isBlockIdleIO := MaxDisabledMsecs != cfg.FreezeIdle

View File

@@ -5,7 +5,6 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math"
@@ -17,7 +16,6 @@ import (
"testing"
"time"
"github.com/fnproject/fn/api/datastore"
"github.com/fnproject/fn/api/id"
"github.com/fnproject/fn/api/logs"
"github.com/fnproject/fn/api/models"
@@ -83,25 +81,21 @@ func TestCallConfigurationRequest(t *testing.T) {
rCfg := models.Config{"ROUTE_VAR": "BAR"}
app := &models.App{ID: "app_id", Name: appName, Config: cfg}
ds := datastore.NewMockInit(
[]*models.App{app},
[]*models.Route{
{
AppID: app.ID,
Config: rCfg,
Path: path,
Image: image,
Type: typ,
Format: format,
Timeout: timeout,
IdleTimeout: idleTimeout,
Memory: memory,
},
},
)
route := &models.Route{
AppID: app.ID,
Config: rCfg,
Path: path,
Image: image,
Type: typ,
Format: format,
Timeout: timeout,
IdleTimeout: idleTimeout,
Memory: memory,
}
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
defer checkClose(t, a)
w := httptest.NewRecorder()
@@ -122,7 +116,7 @@ func TestCallConfigurationRequest(t *testing.T) {
call, err := a.GetCall(
WithWriter(w), // XXX (reed): order matters [for now]
FromRequest(a, app, path, req),
FromRequest(app, route, req),
)
if err != nil {
t.Fatal(err)
@@ -242,10 +236,9 @@ func TestCallConfigurationModel(t *testing.T) {
}
// FromModel doesn't need a datastore, for now...
ds := datastore.NewMockInit()
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
defer checkClose(t, a)
callI, err := a.GetCall(FromModel(cm))
@@ -314,10 +307,9 @@ func TestAsyncCallHeaders(t *testing.T) {
}
// FromModel doesn't need a datastore, for now...
ds := datastore.NewMockInit()
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
defer checkClose(t, a)
callI, err := a.GetCall(FromModel(cm))
@@ -432,9 +424,6 @@ func TestReqTooLarge(t *testing.T) {
Method: "GET",
}
// FromModel doesn't need a datastore, for now...
ds := datastore.NewMockInit()
cfg, err := NewAgentConfig()
if err != nil {
t.Fatal(err)
@@ -443,7 +432,7 @@ func TestReqTooLarge(t *testing.T) {
cfg.MaxRequestSize = 5
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)), WithConfig(cfg))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)), WithConfig(cfg))
defer checkClose(t, a)
_, err = a.GetCall(FromModel(cm))
@@ -494,10 +483,9 @@ func TestSubmitError(t *testing.T) {
}
// FromModel doesn't need a datastore, for now...
ds := datastore.NewMockInit()
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
defer checkClose(t, a)
var wg sync.WaitGroup
@@ -546,26 +534,19 @@ func TestHTTPWithoutContentLengthWorks(t *testing.T) {
url := "http://127.0.0.1:8080/r/" + appName + path
app := &models.App{ID: "app_id", Name: appName}
// we need to load in app & route so that FromRequest works
ds := datastore.NewMockInit(
[]*models.App{app},
[]*models.Route{
{
Path: path,
AppID: app.ID,
Image: "fnproject/fn-test-utils",
Type: "sync",
Format: "http", // this _is_ the test
Timeout: 5,
IdleTimeout: 10,
Memory: 128,
},
},
)
route := &models.Route{
Path: path,
AppID: app.ID,
Image: "fnproject/fn-test-utils",
Type: "sync",
Format: "http", // this _is_ the test
Timeout: 5,
IdleTimeout: 10,
Memory: 128,
}
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
defer checkClose(t, a)
bodOne := `{"echoContent":"yodawg"}`
@@ -580,7 +561,7 @@ func TestHTTPWithoutContentLengthWorks(t *testing.T) {
// grab a buffer so we can read what gets written to this guy
var out bytes.Buffer
callI, err := a.GetCall(FromRequest(a, app, path, req), WithWriter(&out))
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&out))
if err != nil {
t.Fatal(err)
}
@@ -625,11 +606,8 @@ func TestGetCallReturnsResourceImpossibility(t *testing.T) {
Memory: math.MaxUint64,
}
// FromModel doesn't need a datastore, for now...
ds := datastore.NewMockInit()
ls := logs.NewMock()
a := New(NewCachedDataAccess(NewDirectDataAccess(ds, ls, new(mqs.Mock))))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
defer checkClose(t, a)
_, err := a.GetCall(FromModel(call))
@@ -648,25 +626,19 @@ func TestTmpFsRW(t *testing.T) {
app := &models.App{ID: "app_id", Name: appName}
// we need to load in app & route so that FromRequest works
ds := datastore.NewMockInit(
[]*models.App{app},
[]*models.Route{
{
Path: path,
AppID: app.ID,
Image: "fnproject/fn-test-utils",
Type: "sync",
Format: "http", // this _is_ the test
Timeout: 5,
IdleTimeout: 10,
Memory: 64,
},
},
)
route := &models.Route{
Path: path,
AppID: app.ID,
Image: "fnproject/fn-test-utils",
Type: "sync",
Format: "http", // this _is_ the test
Timeout: 5,
IdleTimeout: 10,
Memory: 128,
}
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
defer checkClose(t, a)
// Here we tell fn-test-utils to read file /proc/mounts and create a /tmp/salsa of 4MB
@@ -678,7 +650,7 @@ func TestTmpFsRW(t *testing.T) {
}
var out bytes.Buffer
callI, err := a.GetCall(FromRequest(a, app, path, req), WithWriter(&out))
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&out))
if err != nil {
t.Fatal(err)
}
@@ -745,23 +717,17 @@ func TestTmpFsSize(t *testing.T) {
app := &models.App{ID: "app_id", Name: appName}
// we need to load in app & route so that FromRequest works
ds := datastore.NewMockInit(
[]*models.App{app},
[]*models.Route{
{
Path: path,
AppID: app.ID,
Image: "fnproject/fn-test-utils",
Type: "sync",
Format: "http", // this _is_ the test
Timeout: 5,
IdleTimeout: 10,
Memory: 64,
TmpFsSize: 1,
},
},
)
route := &models.Route{
Path: path,
AppID: app.ID,
Image: "fnproject/fn-test-utils",
Type: "sync",
Format: "http", // this _is_ the test
Timeout: 5,
IdleTimeout: 10,
Memory: 64,
TmpFsSize: 1,
}
cfg, err := NewAgentConfig()
if err != nil {
@@ -771,7 +737,7 @@ func TestTmpFsSize(t *testing.T) {
cfg.MaxTmpFsInodes = 1024
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)), WithConfig(cfg))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)), WithConfig(cfg))
defer checkClose(t, a)
// Here we tell fn-test-utils to read file /proc/mounts and create a /tmp/salsa of 4MB
@@ -783,7 +749,7 @@ func TestTmpFsSize(t *testing.T) {
}
var out bytes.Buffer
callI, err := a.GetCall(FromRequest(a, app, path, req), WithWriter(&out))
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&out))
if err != nil {
t.Fatal(err)
}
@@ -923,25 +889,20 @@ func TestPipesAreClear(t *testing.T) {
ca.IdleTimeout = 60 // keep this bad boy alive
ca.Timeout = 4 // short
app := &models.App{Name: "myapp", ID: ca.AppID}
// we need to load in app & route so that FromRequest works
ds := datastore.NewMockInit(
[]*models.App{app},
[]*models.Route{
{
Path: ca.Path,
AppID: ca.AppID,
Image: ca.Image,
Type: ca.Type,
Format: ca.Format,
Timeout: ca.Timeout,
IdleTimeout: ca.IdleTimeout,
Memory: ca.Memory,
},
},
)
route := &models.Route{
Path: ca.Path,
AppID: ca.AppID,
Image: ca.Image,
Type: ca.Type,
Format: ca.Format,
Timeout: ca.Timeout,
IdleTimeout: ca.IdleTimeout,
Memory: ca.Memory,
}
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
defer checkClose(t, a)
// test read this body after 5s (after call times out) and make sure we don't get yodawg
@@ -960,7 +921,7 @@ func TestPipesAreClear(t *testing.T) {
req.Header.Set("Content-Length", fmt.Sprintf("%d", len(bodOne)))
var outOne bytes.Buffer
callI, err := a.GetCall(FromRequest(a, app, ca.Path, req), WithWriter(&outOne))
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&outOne))
if err != nil {
t.Fatal(err)
}
@@ -994,7 +955,7 @@ func TestPipesAreClear(t *testing.T) {
req.Header.Set("Content-Length", fmt.Sprintf("%d", len(bodTwo)))
var outTwo bytes.Buffer
callI, err = a.GetCall(FromRequest(a, app, ca.Path, req), WithWriter(&outTwo))
callI, err = a.GetCall(FromRequest(app, route, req), WithWriter(&outTwo))
if err != nil {
t.Fatal(err)
}
@@ -1074,25 +1035,20 @@ func TestPipesDontMakeSpuriousCalls(t *testing.T) {
app := &models.App{Name: "myapp"}
app.ID = call.AppID
// we need to load in app & route so that FromRequest works
ds := datastore.NewMockInit(
[]*models.App{app},
[]*models.Route{
{
Path: call.Path,
AppID: call.AppID,
Image: call.Image,
Type: call.Type,
Format: call.Format,
Timeout: call.Timeout,
IdleTimeout: call.IdleTimeout,
Memory: call.Memory,
},
},
)
route := &models.Route{
Path: call.Path,
AppID: call.AppID,
Image: call.Image,
Type: call.Type,
Format: call.Format,
Timeout: call.Timeout,
IdleTimeout: call.IdleTimeout,
Memory: call.Memory,
}
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
defer checkClose(t, a)
bodOne := `{"echoContent":"yodawg"}`
@@ -1102,7 +1058,7 @@ func TestPipesDontMakeSpuriousCalls(t *testing.T) {
}
var outOne bytes.Buffer
callI, err := a.GetCall(FromRequest(a, app, call.Path, req), WithWriter(&outOne))
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&outOne))
if err != nil {
t.Fatal(err)
}
@@ -1127,7 +1083,7 @@ func TestPipesDontMakeSpuriousCalls(t *testing.T) {
}
var outTwo bytes.Buffer
callI, err = a.GetCall(FromRequest(a, app, call.Path, req), WithWriter(&outTwo))
callI, err = a.GetCall(FromRequest(app, route, req), WithWriter(&outTwo))
if err != nil {
t.Fatal(err)
}
@@ -1172,22 +1128,17 @@ func TestNBIOResourceTracker(t *testing.T) {
app := &models.App{ID: "app_id", Name: "myapp"}
app.ID = call.AppID
// we need to load in app & route so that FromRequest works
ds := datastore.NewMockInit(
[]*models.App{app},
[]*models.Route{
{
Path: call.Path,
AppID: call.AppID,
Image: call.Image,
Type: call.Type,
Format: call.Format,
Timeout: call.Timeout,
IdleTimeout: call.IdleTimeout,
Memory: call.Memory,
},
},
)
route := &models.Route{
Path: call.Path,
AppID: call.AppID,
Image: call.Image,
Type: call.Type,
Format: call.Format,
Timeout: call.Timeout,
IdleTimeout: call.IdleTimeout,
Memory: call.Memory,
}
cfg, err := NewAgentConfig()
if err != nil {
@@ -1199,7 +1150,7 @@ func TestNBIOResourceTracker(t *testing.T) {
cfg.HotPoll = 20 * time.Millisecond
ls := logs.NewMock()
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)), WithConfig(cfg))
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)), WithConfig(cfg))
defer checkClose(t, a)
reqCount := 20
@@ -1213,7 +1164,7 @@ func TestNBIOResourceTracker(t *testing.T) {
}
var outOne bytes.Buffer
callI, err := a.GetCall(FromRequest(a, app, call.Path, req), WithWriter(&outOne))
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&outOne))
if err != nil {
t.Fatal(err)
}
@@ -1250,44 +1201,3 @@ func TestNBIOResourceTracker(t *testing.T) {
t.Fatalf("Expected successes, but got %d", ok)
}
}
type closingDataAccess struct {
DataAccess
closeReturn error
closed chan struct{}
}
func newClosingDataAccess(closeReturn error) *closingDataAccess {
ds := datastore.NewMockInit()
ls := logs.NewMock()
return &closingDataAccess{
DataAccess: NewDirectDataAccess(ds, ls, new(mqs.Mock)),
closed: make(chan struct{}),
closeReturn: closeReturn,
}
}
func (da *closingDataAccess) Close() error {
close(da.closed)
return da.closeReturn
}
func TestClosesDataAccess(t *testing.T) {
da := newClosingDataAccess(nil)
a := New(da)
checkClose(t, a)
<-da.closed
}
func TestCloseReturnsDataAccessError(t *testing.T) {
err := errors.New("foo")
da := newClosingDataAccess(err)
a := New(da)
if cerr := a.Close(); cerr != err {
t.Fatalf("Wrong error returned, expected %v but got %v", err, cerr)
}
<-da.closed
}

View File

@@ -11,7 +11,7 @@ import (
"go.opencensus.io/trace"
)
func (a *agent) asyncDequeue() {
func (a *agent) asyncDequeue(dqda DequeueDataAccess) {
// this is just so we can hang up the dequeue request if we get shut down
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -37,7 +37,7 @@ func (a *agent) asyncDequeue() {
case <-a.shutWg.Closer():
a.shutWg.DoneSession()
return
case model, ok := <-a.asyncChew(ctx):
case model, ok := <-a.asyncChew(ctx, dqda):
if ok {
go func(model *models.Call) {
a.asyncRun(ctx, model)
@@ -53,14 +53,14 @@ func (a *agent) asyncDequeue() {
}
}
func (a *agent) asyncChew(ctx context.Context) <-chan *models.Call {
func (a *agent) asyncChew(ctx context.Context, dqda DequeueDataAccess) <-chan *models.Call {
ch := make(chan *models.Call, 1)
go func() {
ctx, cancel := context.WithTimeout(ctx, a.cfg.AsyncChewPoll)
defer cancel()
call, err := a.da.Dequeue(ctx)
call, err := dqda.Dequeue(ctx)
if call != nil {
ch <- call
} else { // call is nil / error

View File

@@ -46,23 +46,15 @@ type CallOverrider func(*models.Call, map[string]string) (map[string]string, err
// TODO build w/o closures... lazy
type CallOpt func(c *call) error
type Param struct {
Key string
Value string
}
type Params []Param
const (
ceMimeType = "application/cloudevents+json"
)
func FromRequest(a Agent, app *models.App, path string, req *http.Request) CallOpt {
// FromRequest initialises a call to a route from an HTTP request
// deprecate with routes
func FromRequest(app *models.App, route *models.Route, req *http.Request) CallOpt {
return func(c *call) error {
ctx := req.Context()
route, err := a.GetRoute(ctx, app.ID, path)
if err != nil {
return err
}
log := common.Logger(ctx)
// Check whether this is a CloudEvent, if coming in via HTTP router (only way currently), then we'll look for a special header
@@ -125,7 +117,7 @@ func FromRequest(a Agent, app *models.App, path string, req *http.Request) CallO
Memory: route.Memory,
CPUs: route.CPUs,
Config: buildConfig(app, route),
Annotations: buildAnnotations(app, route),
Annotations: app.Annotations.MergeChange(route.Annotations),
Headers: req.Header,
CreatedAt: common.DateTime(time.Now()),
URL: reqURL(req),
@@ -139,6 +131,77 @@ func FromRequest(a Agent, app *models.App, path string, req *http.Request) CallO
}
}
// FromHTTPTriggerRequest sets up a call from an HTTP trigger request
func FromHTTPTriggerRequest(app *models.App, fn *models.Fn, trigger *models.Trigger, req *http.Request) CallOpt {
return func(c *call) error {
ctx := req.Context()
log := common.Logger(ctx)
// Check whether this is a CloudEvent, if coming in via HTTP router (only way currently), then we'll look for a special header
// Content-Type header: https://github.com/cloudevents/spec/blob/master/http-transport-binding.md#32-structured-content-mode
// Expected Content-Type for a CloudEvent: application/cloudevents+json; charset=UTF-8
contentType := req.Header.Get("Content-Type")
t, _, err := mime.ParseMediaType(contentType)
if err != nil {
// won't fail here, but log
log.Debugf("Could not parse Content-Type header: %v", err)
} else {
if t == ceMimeType {
c.IsCloudEvent = true
fn.Format = models.FormatCloudEvent
}
}
if fn.Format == "" {
fn.Format = models.FormatDefault
}
id := id.New().String()
// TODO this relies on ordering of opts, but tests make sure it works, probably re-plumb/destroy headers
// TODO async should probably supply an http.ResponseWriter that records the logs, to attach response headers to
if rw, ok := c.w.(http.ResponseWriter); ok {
rw.Header().Add("FN_CALL_ID", id)
}
var syslogURL string
if app.SyslogURL != nil {
syslogURL = *app.SyslogURL
}
c.Call = &models.Call{
ID: id,
Path: trigger.Source,
Image: fn.Image,
// Delay: 0,
Type: "sync",
Format: fn.Format,
// Payload: TODO,
Priority: new(int32), // TODO this is crucial, apparently
Timeout: fn.Timeout,
IdleTimeout: fn.IdleTimeout,
TmpFsSize: 0, // TODO clean up this
Memory: fn.Memory,
CPUs: 0, // TODO clean up this
Config: buildTriggerConfig(app, fn, trigger),
// TODO - this wasn't really the intention here (that annotations would naturally cascade)
// but it seems to be necessary for some runner behaviour
Annotations: app.Annotations.MergeChange(fn.Annotations).MergeChange(trigger.Annotations),
Headers: req.Header,
CreatedAt: common.DateTime(time.Now()),
URL: reqURL(req),
Method: req.Method,
AppID: app.ID,
FnID: fn.ID,
TriggerID: trigger.ID,
SyslogURL: syslogURL,
}
c.req = req
return nil
}
}
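A sketch of how a trigger endpoint might use this option, assuming the app, fn and trigger have already been resolved (e.g. via ReadDataAccess) and that serveTrigger is a hypothetical handler helper; note WithWriter comes before FromHTTPTriggerRequest so the FN_CALL_ID header can be set on the ResponseWriter:
func serveTrigger(a Agent, app *models.App, fn *models.Fn, trigger *models.Trigger, w http.ResponseWriter, req *http.Request) {
	call, err := a.GetCall(
		WithWriter(w), // output and the FN_CALL_ID header go to the client
		FromHTTPTriggerRequest(app, fn, trigger, req),
	)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := a.Submit(call); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}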
func buildConfig(app *models.App, route *models.Route) models.Config {
conf := make(models.Config, 8+len(app.Config)+len(route.Config))
for k, v := range app.Config {
@@ -163,15 +226,24 @@ func buildConfig(app *models.App, route *models.Route) models.Config {
return conf
}
func buildAnnotations(app *models.App, route *models.Route) models.Annotations {
ann := make(models.Annotations, len(app.Annotations)+len(route.Annotations))
for k, v := range app.Annotations {
ann[k] = v
func buildTriggerConfig(app *models.App, fn *models.Fn, trigger *models.Trigger) models.Config {
conf := make(models.Config, 8+len(app.Config)+len(fn.Config))
for k, v := range app.Config {
conf[k] = v
}
for k, v := range route.Annotations {
ann[k] = v
for k, v := range fn.Config {
conf[k] = v
}
return ann
conf["FN_FORMAT"] = fn.Format
conf["FN_APP_NAME"] = app.Name
conf["FN_PATH"] = trigger.Source
// TODO: might be a good idea to pass in: "FN_BASE_PATH" = fmt.Sprintf("/r/%s", appName) || "/" if using DNS entries per app
conf["FN_MEMORY"] = fmt.Sprintf("%d", fn.Memory)
conf["FN_TYPE"] = "sync"
conf["FN_FN_ID"] = fn.ID
return conf
}
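For reference, a sketch of the merged result for a hypothetical fn with Format "http" and 128MB of memory:
conf := buildTriggerConfig(app, fn, trigger)
// conf holds all app.Config and fn.Config entries, plus:
//   FN_FORMAT=http  FN_APP_NAME=<app.Name>  FN_PATH=<trigger.Source>
//   FN_MEMORY=128   FN_TYPE=sync            FN_FN_ID=<fn.ID>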
func reqURL(req *http.Request) string {
@@ -188,9 +260,7 @@ func reqURL(req *http.Request) string {
return req.URL.String()
}
// TODO this currently relies on FromRequest having happened before to create the model
// here, to be a fully qualified model. We probably should double check but having a way
// to bypass will likely be what's used anyway unless forced.
// FromModel creates a call object from an existing stored call model object, reading the body from the stored call payload
func FromModel(mCall *models.Call) CallOpt {
return func(c *call) error {
c.Call = mCall
@@ -207,6 +277,7 @@ func FromModel(mCall *models.Call) CallOpt {
}
}
// FromModelAndInput creates a call object from an existing stored call model object, reading the body from a provided stream
func FromModelAndInput(mCall *models.Call, in io.ReadCloser) CallOpt {
return func(c *call) error {
c.Call = mCall
@@ -223,6 +294,7 @@ func FromModelAndInput(mCall *models.Call, in io.ReadCloser) CallOpt {
}
}
// WithWriter sets the writer that the call uses to send its output message to
// TODO this should be required
func WithWriter(w io.Writer) CallOpt {
return func(c *call) error {
@@ -231,6 +303,7 @@ func WithWriter(w io.Writer) CallOpt {
}
}
// WithContext overrides the context on the call
func WithContext(ctx context.Context) CallOpt {
return func(c *call) error {
c.req = c.req.WithContext(ctx)
@@ -238,6 +311,7 @@ func WithContext(ctx context.Context) CallOpt {
}
}
// WithExtensions adds internal attributes to the call that can be interpreted by extensions in the agent
// Pure runner can use this to pass an extension to the call
func WithExtensions(extensions map[string]string) CallOpt {
return func(c *call) error {
@@ -287,7 +361,7 @@ func (a *agent) GetCall(opts ...CallOpt) (Call, error) {
setupCtx(&c)
c.da = a.da
c.handler = a.da
c.ct = a
c.stderr = setupLogger(c.req.Context(), a.cfg.MaxLogSize, c.Call)
if c.w == nil {
@@ -321,7 +395,7 @@ type call struct {
// IsCloudEvent flag whether this was ingested as a cloud event. This may become the default or only way.
IsCloudEvent bool `json:"is_cloud_event"`
da DataAccess
handler CallHandler
w io.Writer
req *http.Request
stderr io.ReadWriteCloser
@@ -336,6 +410,8 @@ type call struct {
extensions map[string]string
}
// SlotHashId returns a string identity for this call that can be used to uniquely place the call in a given container
// This should correspond to a unique identity (including data changes) of the underlying function
func (c *call) SlotHashId() string {
return c.slotHashId
}
@@ -393,7 +469,7 @@ func (c *call) Start(ctx context.Context) error {
// running to avoid running the call twice and potentially mark it as
// errored (built in long running task detector, so to speak...)
err := c.da.Start(ctx, c.Model())
err := c.handler.Start(ctx, c.Model())
if err != nil {
return err // let another thread try this
}
@@ -426,7 +502,7 @@ func (c *call) End(ctx context.Context, errIn error) error {
// ensure stats histogram is reasonably bounded
c.Call.Stats = drivers.Decimate(240, c.Call.Stats)
if err := c.da.Finish(ctx, c.Model(), c.stderr, c.Type == models.TypeAsync); err != nil {
if err := c.handler.Finish(ctx, c.Model(), c.stderr, c.Type == models.TypeAsync); err != nil {
common.Logger(ctx).WithError(err).Error("error finalizing call on datastore/mq")
// note: Not returning err here since the job could have already finished successfully.
}

View File

@@ -11,26 +11,34 @@ import (
"github.com/patrickmn/go-cache"
)
// DataAccess abstracts the datastore and message queue operations done by the
// agent, so that API nodes and runner nodes can work with the same interface
// but actually operate on the data in different ways (by direct access or by
// mediation through an API node).
type DataAccess interface {
// ReadDataAccess represents the read operations required to operate a load balancer node
type ReadDataAccess interface {
GetAppID(ctx context.Context, appName string) (string, error)
// GetAppByID abstracts querying the datastore for an app.
GetAppByID(ctx context.Context, appID string) (*models.App, error)
GetTriggerBySource(ctx context.Context, appId string, triggerType, source string) (*models.Trigger, error)
GetFnByID(ctx context.Context, fnId string) (*models.Fn, error)
// GetRoute abstracts querying the datastore for a route within an app.
GetRoute(ctx context.Context, appID string, routePath string) (*models.Route, error)
}
// Enqueue will add a Call to the queue (ultimately forwards to mq.Push).
Enqueue(ctx context.Context, mCall *models.Call) error
// DequeueDataAccess abstracts an underlying dequeue for async runners
type DequeueDataAccess interface {
// Dequeue will query the queue for the next available Call that can be run
// by this Agent, and reserve it (ultimately forwards to mq.Reserve).
Dequeue(ctx context.Context) (*models.Call, error)
}
// EnqueueDataAccess abstracts an underlying enqueue for async queueing
type EnqueueDataAccess interface {
// Enqueue will add a Call to the queue (ultimately forwards to mq.Push).
Enqueue(ctx context.Context, mCall *models.Call) error
}
// CallHandler consumes the start and finish events for a call
// This is effectively a callback that is allowed to read the logs.
// TODO Deprecate this - this could be a CallListener except it also consumes logs
type CallHandler interface {
// Start will attempt to start the provided Call within an appropriate
// context.
Start(ctx context.Context, mCall *models.Call) error
@@ -38,25 +46,27 @@ type DataAccess interface {
// Finish will notify the system that the Call has been processed, and
// fulfill the reservation in the queue if the call came from a queue.
Finish(ctx context.Context, mCall *models.Call, stderr io.Reader, async bool) error
}
// Close will wait for any pending operations to complete and
// shuts down connections to the underlying datastore/queue resources.
// Close is not safe to be called from multiple threads.
io.Closer
// DataAccess composes ReadDataAccess, DequeueDataAccess and CallHandler
type DataAccess interface {
ReadDataAccess
DequeueDataAccess
CallHandler
}
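With the split, a component implements only the slice it needs. A sketch of a no-op CallHandler, assuming nothing has to be recorded on start or finish:
type nopCallHandler struct{}

func (nopCallHandler) Start(ctx context.Context, mCall *models.Call) error { return nil }

func (nopCallHandler) Finish(ctx context.Context, mCall *models.Call, stderr io.Reader, async bool) error {
	return nil
}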
// cachedDataAccess wraps a ReadDataAccess and caches the results of GetAppByID and GetRoute.
type CachedDataAccess struct {
DataAccess
type cachedDataAccess struct {
ReadDataAccess
cache *cache.Cache
singleflight singleflight.SingleFlight
}
func NewCachedDataAccess(da DataAccess) DataAccess {
cda := &CachedDataAccess{
DataAccess: da,
cache: cache.New(5*time.Second, 1*time.Minute),
func NewCachedDataAccess(da ReadDataAccess) ReadDataAccess {
cda := &cachedDataAccess{
ReadDataAccess: da,
cache: cache.New(5*time.Second, 1*time.Minute),
}
return cda
}
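Usage is a straight wrap; any ReadDataAccess (direct or the hybrid client) can sit underneath. A sketch:
// rda is any ReadDataAccess; repeated GetAppByID/GetRoute lookups are
// served from the 5-second cache, and singleflight collapses concurrent misses.
rda = NewCachedDataAccess(rda)
app, err := rda.GetAppByID(ctx, appID)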
@@ -69,11 +79,11 @@ func appIDCacheKey(appID string) string {
return "a:" + appID
}
func (da *CachedDataAccess) GetAppID(ctx context.Context, appName string) (string, error) {
return da.DataAccess.GetAppID(ctx, appName)
func (da *cachedDataAccess) GetAppID(ctx context.Context, appName string) (string, error) {
return da.ReadDataAccess.GetAppID(ctx, appName)
}
func (da *CachedDataAccess) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
func (da *cachedDataAccess) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
key := appIDCacheKey(appID)
app, ok := da.cache.Get(key)
if ok {
@@ -82,7 +92,7 @@ func (da *CachedDataAccess) GetAppByID(ctx context.Context, appID string) (*mode
resp, err := da.singleflight.Do(key,
func() (interface{}, error) {
return da.DataAccess.GetAppByID(ctx, appID)
return da.ReadDataAccess.GetAppByID(ctx, appID)
})
if err != nil {
@@ -93,7 +103,7 @@ func (da *CachedDataAccess) GetAppByID(ctx context.Context, appID string) (*mode
return app.(*models.App), nil
}
func (da *CachedDataAccess) GetRoute(ctx context.Context, appID string, routePath string) (*models.Route, error) {
func (da *cachedDataAccess) GetRoute(ctx context.Context, appID string, routePath string) (*models.Route, error) {
key := routeCacheKey(appID, routePath)
r, ok := da.cache.Get(key)
if ok {
@@ -102,7 +112,7 @@ func (da *CachedDataAccess) GetRoute(ctx context.Context, appID string, routePat
resp, err := da.singleflight.Do(key,
func() (interface{}, error) {
return da.DataAccess.GetRoute(ctx, appID, routePath)
return da.ReadDataAccess.GetRoute(ctx, appID, routePath)
})
if err != nil {
@@ -113,48 +123,55 @@ func (da *CachedDataAccess) GetRoute(ctx context.Context, appID string, routePat
return r.(*models.Route), nil
}
// Close invokes close on the underlying DataAccess
func (da *CachedDataAccess) Close() error {
return da.DataAccess.Close()
}
type directDataAccess struct {
mq models.MessageQueue
ds models.Datastore
ls models.LogStore
}
func NewDirectDataAccess(ds models.Datastore, ls models.LogStore, mq models.MessageQueue) DataAccess {
type directDequeue struct {
mq models.MessageQueue
}
func (ddq *directDequeue) Dequeue(ctx context.Context) (*models.Call, error) {
return ddq.mq.Reserve(ctx)
}
func NewDirectDequeueAccess(mq models.MessageQueue) DequeueDataAccess {
return &directDequeue{
mq: mq,
}
}
type directEnqueue struct {
mq models.MessageQueue
}
func NewDirectEnqueueAccess(mq models.MessageQueue) EnqueueDataAccess {
return &directEnqueue{
mq: mq,
}
}
func (da *directEnqueue) Enqueue(ctx context.Context, mCall *models.Call) error {
_, err := da.mq.Push(ctx, mCall)
return err
// TODO: Insert a call in the datastore with the 'queued' state
}
func NewDirectCallDataAccess(ls models.LogStore, mq models.MessageQueue) CallHandler {
da := &directDataAccess{
mq: mq,
ds: ds,
ls: ls,
}
return da
}
func (da *directDataAccess) GetAppID(ctx context.Context, appName string) (string, error) {
return da.ds.GetAppID(ctx, appName)
}
func (da *directDataAccess) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
return da.ds.GetAppByID(ctx, appID)
}
func (da *directDataAccess) GetRoute(ctx context.Context, appID string, routePath string) (*models.Route, error) {
return da.ds.GetRoute(ctx, appID, routePath)
}
func (da *directDataAccess) Enqueue(ctx context.Context, mCall *models.Call) error {
_, err := da.mq.Push(ctx, mCall)
return err
// TODO: Insert a call in the datastore with the 'queued' state
}
func (da *directDataAccess) Dequeue(ctx context.Context) (*models.Call, error) {
return da.mq.Reserve(ctx)
}
func (da *directDataAccess) Start(ctx context.Context, mCall *models.Call) error {
// TODO Access datastore and try a Compare-And-Swap to set the call to
// 'running'. If it fails, delete the message from the MQ and return an
@@ -183,22 +200,18 @@ func (da *directDataAccess) Finish(ctx context.Context, mCall *models.Call, stde
if async {
// XXX (reed): delete MQ message, eventually
// YYY (hhexo): yes, once we have the queued/running/finished mechanics
// return da.mq.Delete(ctx, mCall)
// return cda.mq.Delete(ctx, mCall)
}
return nil
}
// Close calls close on the underlying Datastore and MessageQueue. If the Logstore
// and Datastore are different, it will call Close on the Logstore as well.
func (da *directDataAccess) Close() error {
err := da.ds.Close()
if ls, ok := da.ds.(models.LogStore); ok && ls != da.ls {
if daErr := da.ls.Close(); daErr != nil {
err = daErr
}
}
if mqErr := da.mq.Close(); mqErr != nil {
err = mqErr
}
return err
type noAsyncEnqueueAccess struct{}
func (noAsyncEnqueueAccess) Enqueue(ctx context.Context, mCall *models.Call) error {
return models.ErrAsyncUnsupported
}
// NewUnsupportedAsyncEnqueueAccess is a backstop that errors when you try to enqueue an async operation on a server that doesn't support async
func NewUnsupportedAsyncEnqueueAccess() EnqueueDataAccess {
return &noAsyncEnqueueAccess{}
}
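A sketch of wiring this into a server built without async support, so submissions fail loudly instead of queueing:
enq := NewUnsupportedAsyncEnqueueAccess()
err := enq.Enqueue(ctx, call) // always returns models.ErrAsyncUnsupported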

View File

@@ -39,7 +39,7 @@ func NewClient(u string) (agent.DataAccess, error) {
if uri.Scheme == "" {
uri.Scheme = "http"
}
host := uri.Scheme + "://" + uri.Host + "/v1/"
host := uri.Scheme + "://" + uri.Host + "/v2/"
httpClient := &http.Client{
Timeout: 60 * time.Second,
@@ -66,11 +66,13 @@ func NewClient(u string) (agent.DataAccess, error) {
}, nil
}
var noQuery = map[string]string{}
func (cl *client) Enqueue(ctx context.Context, c *models.Call) error {
ctx, span := trace.StartSpan(ctx, "hybrid_client_enqueue")
defer span.End()
err := cl.do(ctx, c, nil, "PUT", "runner", "async")
err := cl.do(ctx, c, nil, "PUT", noQuery, "runner", "async")
return err
}
@@ -81,7 +83,7 @@ func (cl *client) Dequeue(ctx context.Context) (*models.Call, error) {
var c struct {
C []*models.Call `json:"calls"`
}
err := cl.do(ctx, nil, &c, "GET", "runner", "async")
err := cl.do(ctx, nil, &c, "GET", noQuery, "runner", "async")
if len(c.C) > 0 {
return c.C[0], nil
}
@@ -92,7 +94,7 @@ func (cl *client) Start(ctx context.Context, c *models.Call) error {
ctx, span := trace.StartSpan(ctx, "hybrid_client_start")
defer span.End()
err := cl.do(ctx, c, nil, "POST", "runner", "start")
err := cl.do(ctx, c, nil, "POST", noQuery, "runner", "start")
return err
}
@@ -114,7 +116,7 @@ func (cl *client) Finish(ctx context.Context, c *models.Call, r io.Reader, async
}
// TODO add async bit to query params or body
err = cl.do(ctx, bod, nil, "POST", "runner", "finish")
err = cl.do(ctx, bod, nil, "POST", noQuery, "runner", "finish")
return err
}
@@ -123,21 +125,25 @@ func (cl *client) GetAppID(ctx context.Context, appName string) (string, error)
defer span.End()
var a struct {
A models.App `json:"app"`
Items []*models.App `json:"items"`
}
err := cl.do(ctx, nil, &a, "GET", "apps", appName)
return a.A.ID, err
err := cl.do(ctx, nil, &a, "GET", map[string]string{"name": appName}, "apps")
if len(a.Items) == 0 {
return "", errors.New("app not found")
}
return a.Items[0].ID, err
}
func (cl *client) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
ctx, span := trace.StartSpan(ctx, "hybrid_client_get_app_id")
ctx, span := trace.StartSpan(ctx, "hybrid_client_get_app_by_id")
defer span.End()
var a struct {
A models.App `json:"app"`
}
err := cl.do(ctx, nil, &a, "GET", "runner", "apps", appID)
return &a.A, err
var a models.App
err := cl.do(ctx, nil, &a, "GET", noQuery, "apps", appID)
return &a, err
}
func (cl *client) GetRoute(ctx context.Context, appID, route string) (*models.Route, error) {
@@ -145,11 +151,30 @@ func (cl *client) GetRoute(ctx context.Context, appID, route string) (*models.Ro
defer span.End()
// TODO trim prefix is pretty odd here eh?
var r struct {
R models.Route `json:"route"`
var r = models.Route{}
err := cl.do(ctx, nil, &r, "GET", noQuery, "runner", "apps", appID, "routes", strings.TrimPrefix(route, "/"))
return &r, err
}
func (cl *client) GetTriggerBySource(ctx context.Context, appID string, triggerType, source string) (*models.Trigger, error) {
ctx, span := trace.StartSpan(ctx, "hybrid_client_get_trigger_by_source")
defer span.End()
var trigger models.Trigger
err := cl.do(ctx, nil, &trigger, "GET", noQuery, "runner", "apps", appID, "triggerBySource", triggerType, source)
return &trigger, err
}
func (cl *client) GetFnByID(ctx context.Context, fnID string) (*models.Fn, error) {
ctx, span := trace.StartSpan(ctx, "hybrid_client_get_fn_by_id")
defer span.End()
var fn models.Fn
err := cl.do(ctx, nil, &fn, "GET", noQuery, "fns", fnID)
if err != nil {
return nil, err
}
err := cl.do(ctx, nil, &r, "GET", "runner", "apps", appID, "routes", strings.TrimPrefix(route, "/"))
return &r.R, err
return &fn, nil
}
type httpErr struct {
@@ -157,7 +182,7 @@ type httpErr struct {
error
}
func (cl *client) do(ctx context.Context, request, result interface{}, method string, url ...string) error {
func (cl *client) do(ctx context.Context, request, result interface{}, method string, query map[string]string, url ...string) error {
// TODO determine policy (should we count to infinity?)
var b common.Backoff
@@ -170,7 +195,7 @@ func (cl *client) do(ctx context.Context, request, result interface{}, method st
}
// TODO this isn't re-using buffers very efficiently, but retries should be rare...
err = cl.once(ctx, request, result, method, url...)
err = cl.once(ctx, request, result, method, query, url...)
switch err := err.(type) {
case nil:
return nil
@@ -192,7 +217,7 @@ func (cl *client) do(ctx context.Context, request, result interface{}, method st
return err
}
func (cl *client) once(ctx context.Context, request, result interface{}, method string, url ...string) error {
func (cl *client) once(ctx context.Context, request, result interface{}, method string, query map[string]string, path ...string) error {
ctx, span := trace.StartSpan(ctx, "hybrid_client_http_do")
defer span.End()
@@ -204,7 +229,7 @@ func (cl *client) once(ctx context.Context, request, result interface{}, method
}
}
req, err := http.NewRequest(method, cl.url(url...), &b)
req, err := http.NewRequest(method, cl.url(query, path...), &b)
if err != nil {
return err
}
@@ -221,16 +246,14 @@ func (cl *client) once(ctx context.Context, request, result interface{}, method
if resp.StatusCode >= 300 {
// one of our errors
var msg struct {
Err *struct {
Msg string `json:"message"`
} `json:"error"`
Msg string `json:"message"`
}
// copy into a buffer in case it wasn't from us
var b bytes.Buffer
io.Copy(&b, resp.Body)
json.Unmarshal(b.Bytes(), &msg)
if msg.Err != nil {
return &httpErr{code: resp.StatusCode, error: errors.New(msg.Err.Msg)}
if msg.Msg != "" {
return &httpErr{code: resp.StatusCode, error: errors.New(msg.Msg)}
}
return &httpErr{code: resp.StatusCode, error: errors.New(b.String())}
}
@@ -245,8 +268,20 @@ func (cl *client) once(ctx context.Context, request, result interface{}, method
return nil
}
func (cl *client) url(args ...string) string {
return cl.base + strings.Join(args, "/")
func (cl *client) url(query map[string]string, args ...string) string {
var queryValues = make(url.Values)
for k, v := range query {
queryValues.Add(k, v)
}
queryString := queryValues.Encode()
baseUrl := cl.base + strings.Join(args, "/")
if queryString != "" {
baseUrl = baseUrl + "?" + queryString
}
return baseUrl
}
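The query map keeps v2-style lookups (such as app-by-name) on the same helper as plain path joins. A sketch of both shapes, assuming base is "http://host/v2/":
cl.url(map[string]string{"name": "myapp"}, "apps")
// → http://host/v2/apps?name=myapp
cl.url(noQuery, "runner", "apps", appID)
// → http://host/v2/runner/apps/<appID>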
func (cl *client) Close() error {

View File

@@ -13,6 +13,18 @@ import (
// nopDataStore implements agent.DataAccess
type nopDataStore struct{}
func (cl *nopDataStore) GetTriggerBySource(ctx context.Context, appId string, triggerType, source string) (*models.Trigger, error) {
ctx, span := trace.StartSpan(ctx, "nop_datastore_get_trigger_by_source")
defer span.End()
return nil, errors.New("should not call GetTriggerBySource on a NOP data store")
}
func (cl *nopDataStore) GetFnByID(ctx context.Context, fnId string) (*models.Fn, error) {
ctx, span := trace.StartSpan(ctx, "nop_datastore_get_fn_by_id")
defer span.End()
return nil, errors.New("should not call GetFnByID on a NOP data store")
}
func NewNopDataStore() (agent.DataAccess, error) {
return &nopDataStore{}, nil
}

View File

@@ -19,7 +19,7 @@ import (
type lbAgent struct {
cfg AgentConfig
da DataAccess
cda CallHandler
callListeners []fnext.CallListener
rp pool.RunnerPool
placer pool.Placer
@@ -50,7 +50,7 @@ func WithLBCallOverrider(fn CallOverrider) LBAgentOption {
// NewLBAgent creates an Agent that knows how to load-balance function calls
// across a group of runner nodes.
func NewLBAgent(da DataAccess, rp pool.RunnerPool, p pool.Placer, options ...LBAgentOption) (Agent, error) {
func NewLBAgent(da CallHandler, rp pool.RunnerPool, p pool.Placer, options ...LBAgentOption) (Agent, error) {
// Yes, LBAgent and Agent both use an AgentConfig.
cfg, err := NewAgentConfig()
@@ -60,7 +60,7 @@ func NewLBAgent(da DataAccess, rp pool.RunnerPool, p pool.Placer, options ...LBA
a := &lbAgent{
cfg: *cfg,
da: da,
cda: da,
rp: rp,
placer: p,
shutWg: common.NewWaitGroup(),
@@ -93,23 +93,6 @@ func (a *lbAgent) fireAfterCall(ctx context.Context, call *models.Call) error {
return fireAfterCallFun(a.callListeners, ctx, call)
}
// implements Agent
// GetAppID is to get the match of an app name to its ID
func (a *lbAgent) GetAppID(ctx context.Context, appName string) (string, error) {
return a.da.GetAppID(ctx, appName)
}
// implements Agent
// GetAppByID is to get the app by ID
func (a *lbAgent) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
return a.da.GetAppByID(ctx, appID)
}
// implements Agent
func (a *lbAgent) GetRoute(ctx context.Context, appID string, path string) (*models.Route, error) {
return a.da.GetRoute(ctx, appID, path)
}
// implements Agent
func (a *lbAgent) GetCall(opts ...CallOpt) (Call, error) {
var c call
@@ -144,7 +127,7 @@ func (a *lbAgent) GetCall(opts ...CallOpt) (Call, error) {
setupCtx(&c)
c.isLB = true
c.da = a.da
c.handler = a.cda
c.ct = a
c.stderr = &nullReadWriter{}
c.slotHashId = getSlotQueueKey(&c)

View File

@@ -17,7 +17,7 @@ import (
"sync/atomic"
"time"
runner "github.com/fnproject/fn/api/agent/grpc"
"github.com/fnproject/fn/api/agent/grpc"
"github.com/fnproject/fn/api/common"
"github.com/fnproject/fn/api/models"
"github.com/fnproject/fn/fnext"
@@ -494,26 +494,11 @@ type pureRunner struct {
inflight int32
}
// implements Agent
func (pr *pureRunner) GetAppID(ctx context.Context, appName string) (string, error) {
return pr.a.GetAppID(ctx, appName)
}
// implements Agent
func (pr *pureRunner) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
return pr.a.GetAppByID(ctx, appID)
}
// implements Agent
func (pr *pureRunner) GetCall(opts ...CallOpt) (Call, error) {
return pr.a.GetCall(opts...)
}
// implements Agent
func (pr *pureRunner) GetRoute(ctx context.Context, appID string, path string) (*models.Route, error) {
return pr.a.GetRoute(ctx, appID, path)
}
// implements Agent
func (pr *pureRunner) Submit(Call) error {
return errors.New("Submit cannot be called directly in a Pure Runner.")
@@ -536,11 +521,6 @@ func (pr *pureRunner) AddCallListener(cl fnext.CallListener) {
pr.a.AddCallListener(cl)
}
// implements Agent
func (pr *pureRunner) Enqueue(context.Context, *models.Call) error {
return errors.New("Enqueue cannot be called directly in a Pure Runner.")
}
func (pr *pureRunner) spawnSubmit(state *callHandle) {
go func() {
err := pr.a.Submit(state.c)
@@ -653,9 +633,9 @@ func (pr *pureRunner) Status(ctx context.Context, _ *empty.Empty) (*runner.Runne
}, nil
}
func DefaultPureRunner(cancel context.CancelFunc, addr string, da DataAccess, cert string, key string, ca string) (Agent, error) {
func DefaultPureRunner(cancel context.CancelFunc, addr string, da CallHandler, cert string, key string, ca string) (Agent, error) {
agent := New(da, WithoutAsyncDequeue())
agent := New(da)
// WARNING: SSL creds are optional.
if cert == "" || key == "" || ca == "" {

View File

@@ -278,6 +278,7 @@ func (a *slotQueueMgr) deleteSlotQueue(slots *slotQueue) bool {
return isDeleted
}
// TODO this should be at least SHA-256 or more
var shapool = &sync.Pool{New: func() interface{} { return sha1.New() }}
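A sketch of the TODO above, assuming nothing downstream depends on the 20-byte SHA-1 digest length; sha256.New also returns a hash.Hash, so the pool is a drop-in swap:
var shapool = &sync.Pool{New: func() interface{} { return sha256.New() }} // import crypto/sha256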
// TODO do better; once we have app+route versions this function