Mirror of https://github.com/fnproject/fn.git (synced 2022-10-28 21:29:17 +03:00)
HTTP Triggers hookup (#1086)
* Initial support for invoking triggers
* Dupe method
* Tighten server constraints
* Runner tests not working yet
* Basic route tests passing
* Post-rebase fixes
* Add hybrid support for trigger invoke and tests
* Consolidate all hybrid evil into one place
* Cleanup and make triggers unique by source
* Fix oops with Agent
* Linting
* Review fixes
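The diff below wires HTTP triggers end to end: an incoming request is resolved to its app, fn, and trigger records and turned into a call via the new FromHTTPTriggerRequest option. As a hedged sketch of how those pieces fit together (the handler shape, the app-name lookup, and the error handling are illustrative assumptions, not code from this PR):

```go
package sketch

import (
	"net/http"

	"github.com/fnproject/fn/api/agent"
)

// serveHTTPTrigger is illustrative only: it shows the lookup order and the new
// trigger-aware CallOpt, not how the fn server actually mounts its handlers.
func serveHTTPTrigger(a agent.Agent, rda agent.ReadDataAccess, w http.ResponseWriter, r *http.Request) error {
	ctx := r.Context()

	// Resolve the app, then the trigger by (type, source), then its fn.
	appID, err := rda.GetAppID(ctx, "myapp") // "myapp" is a placeholder app name
	if err != nil {
		return err
	}
	app, err := rda.GetAppByID(ctx, appID)
	if err != nil {
		return err
	}
	trigger, err := rda.GetTriggerBySource(ctx, app.ID, "http", r.URL.Path)
	if err != nil {
		return err
	}
	fn, err := rda.GetFnByID(ctx, trigger.FnID)
	if err != nil {
		return err
	}

	// Build the call with the new trigger-aware CallOpt and run it.
	call, err := a.GetCall(
		agent.WithWriter(w), // order matters, per the existing FromRequest comment
		agent.FromHTTPTriggerRequest(app, fn, trigger, r),
	)
	if err != nil {
		return err
	}
	return a.Submit(call)
}
```

Only the agent-package identifiers used above (GetCall, WithWriter, FromHTTPTriggerRequest, Submit, and the ReadDataAccess lookups) come from the package as it stands after this change; the rest is scaffolding.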
@@ -88,24 +88,11 @@ type Agent interface {
io.Closer

AddCallListener(fnext.CallListener)

// Enqueue is to use the agent's sweet sweet client bindings to remotely
// queue async tasks and should be removed from Agent interface ASAP.
Enqueue(context.Context, *models.Call) error

// GetAppID is to get the match of an app name to its ID
GetAppID(ctx context.Context, appName string) (string, error)

// GetAppByID is to get the app by ID
GetAppByID(ctx context.Context, appID string) (*models.App, error)

// GetRoute is to get the route by appId and path
GetRoute(ctx context.Context, appID string, path string) (*models.Route, error)
}

type agent struct {
cfg AgentConfig
da DataAccess
da CallHandler
callListeners []fnext.CallListener

driver drivers.Driver
@@ -121,12 +108,15 @@ type agent struct {
disableAsyncDequeue bool

callOverrider CallOverrider
// deferred actions to call at end of initialisation
onStartup []func()
}

// AgentOption configures an agent at startup
type AgentOption func(*agent) error

// New creates an Agent that executes functions locally as Docker containers.
func New(da DataAccess, options ...AgentOption) Agent {
func New(da CallHandler, options ...AgentOption) Agent {

cfg, err := NewAgentConfig()
if err != nil {
@@ -137,6 +127,10 @@ func New(da DataAccess, options ...AgentOption) Agent {
cfg: *cfg,
}

a.shutWg = common.NewWaitGroup()
a.da = da
a.slotMgr = NewSlotQueueMgr()

// Allow overriding config
for _, option := range options {
err = option(a)
@@ -151,24 +145,31 @@ func New(da DataAccess, options ...AgentOption) Agent {
a.driver = NewDockerDriver(&a.cfg)
}

a.da = da
a.slotMgr = NewSlotQueueMgr()
a.resources = NewResourceTracker(&a.cfg)
a.shutWg = common.NewWaitGroup()

// TODO assert that agent doesn't get started for API nodes up above ?
if a.disableAsyncDequeue {
return a
for _, sup := range a.onStartup {
sup()
}

if !a.shutWg.AddSession(1) {
logrus.Fatalf("cannot start agent, unable to add session")
}
go a.asyncDequeue() // safe shutdown can nanny this fine

return a
}

func (a *agent) addStartup(sup func()) {
a.onStartup = append(a.onStartup, sup)

}

// WithAsync Enables Async operations on the agent
func WithAsync(dqda DequeueDataAccess) AgentOption {
return func(a *agent) error {
if !a.shutWg.AddSession(1) {
logrus.Fatalf("cannot start agent, unable to add session")
}
a.addStartup(func() {
go a.asyncDequeue(dqda) // safe shutdown can nanny this fine
})
return nil
}
}
func WithConfig(cfg *AgentConfig) AgentOption {
|
||||
return func(a *agent) error {
|
||||
a.cfg = *cfg
|
||||
@@ -176,7 +177,7 @@ func WithConfig(cfg *AgentConfig) AgentOption {
|
||||
}
|
||||
}
|
||||
|
||||
// Provide a customer driver to agent
|
||||
// WithDockerDriver Provides a customer driver to agent
|
||||
func WithDockerDriver(drv drivers.Driver) AgentOption {
|
||||
return func(a *agent) error {
|
||||
if a.driver != nil {
|
||||
@@ -188,14 +189,7 @@ func WithDockerDriver(drv drivers.Driver) AgentOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithoutAsyncDequeue() AgentOption {
|
||||
return func(a *agent) error {
|
||||
a.disableAsyncDequeue = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Agents can use this to register a CallOverrider to modify a Call and extensions
|
||||
// WithCallOverrider registers register a CallOverrider to modify a Call and extensions on call construction
|
||||
func WithCallOverrider(fn CallOverrider) AgentOption {
|
||||
return func(a *agent) error {
|
||||
if a.callOverrider != nil {
|
||||
@@ -206,7 +200,7 @@ func WithCallOverrider(fn CallOverrider) AgentOption {
|
||||
}
|
||||
}
|
||||
|
||||
// Create a default docker driver from agent config
|
||||
// NewDockerDriver creates a default docker driver from agent config
|
||||
func NewDockerDriver(cfg *AgentConfig) *docker.DockerDriver {
|
||||
return docker.NewDocker(drivers.Config{
|
||||
DockerNetworks: cfg.DockerNetworks,
|
||||
@@ -221,23 +215,6 @@ func NewDockerDriver(cfg *AgentConfig) *docker.DockerDriver {
|
||||
})
|
||||
}
|
||||
|
||||
func (a *agent) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
|
||||
return a.da.GetAppByID(ctx, appID)
|
||||
}
|
||||
|
||||
func (a *agent) GetAppID(ctx context.Context, appName string) (string, error) {
|
||||
return a.da.GetAppID(ctx, appName)
|
||||
}
|
||||
|
||||
func (a *agent) GetRoute(ctx context.Context, appID string, path string) (*models.Route, error) {
|
||||
return a.da.GetRoute(ctx, appID, path)
|
||||
}
|
||||
|
||||
// TODO shuffle this around somewhere else (maybe)
|
||||
func (a *agent) Enqueue(ctx context.Context, call *models.Call) error {
|
||||
return a.da.Enqueue(ctx, call)
|
||||
}
|
||||
|
||||
func (a *agent) Close() error {
|
||||
var err error
|
||||
|
||||
@@ -251,12 +228,6 @@ func (a *agent) Close() error {
|
||||
}
|
||||
})
|
||||
|
||||
// shutdown any db/queue resources
|
||||
// associated with DataAccess
|
||||
daErr := a.da.Close()
|
||||
if daErr != nil {
|
||||
return daErr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -856,7 +827,7 @@ func (a *agent) runHot(ctx context.Context, call *call, tok ResourceToken, state
|
||||
state.UpdateState(ctx, ContainerStateStart, call.slots)
|
||||
defer state.UpdateState(ctx, ContainerStateDone, call.slots)
|
||||
|
||||
container, closer := NewHotContainer(ctx, call, &a.cfg)
|
||||
container, closer := newHotContainer(ctx, call, &a.cfg)
|
||||
defer closer()
|
||||
|
||||
logger := logrus.WithFields(logrus.Fields{"id": container.id, "app_id": call.AppID, "route": call.Path, "image": call.Image, "memory": call.Memory, "cpus": call.CPUs, "format": call.Format, "idle_timeout": call.IdleTimeout})
|
||||
@@ -1041,7 +1012,8 @@ type container struct {
|
||||
stats *drivers.Stats
|
||||
}
|
||||
|
||||
func NewHotContainer(ctx context.Context, call *call, cfg *AgentConfig) (*container, func()) {
|
||||
//newHotContainer creates a container that can be used for multiple sequential events
|
||||
func newHotContainer(ctx context.Context, call *call, cfg *AgentConfig) (*container, func()) {
|
||||
// if freezer is enabled, be consistent with freezer behavior and
|
||||
// block stdout and stderr between calls.
|
||||
isBlockIdleIO := MaxDisabledMsecs != cfg.FreezeIdle
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
@@ -17,7 +16,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/fnproject/fn/api/datastore"
|
||||
"github.com/fnproject/fn/api/id"
|
||||
"github.com/fnproject/fn/api/logs"
|
||||
"github.com/fnproject/fn/api/models"
|
||||
@@ -83,10 +81,7 @@ func TestCallConfigurationRequest(t *testing.T) {
|
||||
rCfg := models.Config{"ROUTE_VAR": "BAR"}
|
||||
|
||||
app := &models.App{ID: "app_id", Name: appName, Config: cfg}
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Route{
|
||||
{
|
||||
route := &models.Route{
|
||||
AppID: app.ID,
|
||||
Config: rCfg,
|
||||
Path: path,
|
||||
@@ -96,12 +91,11 @@ func TestCallConfigurationRequest(t *testing.T) {
|
||||
Timeout: timeout,
|
||||
IdleTimeout: idleTimeout,
|
||||
Memory: memory,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
ls := logs.NewMock()
|
||||
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
|
||||
defer checkClose(t, a)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
@@ -122,7 +116,7 @@ func TestCallConfigurationRequest(t *testing.T) {
|
||||
|
||||
call, err := a.GetCall(
|
||||
WithWriter(w), // XXX (reed): order matters [for now]
|
||||
FromRequest(a, app, path, req),
|
||||
FromRequest(app, route, req),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -242,10 +236,9 @@ func TestCallConfigurationModel(t *testing.T) {
|
||||
}
|
||||
|
||||
// FromModel doesn't need a datastore, for now...
|
||||
ds := datastore.NewMockInit()
|
||||
ls := logs.NewMock()
|
||||
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
|
||||
defer checkClose(t, a)
|
||||
|
||||
callI, err := a.GetCall(FromModel(cm))
|
||||
@@ -314,10 +307,9 @@ func TestAsyncCallHeaders(t *testing.T) {
|
||||
}
|
||||
|
||||
// FromModel doesn't need a datastore, for now...
|
||||
ds := datastore.NewMockInit()
|
||||
ls := logs.NewMock()
|
||||
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
|
||||
defer checkClose(t, a)
|
||||
|
||||
callI, err := a.GetCall(FromModel(cm))
|
||||
@@ -432,9 +424,6 @@ func TestReqTooLarge(t *testing.T) {
|
||||
Method: "GET",
|
||||
}
|
||||
|
||||
// FromModel doesn't need a datastore, for now...
|
||||
ds := datastore.NewMockInit()
|
||||
|
||||
cfg, err := NewAgentConfig()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -443,7 +432,7 @@ func TestReqTooLarge(t *testing.T) {
|
||||
cfg.MaxRequestSize = 5
|
||||
ls := logs.NewMock()
|
||||
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)), WithConfig(cfg))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)), WithConfig(cfg))
|
||||
defer checkClose(t, a)
|
||||
|
||||
_, err = a.GetCall(FromModel(cm))
|
||||
@@ -494,10 +483,9 @@ func TestSubmitError(t *testing.T) {
|
||||
}
|
||||
|
||||
// FromModel doesn't need a datastore, for now...
|
||||
ds := datastore.NewMockInit()
|
||||
ls := logs.NewMock()
|
||||
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
|
||||
defer checkClose(t, a)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
@@ -546,12 +534,7 @@ func TestHTTPWithoutContentLengthWorks(t *testing.T) {
|
||||
url := "http://127.0.0.1:8080/r/" + appName + path
|
||||
|
||||
app := &models.App{ID: "app_id", Name: appName}
|
||||
|
||||
// we need to load in app & route so that FromRequest works
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Route{
|
||||
{
|
||||
route := &models.Route{
|
||||
Path: path,
|
||||
AppID: app.ID,
|
||||
Image: "fnproject/fn-test-utils",
|
||||
@@ -560,12 +543,10 @@ func TestHTTPWithoutContentLengthWorks(t *testing.T) {
|
||||
Timeout: 5,
|
||||
IdleTimeout: 10,
|
||||
Memory: 128,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
ls := logs.NewMock()
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
|
||||
defer checkClose(t, a)
|
||||
|
||||
bodOne := `{"echoContent":"yodawg"}`
|
||||
@@ -580,7 +561,7 @@ func TestHTTPWithoutContentLengthWorks(t *testing.T) {
|
||||
|
||||
// grab a buffer so we can read what gets written to this guy
|
||||
var out bytes.Buffer
|
||||
callI, err := a.GetCall(FromRequest(a, app, path, req), WithWriter(&out))
|
||||
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&out))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -625,11 +606,8 @@ func TestGetCallReturnsResourceImpossibility(t *testing.T) {
|
||||
Memory: math.MaxUint64,
|
||||
}
|
||||
|
||||
// FromModel doesn't need a datastore, for now...
|
||||
ds := datastore.NewMockInit()
|
||||
|
||||
ls := logs.NewMock()
|
||||
a := New(NewCachedDataAccess(NewDirectDataAccess(ds, ls, new(mqs.Mock))))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
|
||||
defer checkClose(t, a)
|
||||
|
||||
_, err := a.GetCall(FromModel(call))
|
||||
@@ -648,11 +626,7 @@ func TestTmpFsRW(t *testing.T) {
|
||||
|
||||
app := &models.App{ID: "app_id", Name: appName}
|
||||
|
||||
// we need to load in app & route so that FromRequest works
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Route{
|
||||
{
|
||||
route := &models.Route{
|
||||
Path: path,
|
||||
AppID: app.ID,
|
||||
Image: "fnproject/fn-test-utils",
|
||||
@@ -660,13 +634,11 @@ func TestTmpFsRW(t *testing.T) {
|
||||
Format: "http", // this _is_ the test
|
||||
Timeout: 5,
|
||||
IdleTimeout: 10,
|
||||
Memory: 64,
|
||||
},
|
||||
},
|
||||
)
|
||||
Memory: 128,
|
||||
}
|
||||
|
||||
ls := logs.NewMock()
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
|
||||
defer checkClose(t, a)
|
||||
|
||||
// Here we tell fn-test-utils to read file /proc/mounts and create a /tmp/salsa of 4MB
|
||||
@@ -678,7 +650,7 @@ func TestTmpFsRW(t *testing.T) {
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
callI, err := a.GetCall(FromRequest(a, app, path, req), WithWriter(&out))
|
||||
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&out))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -745,11 +717,7 @@ func TestTmpFsSize(t *testing.T) {
|
||||
|
||||
app := &models.App{ID: "app_id", Name: appName}
|
||||
|
||||
// we need to load in app & route so that FromRequest works
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Route{
|
||||
{
|
||||
route := &models.Route{
|
||||
Path: path,
|
||||
AppID: app.ID,
|
||||
Image: "fnproject/fn-test-utils",
|
||||
@@ -759,9 +727,7 @@ func TestTmpFsSize(t *testing.T) {
|
||||
IdleTimeout: 10,
|
||||
Memory: 64,
|
||||
TmpFsSize: 1,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
cfg, err := NewAgentConfig()
|
||||
if err != nil {
|
||||
@@ -771,7 +737,7 @@ func TestTmpFsSize(t *testing.T) {
|
||||
cfg.MaxTmpFsInodes = 1024
|
||||
|
||||
ls := logs.NewMock()
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)), WithConfig(cfg))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)), WithConfig(cfg))
|
||||
defer checkClose(t, a)
|
||||
|
||||
// Here we tell fn-test-utils to read file /proc/mounts and create a /tmp/salsa of 4MB
|
||||
@@ -783,7 +749,7 @@ func TestTmpFsSize(t *testing.T) {
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
callI, err := a.GetCall(FromRequest(a, app, path, req), WithWriter(&out))
|
||||
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&out))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -923,11 +889,8 @@ func TestPipesAreClear(t *testing.T) {
|
||||
ca.IdleTimeout = 60 // keep this bad boy alive
|
||||
ca.Timeout = 4 // short
|
||||
app := &models.App{Name: "myapp", ID: ca.AppID}
|
||||
// we need to load in app & route so that FromRequest works
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Route{
|
||||
{
|
||||
|
||||
route := &models.Route{
|
||||
Path: ca.Path,
|
||||
AppID: ca.AppID,
|
||||
Image: ca.Image,
|
||||
@@ -936,12 +899,10 @@ func TestPipesAreClear(t *testing.T) {
|
||||
Timeout: ca.Timeout,
|
||||
IdleTimeout: ca.IdleTimeout,
|
||||
Memory: ca.Memory,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
ls := logs.NewMock()
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
|
||||
defer checkClose(t, a)
|
||||
|
||||
// test read this body after 5s (after call times out) and make sure we don't get yodawg
|
||||
@@ -960,7 +921,7 @@ func TestPipesAreClear(t *testing.T) {
|
||||
req.Header.Set("Content-Length", fmt.Sprintf("%d", len(bodOne)))
|
||||
|
||||
var outOne bytes.Buffer
|
||||
callI, err := a.GetCall(FromRequest(a, app, ca.Path, req), WithWriter(&outOne))
|
||||
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&outOne))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -994,7 +955,7 @@ func TestPipesAreClear(t *testing.T) {
|
||||
req.Header.Set("Content-Length", fmt.Sprintf("%d", len(bodTwo)))
|
||||
|
||||
var outTwo bytes.Buffer
|
||||
callI, err = a.GetCall(FromRequest(a, app, ca.Path, req), WithWriter(&outTwo))
|
||||
callI, err = a.GetCall(FromRequest(app, route, req), WithWriter(&outTwo))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1074,11 +1035,8 @@ func TestPipesDontMakeSpuriousCalls(t *testing.T) {
|
||||
app := &models.App{Name: "myapp"}
|
||||
|
||||
app.ID = call.AppID
|
||||
// we need to load in app & route so that FromRequest works
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Route{
|
||||
{
|
||||
|
||||
route := &models.Route{
|
||||
Path: call.Path,
|
||||
AppID: call.AppID,
|
||||
Image: call.Image,
|
||||
@@ -1087,12 +1045,10 @@ func TestPipesDontMakeSpuriousCalls(t *testing.T) {
|
||||
Timeout: call.Timeout,
|
||||
IdleTimeout: call.IdleTimeout,
|
||||
Memory: call.Memory,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
ls := logs.NewMock()
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)))
|
||||
defer checkClose(t, a)
|
||||
|
||||
bodOne := `{"echoContent":"yodawg"}`
|
||||
@@ -1102,7 +1058,7 @@ func TestPipesDontMakeSpuriousCalls(t *testing.T) {
|
||||
}
|
||||
|
||||
var outOne bytes.Buffer
|
||||
callI, err := a.GetCall(FromRequest(a, app, call.Path, req), WithWriter(&outOne))
|
||||
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&outOne))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1127,7 +1083,7 @@ func TestPipesDontMakeSpuriousCalls(t *testing.T) {
|
||||
}
|
||||
|
||||
var outTwo bytes.Buffer
|
||||
callI, err = a.GetCall(FromRequest(a, app, call.Path, req), WithWriter(&outTwo))
|
||||
callI, err = a.GetCall(FromRequest(app, route, req), WithWriter(&outTwo))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1172,11 +1128,8 @@ func TestNBIOResourceTracker(t *testing.T) {
|
||||
app := &models.App{ID: "app_id", Name: "myapp"}
|
||||
|
||||
app.ID = call.AppID
|
||||
// we need to load in app & route so that FromRequest works
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Route{
|
||||
{
|
||||
|
||||
route := &models.Route{
|
||||
Path: call.Path,
|
||||
AppID: call.AppID,
|
||||
Image: call.Image,
|
||||
@@ -1185,9 +1138,7 @@ func TestNBIOResourceTracker(t *testing.T) {
|
||||
Timeout: call.Timeout,
|
||||
IdleTimeout: call.IdleTimeout,
|
||||
Memory: call.Memory,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
cfg, err := NewAgentConfig()
|
||||
if err != nil {
|
||||
@@ -1199,7 +1150,7 @@ func TestNBIOResourceTracker(t *testing.T) {
|
||||
cfg.HotPoll = 20 * time.Millisecond
|
||||
|
||||
ls := logs.NewMock()
|
||||
a := New(NewDirectDataAccess(ds, ls, new(mqs.Mock)), WithConfig(cfg))
|
||||
a := New(NewDirectCallDataAccess(ls, new(mqs.Mock)), WithConfig(cfg))
|
||||
defer checkClose(t, a)
|
||||
|
||||
reqCount := 20
|
||||
@@ -1213,7 +1164,7 @@ func TestNBIOResourceTracker(t *testing.T) {
|
||||
}
|
||||
|
||||
var outOne bytes.Buffer
|
||||
callI, err := a.GetCall(FromRequest(a, app, call.Path, req), WithWriter(&outOne))
|
||||
callI, err := a.GetCall(FromRequest(app, route, req), WithWriter(&outOne))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -1250,44 +1201,3 @@ func TestNBIOResourceTracker(t *testing.T) {
|
||||
t.Fatalf("Expected successes, but got %d", ok)
|
||||
}
|
||||
}
|
||||
|
||||
type closingDataAccess struct {
|
||||
DataAccess
|
||||
closeReturn error
|
||||
closed chan struct{}
|
||||
}
|
||||
|
||||
func newClosingDataAccess(closeReturn error) *closingDataAccess {
|
||||
ds := datastore.NewMockInit()
|
||||
ls := logs.NewMock()
|
||||
return &closingDataAccess{
|
||||
DataAccess: NewDirectDataAccess(ds, ls, new(mqs.Mock)),
|
||||
closed: make(chan struct{}),
|
||||
closeReturn: closeReturn,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (da *closingDataAccess) Close() error {
|
||||
close(da.closed)
|
||||
return da.closeReturn
|
||||
}
|
||||
|
||||
func TestClosesDataAccess(t *testing.T) {
|
||||
da := newClosingDataAccess(nil)
|
||||
|
||||
a := New(da)
|
||||
checkClose(t, a)
|
||||
<-da.closed
|
||||
}
|
||||
|
||||
func TestCloseReturnsDataAccessError(t *testing.T) {
|
||||
err := errors.New("foo")
|
||||
da := newClosingDataAccess(err)
|
||||
a := New(da)
|
||||
|
||||
if cerr := a.Close(); cerr != err {
|
||||
t.Fatalf("Wrong error returned, expected %v but got %v", err, cerr)
|
||||
}
|
||||
<-da.closed
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
func (a *agent) asyncDequeue() {
|
||||
func (a *agent) asyncDequeue(dqda DequeueDataAccess) {
|
||||
// this is just so we can hang up the dequeue request if we get shut down
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@@ -37,7 +37,7 @@ func (a *agent) asyncDequeue() {
|
||||
case <-a.shutWg.Closer():
|
||||
a.shutWg.DoneSession()
|
||||
return
|
||||
case model, ok := <-a.asyncChew(ctx):
|
||||
case model, ok := <-a.asyncChew(ctx, dqda):
|
||||
if ok {
|
||||
go func(model *models.Call) {
|
||||
a.asyncRun(ctx, model)
|
||||
@@ -53,14 +53,14 @@ func (a *agent) asyncDequeue() {
|
||||
}
|
||||
}
|
||||
|
||||
func (a *agent) asyncChew(ctx context.Context) <-chan *models.Call {
|
||||
func (a *agent) asyncChew(ctx context.Context, dqda DequeueDataAccess) <-chan *models.Call {
|
||||
ch := make(chan *models.Call, 1)
|
||||
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(ctx, a.cfg.AsyncChewPoll)
|
||||
defer cancel()
|
||||
|
||||
call, err := a.da.Dequeue(ctx)
|
||||
call, err := dqda.Dequeue(ctx)
|
||||
if call != nil {
|
||||
ch <- call
|
||||
} else { // call is nil / error
|
||||
|
||||
@@ -46,23 +46,15 @@ type CallOverrider func(*models.Call, map[string]string) (map[string]string, err
|
||||
// TODO build w/o closures... lazy
|
||||
type CallOpt func(c *call) error
|
||||
|
||||
type Param struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
type Params []Param
|
||||
|
||||
const (
|
||||
ceMimeType = "application/cloudevents+json"
|
||||
)
|
||||
|
||||
func FromRequest(a Agent, app *models.App, path string, req *http.Request) CallOpt {
|
||||
// FromRequest initialises a call to a route from an HTTP request
|
||||
// deprecate with routes
|
||||
func FromRequest(app *models.App, route *models.Route, req *http.Request) CallOpt {
|
||||
return func(c *call) error {
|
||||
ctx := req.Context()
|
||||
route, err := a.GetRoute(ctx, app.ID, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log := common.Logger(ctx)
|
||||
// Check whether this is a CloudEvent, if coming in via HTTP router (only way currently), then we'll look for a special header
|
||||
@@ -125,7 +117,7 @@ func FromRequest(a Agent, app *models.App, path string, req *http.Request) CallO
|
||||
Memory: route.Memory,
|
||||
CPUs: route.CPUs,
|
||||
Config: buildConfig(app, route),
|
||||
Annotations: buildAnnotations(app, route),
|
||||
Annotations: app.Annotations.MergeChange(route.Annotations),
|
||||
Headers: req.Header,
|
||||
CreatedAt: common.DateTime(time.Now()),
|
||||
URL: reqURL(req),
|
||||
@@ -139,6 +131,77 @@ func FromRequest(a Agent, app *models.App, path string, req *http.Request) CallO
|
||||
}
|
||||
}
|
||||
|
||||
// Sets up a call from an http trigger request
|
||||
func FromHTTPTriggerRequest(app *models.App, fn *models.Fn, trigger *models.Trigger, req *http.Request) CallOpt {
|
||||
return func(c *call) error {
|
||||
ctx := req.Context()
|
||||
|
||||
log := common.Logger(ctx)
|
||||
// Check whether this is a CloudEvent, if coming in via HTTP router (only way currently), then we'll look for a special header
|
||||
// Content-Type header: https://github.com/cloudevents/spec/blob/master/http-transport-binding.md#32-structured-content-mode
|
||||
// Expected Content-Type for a CloudEvent: application/cloudevents+json; charset=UTF-8
|
||||
contentType := req.Header.Get("Content-Type")
|
||||
t, _, err := mime.ParseMediaType(contentType)
|
||||
if err != nil {
|
||||
// won't fail here, but log
|
||||
log.Debugf("Could not parse Content-Type header: %v", err)
|
||||
} else {
|
||||
if t == ceMimeType {
|
||||
c.IsCloudEvent = true
|
||||
fn.Format = models.FormatCloudEvent
|
||||
}
|
||||
}
|
||||
|
||||
if fn.Format == "" {
|
||||
fn.Format = models.FormatDefault
|
||||
}
|
||||
|
||||
id := id.New().String()
|
||||
|
||||
// TODO this relies on ordering of opts, but tests make sure it works, probably re-plumb/destroy headers
|
||||
// TODO async should probably supply an http.ResponseWriter that records the logs, to attach response headers to
|
||||
if rw, ok := c.w.(http.ResponseWriter); ok {
|
||||
rw.Header().Add("FN_CALL_ID", id)
|
||||
}
|
||||
|
||||
var syslogURL string
|
||||
if app.SyslogURL != nil {
|
||||
syslogURL = *app.SyslogURL
|
||||
}
|
||||
|
||||
c.Call = &models.Call{
|
||||
ID: id,
|
||||
Path: trigger.Source,
|
||||
Image: fn.Image,
|
||||
// Delay: 0,
|
||||
Type: "sync",
|
||||
Format: fn.Format,
|
||||
// Payload: TODO,
|
||||
Priority: new(int32), // TODO this is crucial, apparently
|
||||
Timeout: fn.Timeout,
|
||||
IdleTimeout: fn.IdleTimeout,
|
||||
TmpFsSize: 0, // TODO clean up this
|
||||
Memory: fn.Memory,
|
||||
CPUs: 0, // TODO clean up this
|
||||
Config: buildTriggerConfig(app, fn, trigger),
|
||||
// TODO - this wasn't really the intention here (that annotations would naturally cascade
|
||||
// but seems to be necessary for some runner behaviour
|
||||
Annotations: app.Annotations.MergeChange(fn.Annotations).MergeChange(trigger.Annotations),
|
||||
Headers: req.Header,
|
||||
CreatedAt: common.DateTime(time.Now()),
|
||||
URL: reqURL(req),
|
||||
Method: req.Method,
|
||||
AppID: app.ID,
|
||||
FnID: fn.ID,
|
||||
TriggerID: trigger.ID,
|
||||
SyslogURL: syslogURL,
|
||||
}
|
||||
|
||||
c.req = req
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func buildConfig(app *models.App, route *models.Route) models.Config {
|
||||
conf := make(models.Config, 8+len(app.Config)+len(route.Config))
|
||||
for k, v := range app.Config {
|
||||
@@ -163,15 +226,24 @@ func buildConfig(app *models.App, route *models.Route) models.Config {
|
||||
return conf
|
||||
}
|
||||
|
||||
func buildAnnotations(app *models.App, route *models.Route) models.Annotations {
|
||||
ann := make(models.Annotations, len(app.Annotations)+len(route.Annotations))
|
||||
for k, v := range app.Annotations {
|
||||
ann[k] = v
|
||||
func buildTriggerConfig(app *models.App, fn *models.Fn, trigger *models.Trigger) models.Config {
|
||||
conf := make(models.Config, 8+len(app.Config)+len(fn.Config))
|
||||
for k, v := range app.Config {
|
||||
conf[k] = v
|
||||
}
|
||||
for k, v := range route.Annotations {
|
||||
ann[k] = v
|
||||
for k, v := range fn.Config {
|
||||
conf[k] = v
|
||||
}
|
||||
return ann
|
||||
|
||||
conf["FN_FORMAT"] = fn.Format
|
||||
conf["FN_APP_NAME"] = app.Name
|
||||
conf["FN_PATH"] = trigger.Source
|
||||
// TODO: might be a good idea to pass in: "FN_BASE_PATH" = fmt.Sprintf("/r/%s", appName) || "/" if using DNS entries per app
|
||||
conf["FN_MEMORY"] = fmt.Sprintf("%d", fn.Memory)
|
||||
conf["FN_TYPE"] = "sync"
|
||||
conf["FN_FN_ID"] = fn.ID
|
||||
|
||||
return conf
|
||||
}
|
||||
|
||||
func reqURL(req *http.Request) string {
|
||||
@@ -188,9 +260,7 @@ func reqURL(req *http.Request) string {
|
||||
return req.URL.String()
|
||||
}
|
||||
|
||||
// TODO this currently relies on FromRequest having happened before to create the model
|
||||
// here, to be a fully qualified model. We probably should double check but having a way
|
||||
// to bypass will likely be what's used anyway unless forced.
|
||||
// FromModel creates a call object from an existing stored call model object, reading the body from the stored call payload
|
||||
func FromModel(mCall *models.Call) CallOpt {
|
||||
return func(c *call) error {
|
||||
c.Call = mCall
|
||||
@@ -207,6 +277,7 @@ func FromModel(mCall *models.Call) CallOpt {
|
||||
}
|
||||
}
|
||||
|
||||
// FromModelAndInput creates a call object from an existing stored call model object , reading the body from a provided stream
|
||||
func FromModelAndInput(mCall *models.Call, in io.ReadCloser) CallOpt {
|
||||
return func(c *call) error {
|
||||
c.Call = mCall
|
||||
@@ -223,6 +294,7 @@ func FromModelAndInput(mCall *models.Call, in io.ReadCloser) CallOpt {
|
||||
}
|
||||
}
|
||||
|
||||
// WithWriter sets the writier that the call uses to send its output message to
|
||||
// TODO this should be required
|
||||
func WithWriter(w io.Writer) CallOpt {
|
||||
return func(c *call) error {
|
||||
@@ -231,6 +303,7 @@ func WithWriter(w io.Writer) CallOpt {
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext overrides the context on the call
|
||||
func WithContext(ctx context.Context) CallOpt {
|
||||
return func(c *call) error {
|
||||
c.req = c.req.WithContext(ctx)
|
||||
@@ -238,6 +311,7 @@ func WithContext(ctx context.Context) CallOpt {
|
||||
}
|
||||
}
|
||||
|
||||
// WithExtensions adds internal attributes to the call that can be interpreted by extensions in the agent
|
||||
// Pure runner can use this to pass an extension to the call
|
||||
func WithExtensions(extensions map[string]string) CallOpt {
|
||||
return func(c *call) error {
|
||||
@@ -287,7 +361,7 @@ func (a *agent) GetCall(opts ...CallOpt) (Call, error) {
|
||||
|
||||
setupCtx(&c)
|
||||
|
||||
c.da = a.da
|
||||
c.handler = a.da
|
||||
c.ct = a
|
||||
c.stderr = setupLogger(c.req.Context(), a.cfg.MaxLogSize, c.Call)
|
||||
if c.w == nil {
|
||||
@@ -321,7 +395,7 @@ type call struct {
|
||||
// IsCloudEvent flag whether this was ingested as a cloud event. This may become the default or only way.
|
||||
IsCloudEvent bool `json:"is_cloud_event"`
|
||||
|
||||
da DataAccess
|
||||
handler CallHandler
|
||||
w io.Writer
|
||||
req *http.Request
|
||||
stderr io.ReadWriteCloser
|
||||
@@ -336,6 +410,8 @@ type call struct {
|
||||
extensions map[string]string
|
||||
}
|
||||
|
||||
// SlotHashId returns a string identity for this call that can be used to uniquely place the call in a given container
|
||||
// This should correspond to a unique identity (including data changes) of the underlying function
|
||||
func (c *call) SlotHashId() string {
|
||||
return c.slotHashId
|
||||
}
|
||||
@@ -393,7 +469,7 @@ func (c *call) Start(ctx context.Context) error {
|
||||
// running to avoid running the call twice and potentially mark it as
|
||||
// errored (built in long running task detector, so to speak...)
|
||||
|
||||
err := c.da.Start(ctx, c.Model())
|
||||
err := c.handler.Start(ctx, c.Model())
|
||||
if err != nil {
|
||||
return err // let another thread try this
|
||||
}
|
||||
@@ -426,7 +502,7 @@ func (c *call) End(ctx context.Context, errIn error) error {
|
||||
// ensure stats histogram is reasonably bounded
|
||||
c.Call.Stats = drivers.Decimate(240, c.Call.Stats)
|
||||
|
||||
if err := c.da.Finish(ctx, c.Model(), c.stderr, c.Type == models.TypeAsync); err != nil {
|
||||
if err := c.handler.Finish(ctx, c.Model(), c.stderr, c.Type == models.TypeAsync); err != nil {
|
||||
common.Logger(ctx).WithError(err).Error("error finalizing call on datastore/mq")
|
||||
// note: Not returning err here since the job could have already finished successfully.
|
||||
}
|
||||
|
||||
@@ -11,26 +11,34 @@ import (
"github.com/patrickmn/go-cache"
)

// DataAccess abstracts the datastore and message queue operations done by the
// agent, so that API nodes and runner nodes can work with the same interface
// but actually operate on the data in different ways (by direct access or by
// mediation through an API node).
type DataAccess interface {
//ReadDataAccess represents read operations required to operate a load balancer node
type ReadDataAccess interface {
GetAppID(ctx context.Context, appName string) (string, error)

// GetAppByID abstracts querying the datastore for an app.
GetAppByID(ctx context.Context, appID string) (*models.App, error)

GetTriggerBySource(ctx context.Context, appId string, triggerType, source string) (*models.Trigger, error)
GetFnByID(ctx context.Context, fnId string) (*models.Fn, error)
// GetRoute abstracts querying the datastore for a route within an app.
GetRoute(ctx context.Context, appID string, routePath string) (*models.Route, error)
}

// Enqueue will add a Call to the queue (ultimately forwards to mq.Push).
Enqueue(ctx context.Context, mCall *models.Call) error

//DequeueDataAccess abstracts an underlying dequeue for async runners
type DequeueDataAccess interface {
// Dequeue will query the queue for the next available Call that can be run
// by this Agent, and reserve it (ultimately forwards to mq.Reserve).
Dequeue(ctx context.Context) (*models.Call, error)
}

//EnqueueDataAccess abstracts an underying enqueue for async queueing
type EnqueueDataAccess interface {
// Enqueue will add a Call to the queue (ultimately forwards to mq.Push).
Enqueue(ctx context.Context, mCall *models.Call) error
}

// CallHandler consumes the start and finish events for a call
// This is effectively a callback that is allowed to read the logs -
// TODO Deprecate this - this could be a CallListener except it also consumes logs
type CallHandler interface {
// Start will attempt to start the provided Call within an appropriate
// context.
Start(ctx context.Context, mCall *models.Call) error
@@ -38,24 +46,26 @@ type DataAccess interface {
// Finish will notify the system that the Call has been processed, and
// fulfill the reservation in the queue if the call came from a queue.
Finish(ctx context.Context, mCall *models.Call, stderr io.Reader, async bool) error
}

// Close will wait for any pending operations to complete and
// shuts down connections to the underlying datastore/queue resources.
// Close is not safe to be called from multiple threads.
io.Closer
// DataAccess is currently
type DataAccess interface {
ReadDataAccess
DequeueDataAccess
CallHandler
}
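The monolithic DataAccess is now just the composition above. As a hedged illustration (not code from this diff), a runner node that still wants async dequeue might assemble its agent from the narrower pieces, reusing the mock log store and message queue the tests in this change use:

```go
package sketch

import (
	"github.com/fnproject/fn/api/agent"
	"github.com/fnproject/fn/api/logs"
	"github.com/fnproject/fn/api/mqs"
)

// buildRunnerAgent is illustrative wiring, not part of the diff.
func buildRunnerAgent() agent.Agent {
	ls := logs.NewMock() // placeholder LogStore, as the tests here use
	mq := new(mqs.Mock)  // placeholder MessageQueue

	// CallHandler now carries only per-call Start/Finish bookkeeping.
	handler := agent.NewDirectCallDataAccess(ls, mq)

	// Async dequeue is opt-in, supplied through a DequeueDataAccess.
	return agent.New(handler, agent.WithAsync(agent.NewDirectDequeueAccess(mq)))
}
```

The NewUnsupportedAsyncEnqueueAccess backstop added further down is the EnqueueDataAccess counterpart for servers that do not support async.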
|
||||
|
||||
// CachedDataAccess wraps a DataAccess and caches the results of GetApp and GetRoute.
|
||||
type CachedDataAccess struct {
|
||||
DataAccess
|
||||
type cachedDataAccess struct {
|
||||
ReadDataAccess
|
||||
|
||||
cache *cache.Cache
|
||||
singleflight singleflight.SingleFlight
|
||||
}
|
||||
|
||||
func NewCachedDataAccess(da DataAccess) DataAccess {
|
||||
cda := &CachedDataAccess{
|
||||
DataAccess: da,
|
||||
func NewCachedDataAccess(da ReadDataAccess) ReadDataAccess {
|
||||
cda := &cachedDataAccess{
|
||||
ReadDataAccess: da,
|
||||
cache: cache.New(5*time.Second, 1*time.Minute),
|
||||
}
|
||||
return cda
|
||||
@@ -69,11 +79,11 @@ func appIDCacheKey(appID string) string {
|
||||
return "a:" + appID
|
||||
}
|
||||
|
||||
func (da *CachedDataAccess) GetAppID(ctx context.Context, appName string) (string, error) {
|
||||
return da.DataAccess.GetAppID(ctx, appName)
|
||||
func (da *cachedDataAccess) GetAppID(ctx context.Context, appName string) (string, error) {
|
||||
return da.ReadDataAccess.GetAppID(ctx, appName)
|
||||
}
|
||||
|
||||
func (da *CachedDataAccess) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
|
||||
func (da *cachedDataAccess) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
|
||||
key := appIDCacheKey(appID)
|
||||
app, ok := da.cache.Get(key)
|
||||
if ok {
|
||||
@@ -82,7 +92,7 @@ func (da *CachedDataAccess) GetAppByID(ctx context.Context, appID string) (*mode
|
||||
|
||||
resp, err := da.singleflight.Do(key,
|
||||
func() (interface{}, error) {
|
||||
return da.DataAccess.GetAppByID(ctx, appID)
|
||||
return da.ReadDataAccess.GetAppByID(ctx, appID)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
@@ -93,7 +103,7 @@ func (da *CachedDataAccess) GetAppByID(ctx context.Context, appID string) (*mode
|
||||
return app.(*models.App), nil
|
||||
}
|
||||
|
||||
func (da *CachedDataAccess) GetRoute(ctx context.Context, appID string, routePath string) (*models.Route, error) {
|
||||
func (da *cachedDataAccess) GetRoute(ctx context.Context, appID string, routePath string) (*models.Route, error) {
|
||||
key := routeCacheKey(appID, routePath)
|
||||
r, ok := da.cache.Get(key)
|
||||
if ok {
|
||||
@@ -102,7 +112,7 @@ func (da *CachedDataAccess) GetRoute(ctx context.Context, appID string, routePat
|
||||
|
||||
resp, err := da.singleflight.Do(key,
|
||||
func() (interface{}, error) {
|
||||
return da.DataAccess.GetRoute(ctx, appID, routePath)
|
||||
return da.ReadDataAccess.GetRoute(ctx, appID, routePath)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
@@ -113,48 +123,55 @@ func (da *CachedDataAccess) GetRoute(ctx context.Context, appID string, routePat
|
||||
return r.(*models.Route), nil
|
||||
}
|
||||
|
||||
// Close invokes close on the underlying DataAccess
|
||||
func (da *CachedDataAccess) Close() error {
|
||||
return da.DataAccess.Close()
|
||||
}
|
||||
|
||||
type directDataAccess struct {
|
||||
mq models.MessageQueue
|
||||
ds models.Datastore
|
||||
ls models.LogStore
|
||||
}
|
||||
|
||||
func NewDirectDataAccess(ds models.Datastore, ls models.LogStore, mq models.MessageQueue) DataAccess {
|
||||
type directDequeue struct {
|
||||
mq models.MessageQueue
|
||||
}
|
||||
|
||||
func (ddq *directDequeue) Dequeue(ctx context.Context) (*models.Call, error) {
|
||||
return ddq.mq.Reserve(ctx)
|
||||
}
|
||||
|
||||
func NewDirectDequeueAccess(mq models.MessageQueue) DequeueDataAccess {
|
||||
return &directDequeue{
|
||||
mq: mq,
|
||||
}
|
||||
}
|
||||
|
||||
type directEnequeue struct {
|
||||
mq models.MessageQueue
|
||||
}
|
||||
|
||||
func NewDirectEnqueueAccess(mq models.MessageQueue) EnqueueDataAccess {
|
||||
return &directEnequeue{
|
||||
mq: mq,
|
||||
}
|
||||
}
|
||||
|
||||
func (da *directEnequeue) Enqueue(ctx context.Context, mCall *models.Call) error {
|
||||
_, err := da.mq.Push(ctx, mCall)
|
||||
return err
|
||||
// TODO: Insert a call in the datastore with the 'queued' state
|
||||
}
|
||||
|
||||
func NewDirectCallDataAccess(ls models.LogStore, mq models.MessageQueue) CallHandler {
|
||||
da := &directDataAccess{
|
||||
mq: mq,
|
||||
ds: ds,
|
||||
ls: ls,
|
||||
}
|
||||
return da
|
||||
}
|
||||
|
||||
func (da *directDataAccess) GetAppID(ctx context.Context, appName string) (string, error) {
|
||||
return da.ds.GetAppID(ctx, appName)
|
||||
}
|
||||
|
||||
func (da *directDataAccess) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
|
||||
return da.ds.GetAppByID(ctx, appID)
|
||||
}
|
||||
|
||||
func (da *directDataAccess) GetRoute(ctx context.Context, appID string, routePath string) (*models.Route, error) {
|
||||
return da.ds.GetRoute(ctx, appID, routePath)
|
||||
}
|
||||
|
||||
func (da *directDataAccess) Enqueue(ctx context.Context, mCall *models.Call) error {
|
||||
_, err := da.mq.Push(ctx, mCall)
|
||||
return err
|
||||
// TODO: Insert a call in the datastore with the 'queued' state
|
||||
}
|
||||
|
||||
func (da *directDataAccess) Dequeue(ctx context.Context) (*models.Call, error) {
|
||||
return da.mq.Reserve(ctx)
|
||||
}
|
||||
|
||||
func (da *directDataAccess) Start(ctx context.Context, mCall *models.Call) error {
|
||||
// TODO Access datastore and try a Compare-And-Swap to set the call to
|
||||
// 'running'. If it fails, delete the message from the MQ and return an
|
||||
@@ -183,22 +200,18 @@ func (da *directDataAccess) Finish(ctx context.Context, mCall *models.Call, stde
|
||||
if async {
|
||||
// XXX (reed): delete MQ message, eventually
|
||||
// YYY (hhexo): yes, once we have the queued/running/finished mechanics
|
||||
// return da.mq.Delete(ctx, mCall)
|
||||
// return cda.mq.Delete(ctx, mCall)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close calls close on the underlying Datastore and MessageQueue. If the Logstore
|
||||
// and Datastore are different, it will call Close on the Logstore as well.
|
||||
func (da *directDataAccess) Close() error {
|
||||
err := da.ds.Close()
|
||||
if ls, ok := da.ds.(models.LogStore); ok && ls != da.ls {
|
||||
if daErr := da.ls.Close(); daErr != nil {
|
||||
err = daErr
|
||||
}
|
||||
}
|
||||
if mqErr := da.mq.Close(); mqErr != nil {
|
||||
err = mqErr
|
||||
}
|
||||
return err
|
||||
type noAsyncEnqueueAccess struct{}
|
||||
|
||||
func (noAsyncEnqueueAccess) Enqueue(ctx context.Context, mCall *models.Call) error {
|
||||
return models.ErrAsyncUnsupported
|
||||
}
|
||||
|
||||
//NewUnsupportedEnqueueAccess is a backstop that errors when you try to enqueue an async operation on a server that doesn't support async
|
||||
func NewUnsupportedAsyncEnqueueAccess() EnqueueDataAccess {
|
||||
return &noAsyncEnqueueAccess{}
|
||||
}
|
||||
|
||||
@@ -39,7 +39,7 @@ func NewClient(u string) (agent.DataAccess, error) {
|
||||
if uri.Scheme == "" {
|
||||
uri.Scheme = "http"
|
||||
}
|
||||
host := uri.Scheme + "://" + uri.Host + "/v1/"
|
||||
host := uri.Scheme + "://" + uri.Host + "/v2/"
|
||||
|
||||
httpClient := &http.Client{
|
||||
Timeout: 60 * time.Second,
|
||||
@@ -66,11 +66,13 @@ func NewClient(u string) (agent.DataAccess, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
var noQuery = map[string]string{}
|
||||
|
||||
func (cl *client) Enqueue(ctx context.Context, c *models.Call) error {
|
||||
ctx, span := trace.StartSpan(ctx, "hybrid_client_enqueue")
|
||||
defer span.End()
|
||||
|
||||
err := cl.do(ctx, c, nil, "PUT", "runner", "async")
|
||||
err := cl.do(ctx, c, nil, "PUT", noQuery, "runner", "async")
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -81,7 +83,7 @@ func (cl *client) Dequeue(ctx context.Context) (*models.Call, error) {
|
||||
var c struct {
|
||||
C []*models.Call `json:"calls"`
|
||||
}
|
||||
err := cl.do(ctx, nil, &c, "GET", "runner", "async")
|
||||
err := cl.do(ctx, nil, &c, "GET", noQuery, "runner", "async")
|
||||
if len(c.C) > 0 {
|
||||
return c.C[0], nil
|
||||
}
|
||||
@@ -92,7 +94,7 @@ func (cl *client) Start(ctx context.Context, c *models.Call) error {
|
||||
ctx, span := trace.StartSpan(ctx, "hybrid_client_start")
|
||||
defer span.End()
|
||||
|
||||
err := cl.do(ctx, c, nil, "POST", "runner", "start")
|
||||
err := cl.do(ctx, c, nil, "POST", noQuery, "runner", "start")
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -114,7 +116,7 @@ func (cl *client) Finish(ctx context.Context, c *models.Call, r io.Reader, async
|
||||
}
|
||||
|
||||
// TODO add async bit to query params or body
|
||||
err = cl.do(ctx, bod, nil, "POST", "runner", "finish")
|
||||
err = cl.do(ctx, bod, nil, "POST", noQuery, "runner", "finish")
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -123,21 +125,25 @@ func (cl *client) GetAppID(ctx context.Context, appName string) (string, error)
|
||||
defer span.End()
|
||||
|
||||
var a struct {
|
||||
A models.App `json:"app"`
|
||||
Items []*models.App `json:"items"`
|
||||
}
|
||||
err := cl.do(ctx, nil, &a, "GET", "apps", appName)
|
||||
return a.A.ID, err
|
||||
|
||||
err := cl.do(ctx, nil, &a, "GET", map[string]string{"name": appName}, "apps")
|
||||
|
||||
if len(a.Items) == 0 {
|
||||
return "", errors.New("app not found")
|
||||
}
|
||||
|
||||
return a.Items[0].ID, err
|
||||
}
|
||||
|
||||
func (cl *client) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "hybrid_client_get_app_id")
|
||||
ctx, span := trace.StartSpan(ctx, "hybrid_client_get_app_by_id")
|
||||
defer span.End()
|
||||
|
||||
var a struct {
|
||||
A models.App `json:"app"`
|
||||
}
|
||||
err := cl.do(ctx, nil, &a, "GET", "runner", "apps", appID)
|
||||
return &a.A, err
|
||||
var a models.App
|
||||
err := cl.do(ctx, nil, &a, "GET", noQuery, "apps", appID)
|
||||
return &a, err
|
||||
}
|
||||
|
||||
func (cl *client) GetRoute(ctx context.Context, appID, route string) (*models.Route, error) {
|
||||
@@ -145,11 +151,30 @@ func (cl *client) GetRoute(ctx context.Context, appID, route string) (*models.Ro
|
||||
defer span.End()
|
||||
|
||||
// TODO trim prefix is pretty odd here eh?
|
||||
var r struct {
|
||||
R models.Route `json:"route"`
|
||||
var r = models.Route{}
|
||||
err := cl.do(ctx, nil, &r, "GET", noQuery, "runner", "apps", appID, "routes", strings.TrimPrefix(route, "/"))
|
||||
return &r, err
|
||||
}
|
||||
|
||||
func (cl *client) GetTriggerBySource(ctx context.Context, appID string, triggerType, source string) (*models.Trigger, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "hybrid_client_get_trigger_by_source")
|
||||
defer span.End()
|
||||
|
||||
var trigger models.Trigger
|
||||
err := cl.do(ctx, nil, &trigger, "GET", noQuery, "runner", "apps", appID, "triggerBySource", triggerType, source)
|
||||
return &trigger, err
|
||||
}
|
||||
|
||||
func (cl *client) GetFnByID(ctx context.Context, fnID string) (*models.Fn, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "hybrid_client_get_fn_by_id")
|
||||
defer span.End()
|
||||
|
||||
var fn models.Fn
|
||||
err := cl.do(ctx, nil, &fn, "GET", noQuery, "fns", fnID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err := cl.do(ctx, nil, &r, "GET", "runner", "apps", appID, "routes", strings.TrimPrefix(route, "/"))
|
||||
return &r.R, err
|
||||
return &fn, nil
|
||||
}
|
||||
|
||||
type httpErr struct {
|
||||
@@ -157,7 +182,7 @@ type httpErr struct {
|
||||
error
|
||||
}
|
||||
|
||||
func (cl *client) do(ctx context.Context, request, result interface{}, method string, url ...string) error {
|
||||
func (cl *client) do(ctx context.Context, request, result interface{}, method string, query map[string]string, url ...string) error {
|
||||
// TODO determine policy (should we count to infinity?)
|
||||
|
||||
var b common.Backoff
|
||||
@@ -170,7 +195,7 @@ func (cl *client) do(ctx context.Context, request, result interface{}, method st
|
||||
}
|
||||
|
||||
// TODO this isn't re-using buffers very efficiently, but retries should be rare...
|
||||
err = cl.once(ctx, request, result, method, url...)
|
||||
err = cl.once(ctx, request, result, method, query, url...)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
@@ -192,7 +217,7 @@ func (cl *client) do(ctx context.Context, request, result interface{}, method st
|
||||
return err
|
||||
}
|
||||
|
||||
func (cl *client) once(ctx context.Context, request, result interface{}, method string, url ...string) error {
|
||||
func (cl *client) once(ctx context.Context, request, result interface{}, method string, query map[string]string, path ...string) error {
|
||||
ctx, span := trace.StartSpan(ctx, "hybrid_client_http_do")
|
||||
defer span.End()
|
||||
|
||||
@@ -204,7 +229,7 @@ func (cl *client) once(ctx context.Context, request, result interface{}, method
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, cl.url(url...), &b)
|
||||
req, err := http.NewRequest(method, cl.url(query, path...), &b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -221,16 +246,14 @@ func (cl *client) once(ctx context.Context, request, result interface{}, method
|
||||
if resp.StatusCode >= 300 {
|
||||
// one of our errors
|
||||
var msg struct {
|
||||
Err *struct {
|
||||
Msg string `json:"message"`
|
||||
} `json:"error"`
|
||||
}
|
||||
// copy into a buffer in case it wasn't from us
|
||||
var b bytes.Buffer
|
||||
io.Copy(&b, resp.Body)
|
||||
json.Unmarshal(b.Bytes(), &msg)
|
||||
if msg.Err != nil {
|
||||
return &httpErr{code: resp.StatusCode, error: errors.New(msg.Err.Msg)}
|
||||
if msg.Msg != "" {
|
||||
return &httpErr{code: resp.StatusCode, error: errors.New(msg.Msg)}
|
||||
}
|
||||
return &httpErr{code: resp.StatusCode, error: errors.New(b.String())}
|
||||
}
|
||||
@@ -245,8 +268,20 @@ func (cl *client) once(ctx context.Context, request, result interface{}, method
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cl *client) url(args ...string) string {
return cl.base + strings.Join(args, "/")
func (cl *client) url(query map[string]string, args ...string) string {

var queryValues = make(url.Values)
for k, v := range query {
queryValues.Add(k, v)
}
queryString := queryValues.Encode()

baseUrl := cl.base + strings.Join(args, "/")

if queryString != "" {
baseUrl = baseUrl + "?" + queryString
}
return baseUrl
}
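Since do and once now thread a query map through to url, a lookup by app name against the v2 API renders roughly as follows (values are placeholders, and the fragment assumes it sits inside the hybrid package because client is unexported):

```go
// Illustrative only: how url() now composes path segments and query parameters.
func exampleAppLookupURL(cl *client) string {
	// With base == "http://fn-api.example.com/v2/" this yields
	// "http://fn-api.example.com/v2/apps?name=myapp".
	return cl.url(map[string]string{"name": "myapp"}, "apps")
}
```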
|
||||
|
||||
func (cl *client) Close() error {
|
||||
|
||||
@@ -13,6 +13,18 @@ import (
|
||||
// nopDataStore implements agent.DataAccess
|
||||
type nopDataStore struct{}
|
||||
|
||||
func (cl *nopDataStore) GetTriggerBySource(ctx context.Context, appId string, triggerType, source string) (*models.Trigger, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "nop_datastore_get_trigger_by_source")
|
||||
defer span.End()
|
||||
return nil, errors.New("should not call GetTriggerBySource on a NOP data store")
|
||||
}
|
||||
|
||||
func (cl *nopDataStore) GetFnByID(ctx context.Context, fnId string) (*models.Fn, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "nop_datastore_get_fn_by_id")
|
||||
defer span.End()
|
||||
return nil, errors.New("should not call GetFnByID on a NOP data store")
|
||||
}
|
||||
|
||||
func NewNopDataStore() (agent.DataAccess, error) {
|
||||
return &nopDataStore{}, nil
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
|
||||
type lbAgent struct {
|
||||
cfg AgentConfig
|
||||
da DataAccess
|
||||
cda CallHandler
|
||||
callListeners []fnext.CallListener
|
||||
rp pool.RunnerPool
|
||||
placer pool.Placer
|
||||
@@ -50,7 +50,7 @@ func WithLBCallOverrider(fn CallOverrider) LBAgentOption {
|
||||
|
||||
// NewLBAgent creates an Agent that knows how to load-balance function calls
|
||||
// across a group of runner nodes.
|
||||
func NewLBAgent(da DataAccess, rp pool.RunnerPool, p pool.Placer, options ...LBAgentOption) (Agent, error) {
|
||||
func NewLBAgent(da CallHandler, rp pool.RunnerPool, p pool.Placer, options ...LBAgentOption) (Agent, error) {
|
||||
|
||||
// Yes, LBAgent and Agent both use an AgentConfig.
|
||||
cfg, err := NewAgentConfig()
|
||||
@@ -60,7 +60,7 @@ func NewLBAgent(da DataAccess, rp pool.RunnerPool, p pool.Placer, options ...LBA
|
||||
|
||||
a := &lbAgent{
|
||||
cfg: *cfg,
|
||||
da: da,
|
||||
cda: da,
|
||||
rp: rp,
|
||||
placer: p,
|
||||
shutWg: common.NewWaitGroup(),
|
||||
@@ -93,23 +93,6 @@ func (a *lbAgent) fireAfterCall(ctx context.Context, call *models.Call) error {
|
||||
return fireAfterCallFun(a.callListeners, ctx, call)
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
// GetAppID is to get the match of an app name to its ID
|
||||
func (a *lbAgent) GetAppID(ctx context.Context, appName string) (string, error) {
|
||||
return a.da.GetAppID(ctx, appName)
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
// GetAppByID is to get the app by ID
|
||||
func (a *lbAgent) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
|
||||
return a.da.GetAppByID(ctx, appID)
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
func (a *lbAgent) GetRoute(ctx context.Context, appID string, path string) (*models.Route, error) {
|
||||
return a.da.GetRoute(ctx, appID, path)
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
func (a *lbAgent) GetCall(opts ...CallOpt) (Call, error) {
|
||||
var c call
|
||||
@@ -144,7 +127,7 @@ func (a *lbAgent) GetCall(opts ...CallOpt) (Call, error) {
|
||||
setupCtx(&c)
|
||||
|
||||
c.isLB = true
|
||||
c.da = a.da
|
||||
c.handler = a.cda
|
||||
c.ct = a
|
||||
c.stderr = &nullReadWriter{}
|
||||
c.slotHashId = getSlotQueueKey(&c)
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
runner "github.com/fnproject/fn/api/agent/grpc"
|
||||
"github.com/fnproject/fn/api/agent/grpc"
|
||||
"github.com/fnproject/fn/api/common"
|
||||
"github.com/fnproject/fn/api/models"
|
||||
"github.com/fnproject/fn/fnext"
|
||||
@@ -494,26 +494,11 @@ type pureRunner struct {
|
||||
inflight int32
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
func (pr *pureRunner) GetAppID(ctx context.Context, appName string) (string, error) {
|
||||
return pr.a.GetAppID(ctx, appName)
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
func (pr *pureRunner) GetAppByID(ctx context.Context, appID string) (*models.App, error) {
|
||||
return pr.a.GetAppByID(ctx, appID)
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
func (pr *pureRunner) GetCall(opts ...CallOpt) (Call, error) {
|
||||
return pr.a.GetCall(opts...)
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
func (pr *pureRunner) GetRoute(ctx context.Context, appID string, path string) (*models.Route, error) {
|
||||
return pr.a.GetRoute(ctx, appID, path)
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
func (pr *pureRunner) Submit(Call) error {
|
||||
return errors.New("Submit cannot be called directly in a Pure Runner.")
|
||||
@@ -536,11 +521,6 @@ func (pr *pureRunner) AddCallListener(cl fnext.CallListener) {
|
||||
pr.a.AddCallListener(cl)
|
||||
}
|
||||
|
||||
// implements Agent
|
||||
func (pr *pureRunner) Enqueue(context.Context, *models.Call) error {
|
||||
return errors.New("Enqueue cannot be called directly in a Pure Runner.")
|
||||
}
|
||||
|
||||
func (pr *pureRunner) spawnSubmit(state *callHandle) {
|
||||
go func() {
|
||||
err := pr.a.Submit(state.c)
|
||||
@@ -653,9 +633,9 @@ func (pr *pureRunner) Status(ctx context.Context, _ *empty.Empty) (*runner.Runne
|
||||
}, nil
|
||||
}
|
||||
|
||||
func DefaultPureRunner(cancel context.CancelFunc, addr string, da DataAccess, cert string, key string, ca string) (Agent, error) {
|
||||
func DefaultPureRunner(cancel context.CancelFunc, addr string, da CallHandler, cert string, key string, ca string) (Agent, error) {
|
||||
|
||||
agent := New(da, WithoutAsyncDequeue())
|
||||
agent := New(da)
|
||||
|
||||
// WARNING: SSL creds are optional.
|
||||
if cert == "" || key == "" || ca == "" {
|
||||
|
||||
@@ -278,6 +278,7 @@ func (a *slotQueueMgr) deleteSlotQueue(slots *slotQueue) bool {
|
||||
return isDeleted
|
||||
}
|
||||
|
||||
// TODO this should be at least SHA-256 or more
|
||||
var shapool = &sync.Pool{New: func() interface{} { return sha1.New() }}
|
||||
|
||||
// TODO do better; once we have app+route versions this function
|
||||
|
||||
@@ -20,4 +20,9 @@ const (
|
||||
ParamCallID string = "call"
|
||||
// ParamFnID is the url path parameter for fn id
|
||||
ParamFnID string = "fnID"
|
||||
// ParamTriggerSource is the triggers source parameter
|
||||
ParamTriggerSource string = "triggerSource"
|
||||
|
||||
//ParamTriggerType is the trigger type parameter - only used in hybrid API
|
||||
ParamTriggerType string = "triggerType"
|
||||
)
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"log"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -52,18 +51,21 @@ type ResourceProvider interface {
|
||||
|
||||
// BasicResourceProvider supplies simple objects and can be used as a base for custom resource providers
|
||||
type BasicResourceProvider struct {
|
||||
idCount uint32
|
||||
rand *rand.Rand
|
||||
}
|
||||
|
||||
// DataStoreFunc provides an instance of a data store
|
||||
type DataStoreFunc func(*testing.T) models.Datastore
|
||||
|
||||
//NewBasicResourceProvider creates a dumb resource provider that generates resources that have valid, random names (and other unique attributes)
|
||||
func NewBasicResourceProvider() ResourceProvider {
|
||||
return &BasicResourceProvider{}
|
||||
return &BasicResourceProvider{
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
}
|
||||
|
||||
func (brp *BasicResourceProvider) NextID() uint32 {
|
||||
return atomic.AddUint32(&brp.idCount, rand.Uint32())
|
||||
return brp.rand.Uint32()
|
||||
}
func (brp *BasicResourceProvider) DefaultCtx() context.Context {

@@ -86,7 +88,7 @@ func (brp *BasicResourceProvider) ValidTrigger(appId, funcId string) *models.Tri
		AppID:  appId,
		FnID:   funcId,
		Type:   "http",
		Source: "ASource",
		Source: fmt.Sprintf("/source_%09d", brp.NextID()),
	}

	return trigger
@@ -1241,7 +1243,24 @@ func RunTriggersTest(t *testing.T, dsf DataStoreFunc, rp ResourceProvider) {
|
||||
t.Fatalf("expected empty trigger list and no error, but got list [%v] and err %s", triggers.Items, err)
|
||||
}
|
||||
})
|
||||
t.Run("duplicate trigger source of same type on same app", func(t *testing.T) {
|
||||
h := NewHarness(t, ctx, ds)
|
||||
defer h.Cleanup()
|
||||
app := h.GivenAppInDb(rp.ValidApp())
|
||||
fn := h.GivenFnInDb(rp.ValidFn(app.ID))
|
||||
origT := h.GivenTriggerInDb(rp.ValidTrigger(app.ID, fn.ID))
|
||||
|
||||
newT := rp.ValidTrigger(app.ID, fn.ID)
|
||||
|
||||
newT.Source = origT.Source
|
||||
|
||||
_, err := ds.InsertTrigger(ctx, newT)
|
||||
|
||||
if err != models.ErrTriggerSourceExists {
|
||||
t.Errorf("Expecting to fail with duplicate source on same app, got %s", err)
|
||||
}
|
||||
//todo ensure this doesn't apply when type is not equal
|
||||
})
|
||||
t.Run("app id not same as fn id ", func(t *testing.T) {
|
||||
h := NewHarness(t, ctx, ds)
|
||||
defer h.Cleanup()
|
||||
@@ -1532,6 +1551,39 @@ func RunTriggersTest(t *testing.T, dsf DataStoreFunc, rp ResourceProvider) {
|
||||
})
|
||||
}
|
||||
|
||||
func RunTriggerBySourceTests(t *testing.T, dsf DataStoreFunc, rp ResourceProvider) {
|
||||
|
||||
t.Run("http_trigger_access", func(t *testing.T) {
|
||||
ds := dsf(t)
|
||||
ctx := rp.DefaultCtx()
|
||||
t.Run("get_non_existant_trigger", func(t *testing.T) {
|
||||
_, err := ds.GetTriggerBySource(ctx, "none", "http", "source")
|
||||
if err != models.ErrTriggerNotFound {
|
||||
t.Fatalf("Expecting trigger not found, got %s", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("get_trigger_specific_http_route", func(t *testing.T) {
|
||||
h := NewHarness(t, ctx, ds)
|
||||
defer h.Cleanup()
|
||||
testApp := h.GivenAppInDb(rp.ValidApp())
|
||||
testFn := h.GivenFnInDb(rp.ValidFn(testApp.ID))
|
||||
testTrigger := h.GivenTriggerInDb(rp.ValidTrigger(testApp.ID, testFn.ID))
|
||||
trigger, err := ds.GetTriggerBySource(ctx, testApp.ID, testTrigger.Type, testTrigger.Source)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Expecting trigger, got error %s", err)
|
||||
}
|
||||
|
||||
if !trigger.Equals(testTrigger) {
|
||||
t.Errorf("Expecting trigger %#v got %#v", testTrigger, trigger)
|
||||
}
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func RunAllTests(t *testing.T, dsf DataStoreFunc, rp ResourceProvider) {
|
||||
buf := setLogBuffer()
|
||||
defer func() {
|
||||
@@ -1544,5 +1596,6 @@ func RunAllTests(t *testing.T, dsf DataStoreFunc, rp ResourceProvider) {
|
||||
RunRoutesTest(t, dsf, rp)
|
||||
RunFnsTest(t, dsf, rp)
|
||||
RunTriggersTest(t, dsf, rp)
|
||||
RunTriggerBySourceTests(t, dsf, rp)
|
||||
|
||||
}
|
||||
|
||||
@@ -16,6 +16,12 @@ type metricds struct {
	ds models.Datastore
}

func (m *metricds) GetTriggerBySource(ctx context.Context, appId string, triggerType, source string) (*models.Trigger, error) {
	ctx, span := trace.StartSpan(ctx, "ds_get_trigger_by_source")
	defer span.End()
	return m.ds.GetTriggerBySource(ctx, appId, triggerType, source)
}

func (m *metricds) GetAppID(ctx context.Context, appName string) (string, error) {
	ctx, span := trace.StartSpan(ctx, "ds_get_app_id")
	defer span.End()
@@ -28,6 +28,18 @@ func NewMock() models.Datastore {
	return NewMockInit()
}

var _ models.Datastore = &mock{}

func (m *mock) GetTriggerBySource(ctx context.Context, appId string, triggerType, source string) (*models.Trigger, error) {
	for _, t := range m.Triggers {
		if t.AppID == appId && t.Type == triggerType && t.Source == source {
			return t, nil
		}
	}

	return nil, models.ErrTriggerNotFound
}
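The mock resolves a source with a plain linear scan over whatever triggers it was seeded with, matching on app ID, type and source. A sketch of exercising it (the field values are made up; NewMockInit's variadic seeding is shown just below):

ds := NewMockInit([]*models.Trigger{
	{ID: "t1", AppID: "app_id", FnID: "fn_id", Type: "http", Source: "/hello"},
})
trg, err := ds.GetTriggerBySource(context.Background(), "app_id", "http", "/hello")
if err != nil {
	// the only failure mode here is models.ErrTriggerNotFound
}
_ = trg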
// args helps break tests less if we change stuff
func NewMockInit(args ...interface{}) models.Datastore {
	var mocker mock
@@ -416,6 +428,12 @@ func (m *mock) InsertTrigger(ctx context.Context, trigger *models.Trigger) (*mod
			t.Name == trigger.Name) {
			return nil, models.ErrTriggerExists
		}

		if t.AppID == trigger.AppID &&
			t.Source == trigger.Source &&
			t.Type == trigger.Type {
			return nil, models.ErrTriggerSourceExists
		}
	}

	cl := trigger.Clone()
@@ -127,6 +127,8 @@ const (
	triggerSelector   = `SELECT id,name,app_id,fn_id,type,source,annotations,created_at,updated_at FROM triggers`
	triggerIDSelector = triggerSelector + ` WHERE id=?`

	triggerIDSourceSelector = triggerSelector + ` WHERE app_id=? AND type=? AND source=?`

	EnvDBPingMaxRetries = "FN_DS_DB_PING_MAX_RETRIES"
)
@@ -1185,6 +1187,8 @@ func (ds *SQLStore) InsertTrigger(ctx context.Context, newTrigger *models.Trigge
		if err := r.Scan(new(int)); err != nil {
			if err == sql.ErrNoRows {
				return models.ErrAppsNotFound
			} else if err != nil {
				return err
			}
		}

@@ -1194,12 +1198,23 @@ func (ds *SQLStore) InsertTrigger(ctx context.Context, newTrigger *models.Trigge
		if err := r.Scan(&app_id); err != nil {
			if err == sql.ErrNoRows {
				return models.ErrFnsNotFound
			} else if err != nil {
				return err
			}
		}
		if app_id != trigger.AppID {
			return models.ErrTriggerFnIDNotSameApp
		}

		query = tx.Rebind(`SELECT 1 FROM triggers WHERE app_id=? AND type=? and source=?`)
		r = tx.QueryRowContext(ctx, query, trigger.AppID, trigger.Type, trigger.Source)
		err := r.Scan(new(int))
		if err == nil {
			return models.ErrTriggerSourceExists
		} else if err != sql.ErrNoRows {
			return err
		}

		query = tx.Rebind(`INSERT INTO triggers (
			id,
			name,

@@ -1318,8 +1333,7 @@ func (ds *SQLStore) GetTriggerByID(ctx context.Context, triggerID string) (*mode
	err := row.StructScan(&trigger)
	if err == sql.ErrNoRows {
		return nil, models.ErrTriggerNotFound
	}
	if err != nil {
	} else if err != nil {
		return nil, err
	}

@@ -1403,10 +1417,26 @@ func (ds *SQLStore) GetTriggers(ctx context.Context, filter *models.TriggerFilte
		if err == sql.ErrNoRows {
			return res, nil // no error for empty list
		}
		return nil, err
	}
	return res, nil
}

func (ds *SQLStore) GetTriggerBySource(ctx context.Context, appId string, triggerType, source string) (*models.Trigger, error) {
	var trigger models.Trigger

	query := ds.db.Rebind(triggerIDSourceSelector)
	row := ds.db.QueryRowxContext(ctx, query, appId, triggerType, source)

	err := row.StructScan(&trigger)
	if err == sql.ErrNoRows {
		return nil, models.ErrTriggerNotFound
	} else if err != nil {
		return nil, err
	}
	return &trigger, nil
}
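Rebind is what lets the one selector string work across the supported backends: sqlx rewrites the `?` placeholders into the bindvar style of the active driver (Postgres gets positional `$n`, MySQL and SQLite keep `?`). Roughly:

q := ds.db.Rebind(triggerIDSourceSelector)
// on a Postgres connection q reads approximately:
//   SELECT id,name,app_id,fn_id,type,source,annotations,created_at,updated_at FROM triggers
//   WHERE app_id=$1 AND type=$2 AND source=$3
_ = q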
// Close closes the database, releasing any open resources.
func (ds *SQLStore) Close() error {
	return ds.db.Close()
@@ -149,6 +149,12 @@ type Call struct {

	// App this call belongs to.
	AppID string `json:"app_id" db:"app_id"`

	// Trigger this call belongs to.
	TriggerID string `json:"trigger_id" db:"trigger_id"`

	// Fn this call belongs to.
	FnID string `json:"fn_id" db:"fn_id"`
}

type CallFilter struct {

@@ -93,6 +93,9 @@ type Datastore interface {
	// Return ErrDatastoreEmptyAppId if no AppID set in the filter
	GetTriggers(ctx context.Context, filter *TriggerFilter) (*TriggerList, error)

	// GetTriggerBySource loads a trigger by type and source ID - this is only needed when the data store is also used for agent read access
	GetTriggerBySource(ctx context.Context, appId string, triggerType, source string) (*Trigger, error)

	// implements io.Closer to shutdown
	io.Closer
}
@@ -198,6 +198,11 @@ var (
		code:  http.StatusBadRequest,
		error: fmt.Errorf("Invalid annotation change, new key(s) exceed maximum permitted number of annotations keys (%d)", maxAnnotationsKeys),
	}

	ErrAsyncUnsupported = err{
		code:  http.StatusBadRequest,
		error: errors.New("Async functions are not supported on this server"),
	}
)

// APIError any error that implements this interface will return an API response
@@ -10,9 +10,10 @@ import (
	"github.com/fnproject/fn/api/common"
)

// For want of a better place to put this it's here
// TriggerHTTPEndpointAnnotation is the annotation that exposes the HTTP trigger endpoint; for want of a better place to put this, it lives here
const TriggerHTTPEndpointAnnotation = "fnproject.io/trigger/httpEndpoint"
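The server's triggerAnnotator attaches this annotation when a trigger is returned over the API, so clients can discover where to invoke it. For illustration only (the endpoint value depends on the server's configured base URL and is hypothetical), an annotated trigger would serialise along these lines:

// {"id":"trg_1","name":"hello","app_id":"app_1","fn_id":"fn_1","type":"http","source":"/hello",
//  "annotations":{"fnproject.io/trigger/httpEndpoint":"http://fn.example.com/t/myapp/hello"}}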
// Trigger represents a binding between a Function and an external event source
type Trigger struct {
	ID   string `json:"id" db:"id"`
	Name string `json:"name" db:"name"`

@@ -25,6 +26,7 @@ type Trigger struct {
	Annotations Annotations `json:"annotations,omitempty" db:"annotations"`
}

// Equals compares two triggers for semantic equality; it ignores timestamp fields but includes annotations
func (t *Trigger) Equals(t2 *Trigger) bool {
	eq := true
	eq = eq && t.ID == t2.ID

@@ -39,6 +41,7 @@ func (t *Trigger) Equals(t2 *Trigger) bool {
	return eq
}

// EqualsWithAnnotationSubset is equivalent to Equals except it accepts cases where t's annotations are a strict subset of t2's
func (t *Trigger) EqualsWithAnnotationSubset(t2 *Trigger) bool {
	eq := true
	eq = eq && t.ID == t2.ID

@@ -53,14 +56,17 @@ func (t *Trigger) EqualsWithAnnotationSubset(t2 *Trigger) bool {
	return eq
}

// TriggerTypeHTTP represents an HTTP trigger
const TriggerTypeHTTP = "http"

var triggerTypes = []string{TriggerTypeHTTP}

// ValidTriggerTypes lists the supported trigger types in this service
func ValidTriggerTypes() []string {
	return triggerTypes
}

// ValidTriggerType checks that a given trigger type is valid on this service
func ValidTriggerType(a string) bool {
	for _, b := range triggerTypes {
		if b == a {

@@ -71,46 +77,63 @@ func ValidTriggerType(a string) bool {
}
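Callers guard trigger writes with this check, mapping unknown types to the API error defined below; Validate further down performs a check along these lines:

if !ValidTriggerType(t.Type) {
	return ErrTriggerTypeUnknown
}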
var (
	// ErrTriggerIDProvided indicates that a trigger ID was specified when it shouldn't have been
	ErrTriggerIDProvided = err{
		code:  http.StatusBadRequest,
		error: errors.New("ID cannot be provided for Trigger creation"),
	}
	// ErrTriggerIDMismatch indicates an ID was provided that did not match the ID of the corresponding operation/call
	ErrTriggerIDMismatch = err{
		code:  http.StatusBadRequest,
		error: errors.New("ID in path does not match ID in body"),
	}
	// ErrTriggerMissingName - name not specified on a trigger object
	ErrTriggerMissingName = err{
		code:  http.StatusBadRequest,
		error: errors.New("Missing name on Trigger")}
	// ErrTriggerTooLongName - name exceeds the maximum permitted length
	ErrTriggerTooLongName = err{
		code:  http.StatusBadRequest,
		error: fmt.Errorf("Trigger name must be %v characters or less", MaxTriggerName)}
	// ErrTriggerInvalidName - name does not comply with naming spec
	ErrTriggerInvalidName = err{
		code:  http.StatusBadRequest,
		error: errors.New("Invalid name for Trigger")}
	// ErrTriggerMissingAppID - no App ID specified on trigger creation
	ErrTriggerMissingAppID = err{
		code:  http.StatusBadRequest,
		error: errors.New("Missing App ID on Trigger")}
	// ErrTriggerMissingFnID - no Fn ID specified on trigger creation
	ErrTriggerMissingFnID = err{
		code:  http.StatusBadRequest,
		error: errors.New("Missing Fn ID on Trigger")}
	// ErrTriggerFnIDNotSameApp - specified Fn does not belong to the same app as the provided AppID
	ErrTriggerFnIDNotSameApp = err{
		code:  http.StatusBadRequest,
		error: errors.New("Invalid Fn ID - not owned by specified app")}
	// ErrTriggerTypeUnknown - unsupported trigger type
	ErrTriggerTypeUnknown = err{
		code:  http.StatusBadRequest,
		error: errors.New("Trigger Type Not Supported")}
	// ErrTriggerMissingSource - no source specified for trigger
	ErrTriggerMissingSource = err{
		code:  http.StatusBadRequest,
		error: errors.New("Missing Trigger Source")}
	// ErrTriggerNotFound - trigger not found
	ErrTriggerNotFound = err{
		code:  http.StatusNotFound,
		error: errors.New("Trigger not found")}
	// ErrTriggerExists - a trigger with the specified name already exists
	ErrTriggerExists = err{
		code:  http.StatusConflict,
		error: errors.New("Trigger already exists")}
	// ErrTriggerSourceExists - another trigger on the same app has the same source and type
	ErrTriggerSourceExists = err{
		code:  http.StatusConflict,
		error: errors.New("Trigger with the same type and source exists on this app")}
)
// Validate checks that a trigger has valid data for inserting into a store
func (t *Trigger) Validate() error {
	if t.Name == "" {
		return ErrTriggerMissingName

@@ -149,20 +172,15 @@ func (t *Trigger) Validate() error {
	return nil
}

// Clone creates a deep copy of a trigger
func (t *Trigger) Clone() *Trigger {
	clone := new(Trigger)
	*clone = *t // shallow copy

	if t.Annotations != nil {
		clone.Annotations = make(Annotations, len(t.Annotations))
		for k, v := range t.Annotations {
			// TODO technically, we need to deep copy the bytes
			clone.Annotations[k] = v
		}
	}
	// annotations are immutable via their interface so can be shallow copied
	return clone
}

// Update applies a change to a trigger
func (t *Trigger) Update(patch *Trigger) {

	original := t.Clone()
@@ -189,15 +207,20 @@ func (t *Trigger) Update(patch *Trigger) {
	}
}

// TriggerFilter is a search criteria on triggers
type TriggerFilter struct {
	AppID string // this is exact match
	// AppID searches for triggers in an app - mandatory
	AppID string // this is exact match, mandatory
	// FnID searches for triggers belonging to a specific function
	FnID string // this is exact match
	// Name is the name of the trigger
	Name string // exact match

	Cursor  string
	PerPage int
}
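Listing the triggers of a single function then looks roughly like this (page size is arbitrary):

list, err := ds.GetTriggers(ctx, &models.TriggerFilter{
	AppID:   app.ID, // mandatory
	FnID:    fn.ID,
	PerPage: 30,
})
if err == nil && list.NextCursor != "" {
	// feed NextCursor back in as Cursor to fetch the next page
}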
// TriggerList is a container of triggers returned by search, optionally indicating the next page cursor
type TriggerList struct {
	NextCursor string     `json:"next_cursor,omitempty"`
	Items      []*Trigger `json:"items"`
@@ -5,17 +5,17 @@ import (
	"testing"
)

var openEmptyJson = `{"id":"","name":"","app_id":"","fn_id":"","created_at":"0001-01-01T00:00:00.000Z","updated_at":"0001-01-01T00:00:00.000Z","type":"","source":""`
var openEmptyJSON = `{"id":"","name":"","app_id":"","fn_id":"","created_at":"0001-01-01T00:00:00.000Z","updated_at":"0001-01-01T00:00:00.000Z","type":"","source":""`

var triggerJsonCases = []struct {
var triggerJSONCases = []struct {
	val       *Trigger
	valString string
}{
	{val: &Trigger{}, valString: openEmptyJson + "}"},
	{val: &Trigger{}, valString: openEmptyJSON + "}"},
}

func TestTriggerJsonMarshalling(t *testing.T) {
	for _, tc := range triggerJsonCases {
	for _, tc := range triggerJSONCases {
		v, err := json.Marshal(tc.val)
		if err != nil {
			t.Fatalf("Failed to marshal json into %s: %v", tc.valString, err)
@@ -8,7 +8,7 @@ import (
)

// TODO: Deprecate with V1 API
func (s *Server) handleV1AppGetByName(c *gin.Context) {
func (s *Server) handleV1AppGetByIdOrName(c *gin.Context) {
	ctx := c.Request.Context()

	param := c.MustGet(api.AppID).(string)
@@ -186,13 +186,13 @@ func AppFromContext(ctx context.Context) string {
	return r
}

func (s *Server) checkAppPresenceByNameAtRunner() gin.HandlerFunc {
func (s *Server) checkAppPresenceByNameAtLB() gin.HandlerFunc {
	return func(c *gin.Context) {
		ctx, _ := common.LoggerWithFields(c.Request.Context(), extractFields(c))

		appName := c.Param(api.ParamAppName)
		if appName != "" {
			appID, err := s.agent.GetAppID(ctx, appName)
			appID, err := s.lbReadAccess.GetAppID(ctx, appName)
			if err != nil {
				handleV1ErrorResponse(c, err)
				c.Abort()
@@ -5,9 +5,14 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/fnproject/fn/api"
|
||||
"github.com/fnproject/fn/api/common"
|
||||
"github.com/fnproject/fn/api/models"
|
||||
"github.com/gin-gonic/gin"
|
||||
"net/http"
|
||||
"path"
|
||||
)
|
||||
|
||||
func (s *Server) handleRunnerEnqueue(c *gin.Context) {
|
||||
@@ -18,9 +23,9 @@ func (s *Server) handleRunnerEnqueue(c *gin.Context) {
|
||||
err := c.BindJSON(&call)
|
||||
if err != nil {
|
||||
if models.IsAPIError(err) {
|
||||
handleV1ErrorResponse(c, err)
|
||||
handleErrorResponse(c, err)
|
||||
} else {
|
||||
handleV1ErrorResponse(c, models.ErrInvalidJSON)
|
||||
handleErrorResponse(c, models.ErrInvalidJSON)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -39,7 +44,7 @@ func (s *Server) handleRunnerEnqueue(c *gin.Context) {
	call.Status = "queued"
	_, err = s.mq.Push(ctx, &call)
	if err != nil {
		handleV1ErrorResponse(c, err)
		handleErrorResponse(c, err)
		return
	}

@@ -50,9 +55,7 @@ func (s *Server) handleRunnerEnqueue(c *gin.Context) {
	// will ensure the call exists in the db in 'running' state there.
	// s.datastore.InsertCall(ctx, &call)

	c.JSON(200, struct {
		M string `json:"msg"`
	}{M: "enqueued call"})
	c.String(http.StatusNoContent, "")
}
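With the JSON acknowledgement gone, hybrid clients should treat a bare 204 as success. A rough client-side check, assuming a plain net/http request against the async enqueue endpoint exercised in the test below:

resp, err := http.DefaultClient.Do(req) // req targets e.g. PUT /v2/runner/async
if err != nil {
	return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusNoContent {
	return fmt.Errorf("enqueue failed: %s", resp.Status)
}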
|
||||
|
||||
func (s *Server) handleRunnerDequeue(c *gin.Context) {
|
||||
@@ -70,7 +73,7 @@ func (s *Server) handleRunnerDequeue(c *gin.Context) {
|
||||
for {
|
||||
call, err := s.mq.Reserve(ctx)
|
||||
if err != nil {
|
||||
handleV1ErrorResponse(c, err)
|
||||
handleErrorResponse(c, err)
|
||||
return
|
||||
}
|
||||
if call != nil {
|
||||
@@ -97,9 +100,9 @@ func (s *Server) handleRunnerStart(c *gin.Context) {
|
||||
err := c.BindJSON(&call)
|
||||
if err != nil {
|
||||
if models.IsAPIError(err) {
|
||||
handleV1ErrorResponse(c, err)
|
||||
handleErrorResponse(c, err)
|
||||
} else {
|
||||
handleV1ErrorResponse(c, models.ErrInvalidJSON)
|
||||
handleErrorResponse(c, models.ErrInvalidJSON)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -129,7 +132,7 @@ func (s *Server) handleRunnerStart(c *gin.Context) {
|
||||
// TODO change this to only delete message if the status change fails b/c it already ran
|
||||
// after messaging semantics change
|
||||
if err := s.mq.Delete(ctx, &call); err != nil { // TODO change this to take some string(s), not a whole call
|
||||
handleV1ErrorResponse(c, err)
|
||||
handleErrorResponse(c, err)
|
||||
return
|
||||
}
|
||||
//}
|
||||
@@ -137,9 +140,7 @@ func (s *Server) handleRunnerStart(c *gin.Context) {
|
||||
//return
|
||||
//}
|
||||
|
||||
c.JSON(200, struct {
|
||||
M string `json:"msg"`
|
||||
}{M: "slingshot: engage"})
|
||||
c.String(http.StatusNoContent, "")
|
||||
}
|
||||
|
||||
func (s *Server) handleRunnerFinish(c *gin.Context) {
|
||||
@@ -152,9 +153,9 @@ func (s *Server) handleRunnerFinish(c *gin.Context) {
|
||||
err := c.BindJSON(&body)
|
||||
if err != nil {
|
||||
if models.IsAPIError(err) {
|
||||
handleV1ErrorResponse(c, err)
|
||||
handleErrorResponse(c, err)
|
||||
} else {
|
||||
handleV1ErrorResponse(c, models.ErrInvalidJSON)
|
||||
handleErrorResponse(c, models.ErrInvalidJSON)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -184,7 +185,49 @@ func (s *Server) handleRunnerFinish(c *gin.Context) {
	//// note: Not returning err here since the job could have already finished successfully.
	//}

	c.JSON(200, struct {
		M string `json:"msg"`
	}{M: "good night, sweet prince"})
	c.String(http.StatusNoContent, "")
}

// This is a sort of interim route that is V2 API style but due for deprecation
func (s *Server) handleRunnerGetRoute(c *gin.Context) {
	ctx := c.Request.Context()

	routePath := path.Clean("/" + c.MustGet(api.Path).(string))
	route, err := s.datastore.GetRoute(ctx, c.MustGet(api.AppID).(string), routePath)
	if err != nil {
		handleErrorResponse(c, err)
		return
	}

	c.JSON(http.StatusOK, route)
}

func (s *Server) handleRunnerGetTriggerBySource(c *gin.Context) {
	ctx := c.Request.Context()

	appId := c.MustGet(api.AppID).(string)

	triggerType := c.Param(api.ParamTriggerType)
	if triggerType == "" {
		handleErrorResponse(c, errors.New("no trigger type in request"))
		return
	}
	triggerSource := strings.TrimPrefix(c.Param(api.ParamTriggerSource), "/")

	trigger, err := s.datastore.GetTriggerBySource(ctx, appId, triggerType, triggerSource)

	if err != nil {
		handleErrorResponse(c, err)
		return
	}
	// It is not clear that we really need to annotate the trigger here, but let's do it just in case.
	app, err := s.datastore.GetAppByID(ctx, trigger.AppID)

	if err != nil {
		handleErrorResponse(c, fmt.Errorf("unexpected error - trigger app not available: %s", err))
	}

	s.triggerAnnotator.AnnotateTrigger(c, app, trigger)

	c.JSON(http.StatusOK, trigger)
}
api/server/hybrid_test.go (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"github.com/fnproject/fn/api/datastore"
|
||||
"github.com/fnproject/fn/api/id"
|
||||
"github.com/fnproject/fn/api/logs"
|
||||
"github.com/fnproject/fn/api/models"
|
||||
"github.com/fnproject/fn/api/mqs"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHybridEndpoints(t *testing.T) {
|
||||
buf := setLogBuffer()
|
||||
app := &models.App{ID: "app_id", Name: "myapp"}
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Route{{
|
||||
AppID: app.ID,
|
||||
Path: "yodawg",
|
||||
}},
|
||||
)
|
||||
|
||||
logDB := logs.NewMock()
|
||||
|
||||
srv := testServer(ds, &mqs.Mock{}, logDB, nil /* TODO */, ServerTypeAPI)
|
||||
|
||||
newCallBody := func() string {
|
||||
call := &models.Call{
|
||||
AppID: app.ID,
|
||||
ID: id.New().String(),
|
||||
Path: "yodawg",
|
||||
// TODO ?
|
||||
}
|
||||
var b bytes.Buffer
|
||||
json.NewEncoder(&b).Encode(&call)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
for _, test := range []struct {
|
||||
name string
|
||||
method string
|
||||
path string
|
||||
body string
|
||||
expectedCode int
|
||||
}{
|
||||
// TODO change all these tests to just do an async task in normal order once plumbing is done
|
||||
|
||||
{"post async call", "PUT", "/v2/runner/async", newCallBody(), http.StatusNoContent},
|
||||
|
||||
// TODO this one only works if it's not the same as the first since update isn't hooked up
|
||||
{"finish call", "POST", "/v2/runner/finish", newCallBody(), http.StatusNoContent},
|
||||
|
||||
// TODO these won't work until update works and the agent gets shut off
|
||||
//{"get async call", "GET", "/v1/runner/async", "", http.StatusOK},
|
||||
//{"start call", "POST", "/v1/runner/start", "TODO", http.StatusOK},
|
||||
} {
|
||||
_, rec := routerRequest(t, srv.Router, test.method, test.path, strings.NewReader(test.body))
|
||||
|
||||
if rec.Code != test.expectedCode {
|
||||
t.Log(buf.String())
|
||||
t.Errorf("Test \"%s\": Expected status code to be %d but was %d",
|
||||
test.name, test.expectedCode, rec.Code)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -43,7 +43,7 @@ func (c *middlewareController) CallFunction(w http.ResponseWriter, r *http.Reque
|
||||
c.ginContext.Set(api.AppID, appID)
|
||||
}
|
||||
|
||||
c.server.handleFunctionCall(c.ginContext)
|
||||
c.server.handleV1FunctionCall(c.ginContext)
|
||||
c.ginContext.Abort()
|
||||
}
|
||||
func (c *middlewareController) FunctionCalled() bool {
|
||||
|
||||
@@ -1,18 +1,17 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path"
|
||||
|
||||
"github.com/fnproject/fn/api"
|
||||
"github.com/gin-gonic/gin"
|
||||
"net/http"
|
||||
"path"
|
||||
)
|
||||
|
||||
func routeGet(s *Server, appID string, c *gin.Context) {
|
||||
func (s *Server) handleRouteGetAPI(c *gin.Context) {
|
||||
ctx := c.Request.Context()
|
||||
|
||||
routePath := path.Clean("/" + c.MustGet(api.Path).(string))
|
||||
route, err := s.datastore.GetRoute(ctx, appID, routePath)
|
||||
route, err := s.datastore.GetRoute(ctx, c.MustGet(api.AppID).(string), routePath)
|
||||
if err != nil {
|
||||
handleV1ErrorResponse(c, err)
|
||||
return
|
||||
@@ -20,11 +19,3 @@ func routeGet(s *Server, appID string, c *gin.Context) {
|
||||
|
||||
c.JSON(http.StatusOK, routeResponse{"Successfully loaded route", route})
|
||||
}
|
||||
|
||||
func (s *Server) handleRouteGetAPI(c *gin.Context) {
|
||||
routeGet(s, c.MustGet(api.AppID).(string), c)
|
||||
}
|
||||
|
||||
func (s *Server) handleRouteGetRunner(c *gin.Context) {
|
||||
routeGet(s, c.MustGet(api.AppID).(string), c)
|
||||
}
|
||||
|
||||
@@ -17,8 +17,8 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// handleFunctionCall executes the function, for router handlers
|
||||
func (s *Server) handleFunctionCall(c *gin.Context) {
|
||||
// handleV1FunctionCall executes the function, for router handlers
|
||||
func (s *Server) handleV1FunctionCall(c *gin.Context) {
|
||||
err := s.handleFunctionCall2(c)
|
||||
if err != nil {
|
||||
handleV1ErrorResponse(c, err)
|
||||
@@ -40,15 +40,20 @@ func (s *Server) handleFunctionCall2(c *gin.Context) error {
|
||||
}
|
||||
|
||||
appID := c.MustGet(api.AppID).(string)
|
||||
app, err := s.agent.GetAppByID(ctx, appID)
|
||||
app, err := s.lbReadAccess.GetAppByID(ctx, appID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routePath := path.Clean(p)
|
||||
route, err := s.lbReadAccess.GetRoute(ctx, appID, routePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// gin sets this to 404 on NoRoute, so we'll just ensure it's 200 by default.
|
||||
c.Status(200) // this doesn't write the header yet
|
||||
|
||||
return s.serve(c, app, path.Clean(p))
|
||||
return s.serve(c, app, route)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -57,7 +62,7 @@ var (
|
||||
|
||||
// TODO it would be nice if we could make this have nothing to do with the gin.Context but meh
|
||||
// TODO make async store an *http.Request? would be sexy until we have different api format...
|
||||
func (s *Server) serve(c *gin.Context, app *models.App, path string) error {
|
||||
func (s *Server) serve(c *gin.Context, app *models.App, route *models.Route) error {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
writer := syncResponseWriter{
|
||||
@@ -75,7 +80,7 @@ func (s *Server) serve(c *gin.Context, app *models.App, path string) error {
|
||||
|
||||
call, err := s.agent.GetCall(
|
||||
agent.WithWriter(&writer), // XXX (reed): order matters [for now]
|
||||
agent.FromRequest(s.agent, app, path, c.Request),
|
||||
agent.FromRequest(app, route, c.Request),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -97,8 +102,7 @@ func (s *Server) serve(c *gin.Context, app *models.App, path string) error {
|
||||
}
|
||||
model.Payload = buf.String()
|
||||
|
||||
// TODO idk where to put this, but agent is all runner really has...
|
||||
err = s.agent.Enqueue(c.Request.Context(), model)
|
||||
err = s.lbEnqueue.Enqueue(c.Request.Context(), model)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -21,6 +21,8 @@ func testRouterAsync(ds models.Datastore, mq models.MessageQueue, rnr agent.Agen
|
||||
Router: engine,
|
||||
AdminRouter: engine,
|
||||
datastore: ds,
|
||||
lbReadAccess: ds,
|
||||
lbEnqueue: agent.NewDirectEnqueueAccess(mq),
|
||||
mq: mq,
|
||||
nodeType: ServerTypeFull,
|
||||
}
|
||||
|
||||
api/server/runner_httptrigger.go (new file, 152 lines)
@@ -0,0 +1,152 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/fnproject/fn/api"
|
||||
"github.com/fnproject/fn/api/agent"
|
||||
"github.com/fnproject/fn/api/common"
|
||||
"github.com/fnproject/fn/api/models"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// handleHTTPTriggerCall executes the function, for router handlers
|
||||
func (s *Server) handleHTTPTriggerCall(c *gin.Context) {
|
||||
err := s.handleTriggerHTTPFunctionCall2(c)
|
||||
if err != nil {
|
||||
handleErrorResponse(c, err)
|
||||
}
|
||||
}
|
||||
|
||||
// handleTriggerHTTPFunctionCall2 executes the function and returns an error
|
||||
// Requires the following in the context:
|
||||
func (s *Server) handleTriggerHTTPFunctionCall2(c *gin.Context) error {
|
||||
ctx := c.Request.Context()
|
||||
p := c.Param(api.ParamTriggerSource)
|
||||
if p == "" {
|
||||
p = "/"
|
||||
}
|
||||
|
||||
appName := c.Param(api.ParamAppName)
|
||||
|
||||
appID, err := s.lbReadAccess.GetAppID(ctx, appName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
app, err := s.lbReadAccess.GetAppByID(ctx, appID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routePath := p
|
||||
|
||||
trigger, err := s.lbReadAccess.GetTriggerBySource(ctx, appID, "http", routePath)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fn, err := s.lbReadAccess.GetFnByID(ctx, trigger.FnID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// gin sets this to 404 on NoRoute, so we'll just ensure it's 200 by default.
|
||||
c.Status(200) // this doesn't write the header yet
|
||||
|
||||
return s.ServeHTTPTrigger(c, app, fn, trigger)
|
||||
}
|
||||
|
||||
//ServeHTTPTrigger serves an HTTP trigger for a given app/fn/trigger based on the current request
|
||||
// This is exported to allow extensions to handle their own trigger naming and publishing
|
||||
func (s *Server) ServeHTTPTrigger(c *gin.Context, app *models.App, fn *models.Fn, trigger *models.Trigger) error {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
writer := syncResponseWriter{
|
||||
Buffer: buf,
|
||||
headers: c.Writer.Header(), // copy ref
|
||||
}
|
||||
defer bufPool.Put(buf) // TODO need to ensure this is safe with Dispatch?
|
||||
|
||||
	// GetCall can mod headers, assign an id, look up the route/app (cached),
	// strip params, etc.
	// this should happen ASAP to turn app name to app ID

call, err := s.agent.GetCall(
|
||||
agent.WithWriter(&writer), // XXX (reed): order matters [for now]
|
||||
agent.FromHTTPTriggerRequest(app, fn, trigger, c.Request),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
model := call.Model()
|
||||
{ // scope this, to disallow ctx use outside of this scope. add id for handleV1ErrorResponse logger
|
||||
ctx, _ := common.LoggerWithFields(c.Request.Context(), logrus.Fields{"id": model.ID})
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
}
|
||||
|
||||
// TODO TRIGGERWIP not clear this makes sense here - but it works so...
|
||||
if model.Type == "async" {
|
||||
|
||||
// TODO we should push this into GetCall somehow (CallOpt maybe) or maybe agent.Queue(Call) ?
|
||||
if c.Request.ContentLength > 0 {
|
||||
buf.Grow(int(c.Request.ContentLength))
|
||||
}
|
||||
_, err := buf.ReadFrom(c.Request.Body)
|
||||
if err != nil {
|
||||
return models.ErrInvalidPayload
|
||||
}
|
||||
model.Payload = buf.String()
|
||||
|
||||
err = s.lbEnqueue.Enqueue(c.Request.Context(), model)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.JSON(http.StatusAccepted, map[string]string{"call_id": model.ID})
|
||||
return nil
|
||||
}
|
||||
|
||||
err = s.agent.Submit(call)
|
||||
if err != nil {
|
||||
// NOTE if they cancel the request then it will stop the call (kind of cool),
|
||||
// we could filter that error out here too as right now it yells a little
|
||||
if err == models.ErrCallTimeoutServerBusy || err == models.ErrCallTimeout {
|
||||
// TODO maneuver
|
||||
// add this, since it means that start may not have been called [and it's relevant]
|
||||
c.Writer.Header().Add("XXX-FXLB-WAIT", time.Now().Sub(time.Time(model.CreatedAt)).String())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// if they don't set a content-type - detect it
|
||||
if writer.Header().Get("Content-Type") == "" {
|
||||
// see http.DetectContentType, the go server is supposed to do this for us but doesn't appear to?
|
||||
var contentType string
|
||||
jsonPrefix := [1]byte{'{'} // stack allocated
|
||||
if bytes.HasPrefix(buf.Bytes(), jsonPrefix[:]) {
|
||||
// try to detect json, since DetectContentType isn't a hipster.
|
||||
contentType = "application/json; charset=utf-8"
|
||||
} else {
|
||||
contentType = http.DetectContentType(buf.Bytes())
|
||||
}
|
||||
writer.Header().Set("Content-Type", contentType)
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Length", strconv.Itoa(int(buf.Len())))
|
||||
|
||||
	if writer.status > 0 {
		c.Writer.WriteHeader(writer.status)
	}
	io.Copy(c.Writer, &writer)

	return nil
}
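Because ServeHTTPTrigger is exported, an extension can resolve the app, fn and trigger however it likes and still reuse the buffering, async and dispatch handling above. A minimal sketch; the resolve step stands in for whatever naming scheme the extension implements:

func customTriggerHandler(s *server.Server, resolve func(*gin.Context) (*models.App, *models.Fn, *models.Trigger, error)) gin.HandlerFunc {
	return func(c *gin.Context) {
		app, fn, trigger, err := resolve(c) // hypothetical extension-supplied lookup
		if err != nil {
			c.AbortWithStatus(http.StatusNotFound)
			return
		}
		if err := s.ServeHTTPTrigger(c, app, fn, trigger); err != nil {
			c.AbortWithStatus(http.StatusInternalServerError)
		}
	}
}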
api/server/runner_httptrigger_test.go (new file, 622 lines)
@@ -0,0 +1,622 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"context"
|
||||
"errors"
|
||||
"github.com/fnproject/fn/api/agent"
|
||||
"github.com/fnproject/fn/api/datastore"
|
||||
"github.com/fnproject/fn/api/logs"
|
||||
"github.com/fnproject/fn/api/models"
|
||||
"github.com/fnproject/fn/api/mqs"
|
||||
"os"
|
||||
)
|
||||
|
||||
func envTweaker(name, value string) func() {
|
||||
bck, ok := os.LookupEnv(name)
|
||||
|
||||
err := os.Setenv(name, value)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
return func() {
|
||||
var err error
|
||||
if !ok {
|
||||
err = os.Unsetenv(name)
|
||||
} else {
|
||||
err = os.Setenv(name, bck)
|
||||
}
|
||||
		if err != nil {
			panic(err.Error())
		}
	}
}
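Typical use in these tests is to capture the restore closure and defer it, as the response-size test further down does:

tweaker := envTweaker("FN_MAX_RESPONSE_SIZE", "2048")
defer tweaker() // restores the previous value, or unsets it if it was absent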
|
||||
|
||||
func testRunner(_ *testing.T, args ...interface{}) (agent.Agent, context.CancelFunc) {
|
||||
ls := logs.NewMock()
|
||||
var mq models.MessageQueue = &mqs.Mock{}
|
||||
for _, a := range args {
|
||||
switch arg := a.(type) {
|
||||
case models.MessageQueue:
|
||||
mq = arg
|
||||
case models.LogStore:
|
||||
ls = arg
|
||||
}
|
||||
}
|
||||
r := agent.New(agent.NewDirectCallDataAccess(ls, mq))
|
||||
return r, func() { r.Close() }
|
||||
}
|
||||
|
||||
func checkLogs(t *testing.T, tnum int, ds models.LogStore, callID string, expected []string) bool {
|
||||
|
||||
logReader, err := ds.GetLog(context.Background(), "myapp", callID)
|
||||
if err != nil {
|
||||
t.Errorf("Test %d: GetLog for call_id:%s returned err %s",
|
||||
tnum, callID, err.Error())
|
||||
return false
|
||||
}
|
||||
|
||||
logBytes, err := ioutil.ReadAll(logReader)
|
||||
if err != nil {
|
||||
t.Errorf("Test %d: GetLog read IO call_id:%s returned err %s",
|
||||
tnum, callID, err.Error())
|
||||
return false
|
||||
}
|
||||
|
||||
logBody := string(logBytes)
|
||||
maxLog := len(logBody)
|
||||
if maxLog > 1024 {
|
||||
maxLog = 1024
|
||||
}
|
||||
|
||||
for _, match := range expected {
|
||||
if !strings.Contains(logBody, match) {
|
||||
t.Errorf("Test %d: GetLog read IO call_id:%s cannot find: %s in logs: %s",
|
||||
tnum, callID, match, logBody[:maxLog])
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// implement models.MQ and models.APIError
|
||||
type errorMQ struct {
|
||||
error
|
||||
code int
|
||||
}
|
||||
|
||||
func (mock *errorMQ) Push(context.Context, *models.Call) (*models.Call, error) { return nil, mock }
|
||||
func (mock *errorMQ) Reserve(context.Context) (*models.Call, error) { return nil, mock }
|
||||
func (mock *errorMQ) Delete(context.Context, *models.Call) error { return mock }
|
||||
func (mock *errorMQ) Code() int { return mock.code }
|
||||
func (mock *errorMQ) Close() error { return nil }
|
||||
func TestFailedEnqueue(t *testing.T) {
|
||||
buf := setLogBuffer()
|
||||
app := &models.App{ID: "app_id", Name: "myapp", Config: models.Config{}}
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Route{
|
||||
{Path: "/dummy", Image: "dummy/dummy", Type: "async", Memory: 128, Timeout: 30, IdleTimeout: 30, AppID: app.ID},
|
||||
},
|
||||
)
|
||||
err := errors.New("Unable to push task to queue")
|
||||
mq := &errorMQ{err, http.StatusInternalServerError}
|
||||
fnl := logs.NewMock()
|
||||
rnr, cancelrnr := testRunner(t, ds, mq, fnl)
|
||||
defer cancelrnr()
|
||||
|
||||
srv := testServer(ds, mq, fnl, rnr, ServerTypeFull)
|
||||
for i, test := range []struct {
|
||||
path string
|
||||
body string
|
||||
method string
|
||||
expectedCode int
|
||||
expectedHeaders map[string][]string
|
||||
}{
|
||||
{"/r/myapp/dummy", ``, "POST", http.StatusInternalServerError, nil},
|
||||
} {
|
||||
body := strings.NewReader(test.body)
|
||||
_, rec := routerRequest(t, srv.Router, test.method, test.path, body)
|
||||
if rec.Code != test.expectedCode {
|
||||
t.Log(buf.String())
|
||||
t.Errorf("Test %d: Expected status code to be %d but was %d",
|
||||
i, test.expectedCode, rec.Code)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTriggerRunnerGet(t *testing.T) {
|
||||
buf := setLogBuffer()
|
||||
app := &models.App{ID: "app_id", Name: "myapp", Config: models.Config{}}
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
)
|
||||
|
||||
rnr, cancel := testRunner(t, ds)
|
||||
defer cancel()
|
||||
logDB := logs.NewMock()
|
||||
srv := testServer(ds, &mqs.Mock{}, logDB, rnr, ServerTypeFull)
|
||||
|
||||
for i, test := range []struct {
|
||||
path string
|
||||
body string
|
||||
expectedCode int
|
||||
expectedError error
|
||||
}{
|
||||
{"/t/app/route", "", http.StatusNotFound, models.ErrAppsNotFound},
|
||||
{"/t/myapp/route", "", http.StatusNotFound, models.ErrTriggerNotFound},
|
||||
} {
|
||||
_, rec := routerRequest(t, srv.Router, "GET", test.path, nil)
|
||||
|
||||
if rec.Code != test.expectedCode {
|
||||
t.Log(buf.String())
|
||||
t.Fatalf("Test %d: Expected status code for path %s to be %d but was %d",
|
||||
i, test.path, test.expectedCode, rec.Code)
|
||||
}
|
||||
|
||||
if test.expectedError != nil {
|
||||
resp := getErrorResponse(t, rec)
|
||||
|
||||
if !strings.Contains(resp.Message, test.expectedError.Error()) {
|
||||
t.Log(buf.String())
|
||||
t.Errorf("Test %d: Expected error message to have `%s`, but got `%s`",
|
||||
i, test.expectedError.Error(), resp.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTriggerRunnerPost(t *testing.T) {
|
||||
buf := setLogBuffer()
|
||||
|
||||
app := &models.App{ID: "app_id", Name: "myapp", Config: models.Config{}}
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
)
|
||||
|
||||
rnr, cancel := testRunner(t, ds)
|
||||
defer cancel()
|
||||
|
||||
fnl := logs.NewMock()
|
||||
srv := testServer(ds, &mqs.Mock{}, fnl, rnr, ServerTypeFull)
|
||||
|
||||
for i, test := range []struct {
|
||||
path string
|
||||
body string
|
||||
expectedCode int
|
||||
expectedError error
|
||||
}{
|
||||
{"/t/app/route", `{ "payload": "" }`, http.StatusNotFound, models.ErrAppsNotFound},
|
||||
{"/t/myapp/route", `{ "payload": "" }`, http.StatusNotFound, models.ErrTriggerNotFound},
|
||||
} {
|
||||
body := bytes.NewBuffer([]byte(test.body))
|
||||
_, rec := routerRequest(t, srv.Router, "POST", test.path, body)
|
||||
|
||||
if rec.Code != test.expectedCode {
|
||||
t.Log(buf.String())
|
||||
t.Errorf("Test %d: Expected status code for path %s to be %d but was %d",
|
||||
i, test.path, test.expectedCode, rec.Code)
|
||||
}
|
||||
|
||||
if test.expectedError != nil {
|
||||
resp := getErrorResponse(t, rec)
|
||||
respMsg := resp.Message
|
||||
expMsg := test.expectedError.Error()
|
||||
if respMsg != expMsg && !strings.Contains(respMsg, expMsg) {
|
||||
t.Log(buf.String())
|
||||
t.Errorf("Test %d: Expected error message to have `%s`",
|
||||
i, test.expectedError.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTriggerRunnerExecEmptyBody(t *testing.T) {
|
||||
buf := setLogBuffer()
|
||||
isFailure := false
|
||||
|
||||
defer func() {
|
||||
if isFailure {
|
||||
t.Log(buf.String())
|
||||
}
|
||||
}()
|
||||
|
||||
rCfg := map[string]string{"ENABLE_HEADER": "yes", "ENABLE_FOOTER": "yes"} // enable container start/end header/footer
|
||||
rImg := "fnproject/fn-test-utils"
|
||||
|
||||
app := &models.App{ID: "app_id", Name: "soup"}
|
||||
|
||||
f1 := &models.Fn{ID: "cold", Name: "cold", AppID: app.ID, Image: rImg, ResourceConfig: models.ResourceConfig{Memory: 64, Timeout: 10, IdleTimeout: 20}, Config: rCfg}
|
||||
f2 := &models.Fn{ID: "hothttp", Name: "hothttp", AppID: app.ID, Image: rImg, Format: "http", ResourceConfig: models.ResourceConfig{Memory: 64, Timeout: 10, IdleTimeout: 20}, Config: rCfg}
|
||||
f3 := &models.Fn{ID: "hotjson", Name: "hotjson", AppID: app.ID, Image: rImg, Format: "json", ResourceConfig: models.ResourceConfig{Memory: 64, Timeout: 10, IdleTimeout: 20}, Config: rCfg}
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Fn{f1, f2, f3},
|
||||
[]*models.Trigger{
|
||||
{ID: "t1", Name: "t1", AppID: app.ID, FnID: f1.ID, Type: "http", Source: "/cold"},
|
||||
{ID: "t2", Name: "t2", AppID: app.ID, FnID: f1.ID, Type: "http", Source: "/hothttp"},
|
||||
{ID: "t3", Name: "t3", AppID: app.ID, FnID: f1.ID, Type: "http", Source: "/hotjson"},
|
||||
},
|
||||
)
|
||||
ls := logs.NewMock()
|
||||
|
||||
rnr, cancelrnr := testRunner(t, ds, ls)
|
||||
defer cancelrnr()
|
||||
|
||||
srv := testServer(ds, &mqs.Mock{}, ls, rnr, ServerTypeFull)
|
||||
|
||||
emptyBody := `{"echoContent": "_TRX_ID_", "isDebug": true, "isEmptyBody": true}`
|
||||
|
||||
// Test hot cases twice to rule out hot-containers corrupting next request.
|
||||
testCases := []struct {
|
||||
path string
|
||||
}{
|
||||
{"/t/soup/cold"},
|
||||
{"/t/soup/hothttp"},
|
||||
{"/t/soup/hothttp"},
|
||||
{"/t/soup/hotjson"},
|
||||
{"/t/soup/hotjson"},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
t.Run(fmt.Sprintf("%d_%s", i, strings.Replace(test.path, "/", "_", -1)), func(t *testing.T) {
|
||||
trx := fmt.Sprintf("_trx_%d_", i)
|
||||
body := strings.NewReader(strings.Replace(emptyBody, "_TRX_ID_", trx, 1))
|
||||
_, rec := routerRequest(t, srv.Router, "GET", test.path, body)
|
||||
respBytes, _ := ioutil.ReadAll(rec.Body)
|
||||
respBody := string(respBytes)
|
||||
maxBody := len(respBody)
|
||||
if maxBody > 1024 {
|
||||
maxBody = 1024
|
||||
}
|
||||
|
||||
if rec.Code != http.StatusOK {
|
||||
isFailure = true
|
||||
t.Errorf("Test %d: Expected status code to be %d but was %d. body: %s",
|
||||
i, http.StatusOK, rec.Code, respBody[:maxBody])
|
||||
} else if len(respBytes) != 0 {
|
||||
isFailure = true
|
||||
t.Errorf("Test %d: Expected empty body but got %d. body: %s",
|
||||
i, len(respBytes), respBody[:maxBody])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTriggerRunnerExecution(t *testing.T) {
|
||||
buf := setLogBuffer()
|
||||
isFailure := false
|
||||
tweaker := envTweaker("FN_MAX_RESPONSE_SIZE", "2048")
|
||||
defer tweaker()
|
||||
|
||||
// Log once after we are done, flow of events are important (hot/cold containers, idle timeout, etc.)
|
||||
// for figuring out why things failed.
|
||||
defer func() {
|
||||
if isFailure {
|
||||
t.Log(buf.String())
|
||||
}
|
||||
}()
|
||||
|
||||
rCfg := map[string]string{"ENABLE_HEADER": "yes", "ENABLE_FOOTER": "yes"} // enable container start/end header/footer
|
||||
rImg := "fnproject/fn-test-utils"
|
||||
rImgBs1 := "fnproject/imagethatdoesnotexist"
|
||||
rImgBs2 := "localhost:5050/fnproject/imagethatdoesnotexist"
|
||||
|
||||
app := &models.App{ID: "app_id", Name: "myapp"}
|
||||
|
||||
defaultDneFn := &models.Fn{ID: "default_dne_fn_id", Name: "default_dne_fn", AppID: app.ID, Image: rImgBs1, Format: "", ResourceConfig: models.ResourceConfig{Memory: 64, Timeout: 30, IdleTimeout: 30}, Config: rCfg}
|
||||
defaultFn := &models.Fn{ID: "default_fn_id", Name: "default_fn", AppID: app.ID, Image: rImg, Format: "", ResourceConfig: models.ResourceConfig{Memory: 64, Timeout: 30, IdleTimeout: 30}, Config: rCfg}
|
||||
httpFn := &models.Fn{ID: "http_fn_id", Name: "http_fn", AppID: app.ID, Image: rImg, Format: "http", ResourceConfig: models.ResourceConfig{Memory: 64, Timeout: 30, IdleTimeout: 30}, Config: rCfg}
|
||||
httpDneFn := &models.Fn{ID: "http_dne_fn_id", Name: "http_dne_fn", AppID: app.ID, Image: rImgBs1, Format: "http", ResourceConfig: models.ResourceConfig{Memory: 64, Timeout: 30, IdleTimeout: 30}, Config: rCfg}
|
||||
httpDneRegistryFn := &models.Fn{ID: "http_dnereg_fn_id", Name: "http_dnereg_fn", AppID: app.ID, Image: rImgBs2, Format: "http", ResourceConfig: models.ResourceConfig{Memory: 64, Timeout: 30, IdleTimeout: 30}, Config: rCfg}
|
||||
jsonFn := &models.Fn{ID: "json_fn_id", Name: "json_fn", AppID: app.ID, Image: rImg, Format: "json", ResourceConfig: models.ResourceConfig{Memory: 64, Timeout: 30, IdleTimeout: 30}, Config: rCfg}
|
||||
oomFn := &models.Fn{ID: "http_fn_id", Name: "http_fn", AppID: app.ID, Image: rImg, Format: "http", ResourceConfig: models.ResourceConfig{Memory: 8, Timeout: 30, IdleTimeout: 30}, Config: rCfg}
|
||||
|
||||
ds := datastore.NewMockInit(
|
||||
[]*models.App{app},
|
||||
[]*models.Fn{defaultFn, defaultDneFn, httpDneRegistryFn, oomFn, httpFn, jsonFn, httpDneFn},
|
||||
[]*models.Trigger{
|
||||
{ID: "1", Name: "1", Source: "/", Type: "http", AppID: app.ID, FnID: defaultFn.ID},
|
||||
{ID: "2", Name: "2", Source: "/myhot", Type: "http", AppID: app.ID, FnID: httpFn.ID},
|
||||
{ID: "3", Name: "3", Source: "/myhotjason", Type: "http", AppID: app.ID, FnID: jsonFn.ID},
|
||||
{ID: "4", Name: "4", Source: "/myroute", Type: "http", AppID: app.ID, FnID: defaultFn.ID},
|
||||
{ID: "5", Name: "5", Source: "/myerror", Type: "http", AppID: app.ID, FnID: defaultFn.ID},
|
||||
{ID: "6", Name: "6", Source: "/mydne", Type: "http", AppID: app.ID, FnID: defaultDneFn.ID},
|
||||
{ID: "7", Name: "7", Source: "/mydnehot", Type: "http", AppID: app.ID, FnID: httpDneFn.ID},
|
||||
{ID: "8", Name: "8", Source: "/mydneregistry", Type: "http", AppID: app.ID, FnID: httpDneRegistryFn.ID},
|
||||
{ID: "9", Name: "9", Source: "/myoom", Type: "http", AppID: app.ID, FnID: oomFn.ID},
|
||||
{ID: "10", Name: "10", Source: "/mybigoutputcold", Type: "http", AppID: app.ID, FnID: defaultFn.ID},
|
||||
{ID: "11", Name: "11", Source: "/mybigoutputhttp", Type: "http", AppID: app.ID, FnID: httpFn.ID},
|
||||
{ID: "12", Name: "12", Source: "/mybigoutputjson", Type: "http", AppID: app.ID, FnID: jsonFn.ID},
|
||||
},
|
||||
)
|
||||
ls := logs.NewMock()
|
||||
|
||||
rnr, cancelrnr := testRunner(t, ds, ls)
|
||||
defer cancelrnr()
|
||||
|
||||
srv := testServer(ds, &mqs.Mock{}, ls, rnr, ServerTypeFull)
|
||||
|
||||
expHeaders := map[string][]string{"Content-Type": {"application/json; charset=utf-8"}}
|
||||
expCTHeaders := map[string][]string{"Content-Type": {"foo/bar"}}
|
||||
|
||||
// Checking for EndOfLogs currently depends on scheduling of go-routines (in docker/containerd) that process stderr & stdout.
|
||||
// Therefore, not testing for EndOfLogs for hot containers (which has complex I/O processing) anymore.
|
||||
multiLogExpectCold := []string{"BeginOfLogs", "EndOfLogs"}
|
||||
multiLogExpectHot := []string{"BeginOfLogs" /*, "EndOfLogs" */}
|
||||
|
||||
crasher := `{"echoContent": "_TRX_ID_", "isDebug": true, "isCrash": true}` // crash container
|
||||
oomer := `{"echoContent": "_TRX_ID_", "isDebug": true, "allocateMemory": 12000000}` // ask for 12MB
|
||||
badHot := `{"echoContent": "_TRX_ID_", "invalidResponse": true, "isDebug": true}` // write a not json/http as output
|
||||
ok := `{"echoContent": "_TRX_ID_", "isDebug": true}` // good response / ok
|
||||
respTypeLie := `{"echoContent": "_TRX_ID_", "responseContentType": "foo/bar", "isDebug": true}` // Content-Type: foo/bar
|
||||
respTypeJason := `{"echoContent": "_TRX_ID_", "jasonContentType": "foo/bar", "isDebug": true}` // Content-Type: foo/bar
|
||||
|
||||
// sleep between logs and with debug enabled, fn-test-utils will log header/footer below:
|
||||
multiLog := `{"echoContent": "_TRX_ID_", "sleepTime": 1000, "isDebug": true}`
|
||||
bigoutput := `{"echoContent": "_TRX_ID_", "isDebug": true, "trailerRepeat": 1000}` // 1000 trailers to exceed 2K
|
||||
smalloutput := `{"echoContent": "_TRX_ID_", "isDebug": true, "trailerRepeat": 1}` // 1 trailer < 2K
|
||||
|
||||
testCases := []struct {
|
||||
path string
|
||||
body string
|
||||
method string
|
||||
expectedCode int
|
||||
expectedHeaders map[string][]string
|
||||
expectedErrSubStr string
|
||||
expectedLogsSubStr []string
|
||||
}{
|
||||
{"/t/myapp/", ok, "GET", http.StatusOK, expHeaders, "", nil},
|
||||
|
||||
{"/t/myapp/myhot", badHot, "GET", http.StatusBadGateway, expHeaders, "invalid http response", nil},
|
||||
// hot container now back to normal:
|
||||
{"/t/myapp/myhot", ok, "GET", http.StatusOK, expHeaders, "", nil},
|
||||
|
||||
{"/t/myapp/myhotjason", badHot, "GET", http.StatusBadGateway, expHeaders, "invalid json response", nil},
|
||||
// hot container now back to normal:
|
||||
{"/t/myapp/myhotjason", ok, "GET", http.StatusOK, expHeaders, "", nil},
|
||||
|
||||
{"/t/myapp/myhot", respTypeLie, "GET", http.StatusOK, expCTHeaders, "", nil},
|
||||
{"/t/myapp/myhotjason", respTypeLie, "GET", http.StatusOK, expCTHeaders, "", nil},
|
||||
{"/t/myapp/myhotjason", respTypeJason, "GET", http.StatusOK, expCTHeaders, "", nil},
|
||||
|
||||
{"/t/myapp/myroute", ok, "GET", http.StatusOK, expHeaders, "", nil},
|
||||
{"/t/myapp/myerror", crasher, "GET", http.StatusBadGateway, expHeaders, "container exit code 2", nil},
|
||||
{"/t/myapp/mydne", ``, "GET", http.StatusNotFound, nil, "pull access denied", nil},
|
||||
{"/t/myapp/mydnehot", ``, "GET", http.StatusNotFound, nil, "pull access denied", nil},
|
||||
{"/t/myapp/mydneregistry", ``, "GET", http.StatusInternalServerError, nil, "connection refused", nil},
|
||||
{"/t/myapp/myoom", oomer, "GET", http.StatusBadGateway, nil, "container out of memory", nil},
|
||||
{"/t/myapp/myhot", multiLog, "GET", http.StatusOK, nil, "", multiLogExpectHot},
|
||||
{"/t/myapp/", multiLog, "GET", http.StatusOK, nil, "", multiLogExpectCold},
|
||||
{"/t/myapp/mybigoutputjson", bigoutput, "GET", http.StatusBadGateway, nil, "function response too large", nil},
|
||||
{"/t/myapp/mybigoutputjson", smalloutput, "GET", http.StatusOK, nil, "", nil},
|
||||
{"/t/myapp/mybigoutputhttp", bigoutput, "GET", http.StatusBadGateway, nil, "", nil},
|
||||
{"/t/myapp/mybigoutputhttp", smalloutput, "GET", http.StatusOK, nil, "", nil},
|
||||
{"/t/myapp/mybigoutputcold", bigoutput, "GET", http.StatusBadGateway, nil, "", nil},
|
||||
{"/t/myapp/mybigoutputcold", smalloutput, "GET", http.StatusOK, nil, "", nil},
|
||||
}
|
||||
|
||||
callIds := make([]string, len(testCases))
|
||||
|
||||
for i, test := range testCases {
|
||||
t.Run(fmt.Sprintf("Test_%d_%s", i, strings.Replace(test.path, "/", "_", -1)), func(t *testing.T) {
|
||||
trx := fmt.Sprintf("_trx_%d_", i)
|
||||
body := strings.NewReader(strings.Replace(test.body, "_TRX_ID_", trx, 1))
|
||||
_, rec := routerRequest(t, srv.Router, test.method, test.path, body)
|
||||
respBytes, _ := ioutil.ReadAll(rec.Body)
|
||||
respBody := string(respBytes)
|
||||
maxBody := len(respBody)
|
||||
if maxBody > 1024 {
|
||||
maxBody = 1024
|
||||
}
|
||||
|
||||
callIds[i] = rec.Header().Get("Fn_call_id")
|
||||
|
||||
if rec.Code != test.expectedCode {
|
||||
isFailure = true
|
||||
t.Errorf("Test %d: Expected status code to be %d but was %d. body: %s",
|
||||
i, test.expectedCode, rec.Code, respBody[:maxBody])
|
||||
}
|
||||
|
||||
			if rec.Code == http.StatusOK && !strings.Contains(respBody, trx) {
				isFailure = true
				t.Errorf("Test %d: Expected response to include %s but got body: %s",
					i, trx, respBody[:maxBody])
			}

			if test.expectedErrSubStr != "" && !strings.Contains(respBody, test.expectedErrSubStr) {
				isFailure = true
				t.Errorf("Test %d: Expected response to include %s but got body: %s",
					i, test.expectedErrSubStr, respBody[:maxBody])
			}

			if test.expectedHeaders != nil {
				for name, header := range test.expectedHeaders {
					if header[0] != rec.Header().Get(name) {
						isFailure = true
						t.Errorf("Test %d: Expected header `%s` to be %s but was %s. body: %s",
							i, name, header[0], rec.Header().Get(name), respBody)
					}
				}
			}
		})

	}

	for i, test := range testCases {
		if test.expectedLogsSubStr != nil {
			if !checkLogs(t, i, ls, callIds[i], test.expectedLogsSubStr) {
				isFailure = true
			}
		}
	}
}

func TestTriggerRunnerTimeout(t *testing.T) {
	buf := setLogBuffer()
	isFailure := false

	// Log once after we are done, flow of events are important (hot/cold containers, idle timeout, etc.)
	// for figuring out why things failed.
	defer func() {
		if isFailure {
			t.Log(buf.String())
		}
	}()

	models.RouteMaxMemory = uint64(1024 * 1024 * 1024) // 1024 TB
	hugeMem := uint64(models.RouteMaxMemory - 1)

	app := &models.App{ID: "app_id", Name: "myapp", Config: models.Config{}}
	coldFn := &models.Fn{ID: "cold", Name: "cold", AppID: app.ID, Format: "", Image: "fnproject/fn-test-utils", ResourceConfig: models.ResourceConfig{Memory: 128, Timeout: 4, IdleTimeout: 30}}
	httpFn := &models.Fn{ID: "cold", Name: "http", AppID: app.ID, Format: "http", Image: "fnproject/fn-test-utils", ResourceConfig: models.ResourceConfig{Memory: 128, Timeout: 4, IdleTimeout: 30}}
	jsonFn := &models.Fn{ID: "json", Name: "json", AppID: app.ID, Format: "json", Image: "fnproject/fn-test-utils", ResourceConfig: models.ResourceConfig{Memory: 128, Timeout: 4, IdleTimeout: 30}}
	bigMemColdFn := &models.Fn{ID: "bigmemcold", Name: "bigmemcold", AppID: app.ID, Format: "", Image: "fnproject/fn-test-utils", ResourceConfig: models.ResourceConfig{Memory: hugeMem, Timeout: 4, IdleTimeout: 30}}
	bigMemHotFn := &models.Fn{ID: "bigmemhot", Name: "bigmemhot", AppID: app.ID, Format: "http", Image: "fnproject/fn-test-utils", ResourceConfig: models.ResourceConfig{Memory: hugeMem, Timeout: 4, IdleTimeout: 30}}

	ds := datastore.NewMockInit(
		[]*models.App{app},
		[]*models.Fn{coldFn, httpFn, jsonFn, bigMemColdFn, bigMemHotFn},
		[]*models.Trigger{
			{ID: "1", Name: "1", Source: "/cold", Type: "http", AppID: app.ID, FnID: coldFn.ID},
			{ID: "2", Name: "2", Source: "/hot", Type: "http", AppID: app.ID, FnID: httpFn.ID},
			{ID: "3", Name: "3", Source: "/hot-json", Type: "http", AppID: app.ID, FnID: jsonFn.ID},
			{ID: "4", Name: "4", Source: "/bigmem-cold", Type: "http", AppID: app.ID, FnID: bigMemColdFn.ID},
			{ID: "5", Name: "5", Source: "/bigmem-hot", Type: "http", AppID: app.ID, FnID: bigMemHotFn.ID},
		},
	)

	fnl := logs.NewMock()
	rnr, cancelrnr := testRunner(t, ds, fnl)
	defer cancelrnr()

	srv := testServer(ds, &mqs.Mock{}, fnl, rnr, ServerTypeFull)

	for i, test := range []struct {
		path            string
		body            string
		method          string
		expectedCode    int
		expectedHeaders map[string][]string
	}{
		{"/t/myapp/cold", `{"echoContent": "_TRX_ID_", "sleepTime": 0, "isDebug": true}`, "POST", http.StatusOK, nil},
		{"/t/myapp/cold", `{"echoContent": "_TRX_ID_", "sleepTime": 5000, "isDebug": true}`, "POST", http.StatusGatewayTimeout, nil},
		{"/t/myapp/hot", `{"echoContent": "_TRX_ID_", "sleepTime": 5000, "isDebug": true}`, "POST", http.StatusGatewayTimeout, nil},
		{"/t/myapp/hot", `{"echoContent": "_TRX_ID_", "sleepTime": 0, "isDebug": true}`, "POST", http.StatusOK, nil},
		{"/t/myapp/hot-json", `{"echoContent": "_TRX_ID_", "sleepTime": 5000, "isDebug": true}`, "POST", http.StatusGatewayTimeout, nil},
		{"/t/myapp/hot-json", `{"echoContent": "_TRX_ID_", "sleepTime": 0, "isDebug": true}`, "POST", http.StatusOK, nil},
		{"/t/myapp/bigmem-cold", `{"echoContent": "_TRX_ID_", "sleepTime": 0, "isDebug": true}`, "POST", http.StatusServiceUnavailable, map[string][]string{"Retry-After": {"15"}}},
		{"/t/myapp/bigmem-hot", `{"echoContent": "_TRX_ID_", "sleepTime": 0, "isDebug": true}`, "POST", http.StatusServiceUnavailable, map[string][]string{"Retry-After": {"15"}}},
	} {
		t.Run(fmt.Sprintf("%d_%s", i, strings.Replace(test.path, "/", "_", -1)), func(t *testing.T) {
			trx := fmt.Sprintf("_trx_%d_", i)
			body := strings.NewReader(strings.Replace(test.body, "_TRX_ID_", trx, 1))
			_, rec := routerRequest(t, srv.Router, test.method, test.path, body)
			respBytes, _ := ioutil.ReadAll(rec.Body)
			respBody := string(respBytes)
			maxBody := len(respBody)
			if maxBody > 1024 {
				maxBody = 1024
			}

			if rec.Code != test.expectedCode {
				isFailure = true
				t.Errorf("Test %d: Expected status code to be %d but was %d body: %#v",
					i, test.expectedCode, rec.Code, respBody[:maxBody])
			}

			if rec.Code == http.StatusOK && !strings.Contains(respBody, trx) {
				isFailure = true
				t.Errorf("Test %d: Expected response to include %s but got body: %s",
					i, trx, respBody[:maxBody])
			}

			if test.expectedHeaders != nil {
				for name, header := range test.expectedHeaders {
					if header[0] != rec.Header().Get(name) {
						isFailure = true
						t.Errorf("Test %d: Expected header `%s` to be %s but was %s body: %#v",
							i, name, header[0], rec.Header().Get(name), respBody[:maxBody])
					}
				}
			}
		})

	}
}

// Minimal test that checks the possibility of invoking concurrent hot sync functions.
func TestTriggerRunnerMinimalConcurrentHotSync(t *testing.T) {
	buf := setLogBuffer()

	app := &models.App{ID: "app_id", Name: "myapp", Config: models.Config{}}
	fn := &models.Fn{ID: "fn_id", AppID: app.ID, Name: "myfn", Image: "fnproject/fn-test-utils", Format: "http", ResourceConfig: models.ResourceConfig{Memory: 128, Timeout: 30, IdleTimeout: 5}}
	ds := datastore.NewMockInit(
		[]*models.App{app},
		[]*models.Fn{fn},
		[]*models.Trigger{{Name: "1", Source: "/hot", AppID: app.ID, FnID: fn.ID, Type: "http"}},
	)

	fnl := logs.NewMock()
	rnr, cancelrnr := testRunner(t, ds, fnl)
	defer cancelrnr()

	srv := testServer(ds, &mqs.Mock{}, fnl, rnr, ServerTypeFull)

	for i, test := range []struct {
		path            string
		body            string
		method          string
		expectedCode    int
		expectedHeaders map[string][]string
	}{
		{"/t/myapp/hot", `{"sleepTime": 100, "isDebug": true}`, "POST", http.StatusOK, nil},
	} {
		errs := make(chan error)
		numCalls := 4
		for k := 0; k < numCalls; k++ {
			go func() {
				body := strings.NewReader(test.body)
				_, rec := routerRequest(t, srv.Router, test.method, test.path, body)

				if rec.Code != test.expectedCode {
					t.Log(buf.String())
					errs <- fmt.Errorf("Test %d: Expected status code to be %d but was %d body: %#v",
						i, test.expectedCode, rec.Code, rec.Body.String())
					return
				}

				if test.expectedHeaders == nil {
					errs <- nil
					return
				}
				for name, header := range test.expectedHeaders {
					if header[0] != rec.Header().Get(name) {
						t.Log(buf.String())
						errs <- fmt.Errorf("Test %d: Expected header `%s` to be %s but was %s body: %#v",
							i, name, header[0], rec.Header().Get(name), rec.Body.String())
						return
					}
				}
				errs <- nil
			}()
		}
		for k := 0; k < numCalls; k++ {
			err := <-errs
			if err != nil {
				t.Errorf("%v", err)
			}
		}
	}
}

@@ -2,60 +2,19 @@ package server

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"testing"

	"github.com/fnproject/fn/api/agent"
	"github.com/fnproject/fn/api/datastore"
	"github.com/fnproject/fn/api/logs"
	"github.com/fnproject/fn/api/models"
	"github.com/fnproject/fn/api/mqs"
)

func envTweaker(name, value string) func() {
	bck, ok := os.LookupEnv(name)

	err := os.Setenv(name, value)
	if err != nil {
		panic(err.Error())
	}

	return func() {
		var err error
		if !ok {
			err = os.Unsetenv(name)
		} else {
			err = os.Setenv(name, bck)
		}
		if err != nil {
			panic(err.Error())
		}
	}
}
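
// A minimal usage sketch of envTweaker (illustrative only — the variable name
// and test below are made up, not part of this change): override an
// environment variable for one test and restore, or unset, the previous value
// when the returned closure runs.
func testWithTweakedEnv(t *testing.T) {
	restore := envTweaker("FN_EXAMPLE_VAR", "test-value")
	defer restore()

	// ... test body that depends on the overridden environment ...
}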

func testRunner(_ *testing.T, args ...interface{}) (agent.Agent, context.CancelFunc) {
	ds := datastore.NewMock()
	ls := logs.NewMock()
	var mq models.MessageQueue = &mqs.Mock{}
	for _, a := range args {
		switch arg := a.(type) {
		case models.Datastore:
			ds = arg
		case models.MessageQueue:
			mq = arg
		case models.LogStore:
			ls = arg
		}
	}
	r := agent.New(agent.NewDirectDataAccess(ds, ls, mq))
	return r, func() { r.Close() }
}
// TODO Deprecate with Routes

func TestRouteRunnerGet(t *testing.T) {
	buf := setLogBuffer()
@@ -219,6 +178,7 @@ func TestRouteRunnerExecEmptyBody(t *testing.T) {
		}
	}
}

func TestRouteRunnerExecution(t *testing.T) {
	buf := setLogBuffer()
	isFailure := false
@@ -380,85 +340,6 @@ func TestRouteRunnerExecution(t *testing.T) {
	}
}

func checkLogs(t *testing.T, tnum int, ds models.LogStore, callID string, expected []string) bool {

	logReader, err := ds.GetLog(context.Background(), "myapp", callID)
	if err != nil {
		t.Errorf("Test %d: GetLog for call_id:%s returned err %s",
			tnum, callID, err.Error())
		return false
	}

	logBytes, err := ioutil.ReadAll(logReader)
	if err != nil {
		t.Errorf("Test %d: GetLog read IO call_id:%s returned err %s",
			tnum, callID, err.Error())
		return false
	}

	logBody := string(logBytes)
	maxLog := len(logBody)
	if maxLog > 1024 {
		maxLog = 1024
	}

	for _, match := range expected {
		if !strings.Contains(logBody, match) {
			t.Errorf("Test %d: GetLog read IO call_id:%s cannot find: %s in logs: %s",
				tnum, callID, match, logBody[:maxLog])
			return false
		}
	}

	return true
}

// implement models.MQ and models.APIError
type errorMQ struct {
	error
	code int
}

func (mock *errorMQ) Push(context.Context, *models.Call) (*models.Call, error) { return nil, mock }
func (mock *errorMQ) Reserve(context.Context) (*models.Call, error)            { return nil, mock }
func (mock *errorMQ) Delete(context.Context, *models.Call) error               { return mock }
func (mock *errorMQ) Code() int                                                { return mock.code }
func (mock *errorMQ) Close() error                                             { return nil }
func TestFailedEnqueue(t *testing.T) {
	buf := setLogBuffer()
	app := &models.App{ID: "app_id", Name: "myapp", Config: models.Config{}}
	ds := datastore.NewMockInit(
		[]*models.App{app},
		[]*models.Route{
			{Path: "/dummy", Image: "dummy/dummy", Type: "async", Memory: 128, Timeout: 30, IdleTimeout: 30, AppID: app.ID},
		},
	)
	err := errors.New("Unable to push task to queue")
	mq := &errorMQ{err, http.StatusInternalServerError}
	fnl := logs.NewMock()
	rnr, cancelrnr := testRunner(t, ds, mq, fnl)
	defer cancelrnr()

	srv := testServer(ds, mq, fnl, rnr, ServerTypeFull)
	for i, test := range []struct {
		path            string
		body            string
		method          string
		expectedCode    int
		expectedHeaders map[string][]string
	}{
		{"/r/myapp/dummy", ``, "POST", http.StatusInternalServerError, nil},
	} {
		body := strings.NewReader(test.body)
		_, rec := routerRequest(t, srv.Router, test.method, test.path, body)
		if rec.Code != test.expectedCode {
			t.Log(buf.String())
			t.Errorf("Test %d: Expected status code to be %d but was %d",
				i, test.expectedCode, rec.Code)
		}
	}
}

func TestRouteRunnerTimeout(t *testing.T) {
	buf := setLogBuffer()
	isFailure := false

@@ -157,7 +157,7 @@ const (

func (s NodeType) String() string {
	switch s {
	default:
	case ServerTypeFull:
		return "full"
	case ServerTypeAPI:
		return "api"
@@ -167,6 +167,8 @@ func (s NodeType) String() string {
		return "runner"
	case ServerTypePureRunner:
		return "pure-runner"
	default:
		return fmt.Sprintf("unknown(%d)", s)
	}
}

@@ -184,6 +186,10 @@ type Server struct {
	mq        models.MessageQueue
	logstore  models.LogStore
	nodeType  NodeType
	// Agent enqueue and read stores
	lbEnqueue    agent.EnqueueDataAccess
	lbReadAccess agent.ReadDataAccess
	noHTTTPTriggerEndpoint bool
	cert          string
	certKey       string
	certAuthority string
@@ -244,10 +250,10 @@ func NewFromEnv(ctx context.Context, opts ...Option) *Server {
	opts = append(opts, WithNodeCertKey(getEnv(EnvCertKey, "")))
	opts = append(opts, WithNodeCertAuthority(getEnv(EnvCertAuth, "")))

	publicLbUrl := getEnv(EnvPublicLoadBalancerURL, "")
	if publicLbUrl != "" {
		logrus.Infof("using LB Base URL: '%s'", publicLbUrl)
		opts = append(opts, WithTriggerAnnotator(NewStaticURLTriggerAnnotator(publicLbUrl)))
	publicLBURL := getEnv(EnvPublicLoadBalancerURL, "")
	if publicLBURL != "" {
		logrus.Infof("using LB Base URL: '%s'", publicLBURL)
		opts = append(opts, WithTriggerAnnotator(NewStaticURLTriggerAnnotator(publicLBURL)))
	} else {
		opts = append(opts, WithTriggerAnnotator(NewRequestBasedTriggerAnnotator()))
	}
@@ -317,6 +323,7 @@ func WithDBURL(dbURL string) Option {
				return err
			}
			s.datastore = ds
			s.lbReadAccess = agent.NewCachedDataAccess(s.datastore)
		}
		return nil
	}
@@ -331,6 +338,7 @@ func WithMQURL(mqURL string) Option {
				return err
			}
			s.mq = mq
			s.lbEnqueue = agent.NewDirectEnqueueAccess(mq)
		}
		return nil
	}
@@ -353,12 +361,13 @@ func WithLogURL(logstoreURL string) Option {

// WithRunnerURL maps EnvRunnerURL
func WithRunnerURL(runnerURL string) Option {
	return func(ctx context.Context, s *Server) error {

		if runnerURL != "" {
			cl, err := hybrid.NewClient(runnerURL)
			if err != nil {
				return err
			}
			s.agent = agent.New(agent.NewCachedDataAccess(cl))
			s.lbReadAccess = agent.NewCachedDataAccess(cl)
		}
		return nil
	}
@@ -426,10 +435,21 @@ func WithNodeCertAuthority(ca string) Option {
	}
}

// WithReadDataAccess overrides the LB read DataAccess for a server
func WithReadDataAccess(ds agent.ReadDataAccess) Option {
	return func(ctx context.Context, s *Server) error {
		s.lbReadAccess = ds
		return nil
	}
}

// WithDatastore allows directly setting a datastore
func WithDatastore(ds models.Datastore) Option {
	return func(ctx context.Context, s *Server) error {
		s.datastore = ds
		if s.lbReadAccess == nil {
			s.lbReadAccess = agent.NewCachedDataAccess(ds)
		}
		return nil
	}
}
@@ -438,6 +458,7 @@ func WithDatastore(ds models.Datastore) Option {
func WithMQ(mq models.MessageQueue) Option {
	return func(ctx context.Context, s *Server) error {
		s.mq = mq
		s.lbEnqueue = agent.NewDirectEnqueueAccess(mq)
		return nil
	}
}
@@ -497,7 +518,9 @@ func WithFullAgent() Option {
		if s.datastore == nil || s.logstore == nil || s.mq == nil {
			return errors.New("full nodes must configure FN_DB_URL, FN_LOG_URL, FN_MQ_URL")
		}
		s.agent = agent.New(agent.NewCachedDataAccess(agent.NewDirectDataAccess(s.datastore, s.logstore, s.mq)))
		da := agent.NewDirectCallDataAccess(s.logstore, s.mq)
		dq := agent.NewDirectDequeueAccess(s.mq)
		s.agent = agent.New(da, agent.WithAsync(dq))
		return nil
	}
}
@@ -518,7 +541,8 @@ func WithAgentFromEnv() Option {
			if err != nil {
				return err
			}
			s.agent = agent.New(agent.NewCachedDataAccess(cl))

			s.agent = agent.New(cl)
		case ServerTypePureRunner:
			if s.datastore != nil {
				return errors.New("pure runner nodes must not be configured with a datastore (FN_DB_URL)")
@@ -574,7 +598,8 @@ func WithAgentFromEnv() Option {
			pool.RegisterPlacerViews(keys)
			agent.RegisterLBAgentViews(keys)

			s.agent, err = agent.NewLBAgent(agent.NewCachedDataAccess(cl), runnerPool, placer)
			s.lbReadAccess = agent.NewCachedDataAccess(cl)
			s.agent, err = agent.NewLBAgent(cl, runnerPool, placer)
			if err != nil {
				return errors.New("LBAgent creation failed")
			}
@@ -625,6 +650,7 @@ func New(ctx context.Context, opts ...Option) *Server {
		webListenPort:   DefaultPort,
		adminListenPort: DefaultPort,
		grpcListenPort:  DefaultGRPCPort,
		lbEnqueue:       agent.NewUnsupportedAsyncEnqueueAccess(),
		// Almost everything else is configured through opts (see NewFromEnv for ex.) or below
	}

@@ -638,19 +664,47 @@ func New(ctx context.Context, opts ...Option) *Server {
		}
	}

	requireConfigSet := func(id string, val interface{}) {
		if val == nil {
			log.Fatalf("Invalid configuration for server type %s, %s must be configured during startup", s.nodeType, id)
		}
	}
	requireConfigNotSet := func(id string, val interface{}) {
		if val != nil {
			log.Fatalf("Invalid configuration for server type %s, %s must not be configured during startup", s.nodeType, id)
		}
	}

	// Check that WithAgent options have been processed correctly.
	// Yuck the yuck - server should really be split into several interfaces (LB, Runner, API) and each should be instantiated separately
	switch s.nodeType {
	case ServerTypeAPI:
		if s.agent != nil {
			log.Fatal("Incorrect configuration, API nodes must not have an agent initialized.")
		}
		if s.triggerAnnotator == nil {
			log.Fatal("No trigger annotatator set ")
		}
		requireConfigNotSet("agent", s.agent)
		requireConfigSet("datastore", s.datastore)
		requireConfigSet("triggerAnnotator", s.triggerAnnotator)
	case ServerTypeFull:
		requireConfigSet("enqueue", s.lbEnqueue)
		requireConfigSet("agent", s.agent)
		requireConfigSet("lbReadAccess", s.lbReadAccess)
		requireConfigSet("datastore", s.datastore)
		requireConfigSet("triggerAnnotator", s.triggerAnnotator)

	case ServerTypeLB:
		requireConfigSet("lbReadAccess", s.lbReadAccess)
		requireConfigSet("agent", s.agent)
		requireConfigSet("lbEnqueue", s.lbEnqueue)

	case ServerTypeRunner:
		requireConfigSet("lbReadAccess", s.lbReadAccess)
		requireConfigSet("agent", s.agent)

	case ServerTypePureRunner:
		requireConfigSet("agent", s.agent)

	default:
		if s.agent == nil {
			log.Fatal("Incorrect configuration, non-API nodes must have an agent initialized.")
		}

		log.Fatal("unknown server type %d", s.nodeType)

	}

	setMachineID()
@@ -664,6 +718,7 @@ func New(ctx context.Context, opts ...Option) *Server {
	s.fnListeners = new(fnListeners)
	s.triggerListeners = new(triggerListeners)

	// TODO it's not clear that this is always correct as the read store won't get wrapping
	s.datastore = datastore.Wrap(s.datastore)
	s.datastore = fnext.NewDatastore(s.datastore, s.appListeners, s.routeListeners, s.fnListeners, s.triggerListeners)
	s.logstore = logs.Wrap(s.logstore)
@@ -702,6 +757,14 @@ func WithPrometheus() Option {
	}
}

// WithoutHTTPTriggerEndpoints optionally disables the trigger and route endpoints from an LB-supporting server, allowing extensions to replace them with their own versions
func WithoutHTTPTriggerEndpoints() Option {
	return func(ctx context.Context, s *Server) error {
		s.noHTTTPTriggerEndpoint = true
		return nil
	}
}
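
// A sketch of how an extension might apply this option (illustrative only,
// not part of this change): construct the server from the environment without
// the stock /t and /r handlers, leaving the extension free to mount its own
// replacements on the returned server before starting it.
func newServerWithoutTriggerEndpoints(ctx context.Context) *Server {
	return NewFromEnv(ctx, WithoutHTTPTriggerEndpoints())
}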

// WithJaeger maps EnvJaegerURL
func WithJaeger(jaegerURL string) Option {
	return func(ctx context.Context, s *Server) error {
@@ -999,8 +1062,9 @@ func (s *Server) bindHandlers(ctx context.Context) {
	profilerSetup(admin, "/debug")

	// Pure runners don't have any route, they have grpc
	if s.nodeType != ServerTypePureRunner {
	if s.nodeType != ServerTypeRunner {
	switch s.nodeType {

	case ServerTypeFull, ServerTypeAPI:
		clean := engine.Group("/v1")
		v1 := clean.Group("")
		v1.Use(setAppNameInCtx)
@@ -1016,7 +1080,7 @@ func (s *Server) bindHandlers(ctx context.Context) {
		withAppCheck := apps.Group("")
		withAppCheck.Use(s.checkAppPresenceByName())

		withAppCheck.GET("", s.handleV1AppGetByName)
		withAppCheck.GET("", s.handleV1AppGetByIdOrName)
		withAppCheck.PATCH("", s.handleV1AppUpdate)
		withAppCheck.DELETE("", s.handleV1AppDelete)

@@ -1057,29 +1121,38 @@ func (s *Server) bindHandlers(ctx context.Context) {
		v2.DELETE("/triggers/:triggerID", s.handleTriggerDelete)
		}

		{
			runner := clean.Group("/runner")
		{ // Hybrid API - this should only be enabled on API servers
			runner := cleanv2.Group("/runner")
			runner.PUT("/async", s.handleRunnerEnqueue)
			runner.GET("/async", s.handleRunnerDequeue)

			runner.POST("/start", s.handleRunnerStart)
			runner.POST("/finish", s.handleRunnerFinish)

			runnerAppApi := runner.Group(

			runnerAppAPI := runner.Group(
				"/apps/:appID")
			runnerAppApi.Use(setAppIDInCtx)
			runnerAppApi.GET("", s.handleV1AppGetByName)
			runnerAppApi.GET("/routes/:route", s.handleRouteGetRunner)
			runnerAppAPI.Use(setAppIDInCtx)
			// Both of these are somewhat odd -
			// Deprecate, remove with routes
			runnerAppAPI.GET("/routes/*route", s.handleRunnerGetRoute)
			runnerAppAPI.GET("/triggerBySource/:triggerType/*triggerSource", s.handleRunnerGetTriggerBySource)

		}
	}

	if s.nodeType != ServerTypeAPI {
		runner := engine.Group("/r")
		runner.Use(s.checkAppPresenceByNameAtRunner())
		runner.Any("/:appName", s.handleFunctionCall)
		runner.Any("/:appName/*route", s.handleFunctionCall)
	switch s.nodeType {
	case ServerTypeFull, ServerTypeLB, ServerTypeRunner:
		if !s.noHTTTPTriggerEndpoint {
			lbTriggerGroup := engine.Group("/t")
			lbTriggerGroup.Any("/:appName", s.handleHTTPTriggerCall)
			lbTriggerGroup.Any("/:appName/*triggerSource", s.handleHTTPTriggerCall)

			// TODO Deprecate with routes
			lbRouteGroup := engine.Group("/r")
			lbRouteGroup.Use(s.checkAppPresenceByNameAtLB())
			lbRouteGroup.Any("/:appName", s.handleV1FunctionCall)
			lbRouteGroup.Any("/:appName/*route", s.handleV1FunctionCall)

		}

	}

@@ -10,14 +10,12 @@ import (
	"net/url"
	"os"
	"strconv"
	"strings"
	"testing"

	"github.com/fnproject/fn/api/agent"
	"github.com/fnproject/fn/api/datastore"
	"github.com/fnproject/fn/api/datastore/sql"
	_ "github.com/fnproject/fn/api/datastore/sql/sqlite"
	"github.com/fnproject/fn/api/id"
	"github.com/fnproject/fn/api/logs"
	"github.com/fnproject/fn/api/models"
	"github.com/fnproject/fn/api/mqs"
@@ -220,6 +218,7 @@ func TestRunnerNode(t *testing.T) {
		{"get all routes not found", "GET", "/v1/apps/myapp/routes", ``, http.StatusBadRequest, 0},
		{"delete app not found", "DELETE", "/v1/apps/myapp", ``, http.StatusBadRequest, 0},
	} {
		t.Run(test.name, func(t *testing.T) {
			_, rec := routerRequest(t, srv.Router, test.method, test.path, bytes.NewBuffer([]byte(test.body)))

			if rec.Code != test.expectedCode {
@@ -227,6 +226,8 @@ func TestRunnerNode(t *testing.T) {
				t.Errorf("Test \"%s\": Expected status code to be %d but was %d",
					test.name, test.expectedCode, rec.Code)
			}
		})

	}
}

@@ -278,58 +279,3 @@ func TestApiNode(t *testing.T) {
		}
	}
}

func TestHybridEndpoints(t *testing.T) {
	buf := setLogBuffer()
	app := &models.App{ID: "app_id", Name: "myapp"}
	ds := datastore.NewMockInit(
		[]*models.App{app},
		[]*models.Route{{
			AppID: app.ID,
			Path:  "yodawg",
		}},
	)

	logDB := logs.NewMock()

	srv := testServer(ds, &mqs.Mock{}, logDB, nil /* TODO */, ServerTypeAPI)

	newCallBody := func() string {
		call := &models.Call{
			AppID: app.ID,
			ID:    id.New().String(),
			Path:  "yodawg",
			// TODO ?
		}
		var b bytes.Buffer
		json.NewEncoder(&b).Encode(&call)
		return b.String()
	}

	for _, test := range []struct {
		name         string
		method       string
		path         string
		body         string
		expectedCode int
	}{
		// TODO change all these tests to just do an async task in normal order once plumbing is done

		{"post async call", "PUT", "/v1/runner/async", newCallBody(), http.StatusOK},

		// TODO this one only works if it's not the same as the first since update isn't hooked up
		{"finish call", "POST", "/v1/runner/finish", newCallBody(), http.StatusOK},

		// TODO these won't work until update works and the agent gets shut off
		//{"get async call", "GET", "/v1/runner/async", "", http.StatusOK},
		//{"start call", "POST", "/v1/runner/start", "TODO", http.StatusOK},
	} {
		_, rec := routerRequest(t, srv.Router, test.method, test.path, strings.NewReader(test.body))

		if rec.Code != test.expectedCode {
			t.Log(buf.String())
			t.Errorf("Test \"%s\": Expected status code to be %d but was %d",
				test.name, test.expectedCode, rec.Code)
		}
	}
}

@@ -15,7 +15,7 @@ type TriggerAnnotator interface {

type requestBasedTriggerAnnotator struct{}

func annotateTriggerWithBaseUrl(baseURL string, app *models.App, t *models.Trigger) (*models.Trigger, error) {
func annotateTriggerWithBaseURL(baseURL string, app *models.App, t *models.Trigger) (*models.Trigger, error) {
	if t.Type != models.TriggerTypeHTTP {
		return t, nil
	}
@@ -41,23 +41,25 @@ func (tp *requestBasedTriggerAnnotator) AnnotateTrigger(ctx *gin.Context, app *m
		scheme = "https"
	}

	return annotateTriggerWithBaseUrl(fmt.Sprintf("%s://%s", scheme, ctx.Request.Host), app, t)
	return annotateTriggerWithBaseURL(fmt.Sprintf("%s://%s", scheme, ctx.Request.Host), app, t)
}

//NewRequestBasedTriggerAnnotator creates a TriggerAnnotator that inspects the incoming request host and port, and uses this to generate http trigger endpoint URLs based on those
func NewRequestBasedTriggerAnnotator() TriggerAnnotator {
	return &requestBasedTriggerAnnotator{}
}

type staticUrlTriggerAnnotator struct {
	urlBase string
type staticURLTriggerAnnotator struct {
	baseURL string
}

func NewStaticURLTriggerAnnotator(baseUrl string) TriggerAnnotator {
//NewStaticURLTriggerAnnotator annotates triggers based on a given URL base - e.g. "https://my.domain" ---> "https://my.domain/t/app/source"
func NewStaticURLTriggerAnnotator(baseURL string) TriggerAnnotator {

	return &staticUrlTriggerAnnotator{urlBase: baseUrl}
	return &staticURLTriggerAnnotator{baseURL: baseURL}
}

func (s *staticUrlTriggerAnnotator) AnnotateTrigger(ctx *gin.Context, app *models.App, trigger *models.Trigger) (*models.Trigger, error) {
	return annotateTriggerWithBaseUrl(s.urlBase, app, trigger)
func (s *staticURLTriggerAnnotator) AnnotateTrigger(ctx *gin.Context, app *models.App, trigger *models.Trigger) (*models.Trigger, error) {
	return annotateTriggerWithBaseURL(s.baseURL, app, trigger)
}
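
// A standalone sketch (illustrative only, not part of this change) mirroring
// the NewFromEnv logic shown earlier: pick the static annotator when a public
// load-balancer base URL is known, otherwise fall back to deriving trigger
// endpoints from each incoming request.
func triggerAnnotatorOption(publicLBURL string) Option {
	if publicLBURL != "" {
		return WithTriggerAnnotator(NewStaticURLTriggerAnnotator(publicLBURL))
	}
	return WithTriggerAnnotator(NewRequestBasedTriggerAnnotator())
}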

@@ -1,11 +1,10 @@
package server

import (
	"net/http"

	"fmt"
	"github.com/fnproject/fn/api"
	"github.com/gin-gonic/gin"
	"net/http"
)

func (s *Server) handleTriggerGet(c *gin.Context) {

test/fn-system-tests/exec_http_trigger_test.go (new file, 498 lines)
@@ -0,0 +1,498 @@
package tests

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"path"
	"strings"
	"testing"
	"time"

	"github.com/fnproject/fn/api/datastore/datastoretest"
	"github.com/fnproject/fn/api/models"
)

// See fn-test-utils for json response
func getEchoContent(respBytes []byte) (string, error) {

	var respJs map[string]interface{}

	err := json.Unmarshal(respBytes, &respJs)
	if err != nil {
		return "", err
	}

	req, ok := respJs["request"].(map[string]interface{})
	if !ok {
		return "", errors.New("unexpected json: request map")
	}

	echo, ok := req["echoContent"].(string)
	if !ok {
		return "", errors.New("unexpected json: echoContent string")
	}

	return echo, nil
}

// See fn-test-utils for json response
func getConfigContent(key string, respBytes []byte) (string, error) {

	var respJs map[string]interface{}

	err := json.Unmarshal(respBytes, &respJs)
	if err != nil {
		return "", err
	}

	cfg, ok := respJs["config"].(map[string]interface{})
	if !ok {
		return "", errors.New("unexpected json: config map")
	}

	val, ok := cfg[key].(string)
	if !ok {
		return "", fmt.Errorf("unexpected json: %s string", key)
	}

	return val, nil
}
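
// A small sketch of the response shape the two helpers above assume
// fn-test-utils returns (the payload here is illustrative, inferred from the
// parsing code rather than taken from fn-test-utils itself):
// {"request": {"echoContent": ...}, "config": {...}}.
func Example_echoContentHelpers() {
	resp := []byte(`{"request":{"echoContent":"HelloWorld"},"config":{"FN_CHEESE":"Tete de Moine"}}`)

	echo, _ := getEchoContent(resp)
	cheese, _ := getConfigContent("FN_CHEESE", resp)
	fmt.Println(echo, cheese)
	// Output: HelloWorld Tete de Moine
}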

type systemTestResourceProvider struct {
	datastoretest.ResourceProvider
}

func (rp *systemTestResourceProvider) ValidFn(appID string) *models.Fn {
	fn := rp.ResourceProvider.ValidFn(appID)
	fn.Memory = memory
	fn.Image = image
	fn.Format = format
	return fn
}

var rp = &systemTestResourceProvider{
	ResourceProvider: datastoretest.NewBasicResourceProvider(),
}

func TestCanExecuteFunctionViaTrigger(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	app := ensureApp(t, rp.ValidApp())
	fn := ensureFn(t, rp.ValidFn(app.ID))
	trigger := ensureTrigger(t, rp.ValidTrigger(app.ID, fn.ID))

	lb, err := LB()
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}
	u := url.URL{
		Scheme: "http",
		Host:   lb,
	}
	u.Path = path.Join(u.Path, "t", app.Name, trigger.Source)

	body := `{"echoContent": "HelloWorld", "sleepTime": 0, "isDebug": true}`
	content := bytes.NewBuffer([]byte(body))
	output := &bytes.Buffer{}

	resp, err := callTrigger(ctx, u.String(), content, output, "POST")
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}

	echo, err := getEchoContent(output.Bytes())
	if err != nil || echo != "HelloWorld" {
		t.Fatalf("getEchoContent/HelloWorld check failed on %v", output)
	}

	if resp.StatusCode != http.StatusOK {
		t.Fatalf("StatusCode check failed on %v", resp.StatusCode)
	}

	// Now let's check FN_CHEESE, since LB and runners have override/extension mechanism
	// to insert FN_CHEESE into config
	cheese, err := getConfigContent("FN_CHEESE", output.Bytes())
	if err != nil || cheese != "Tete de Moine" {
		t.Fatalf("getConfigContent/FN_CHEESE check failed (%v) on %v", err, output)
	}

	// Now let's check FN_WINE, since runners have override to insert this.
	wine, err := getConfigContent("FN_WINE", output.Bytes())
	if err != nil || wine != "1982 Margaux" {
		t.Fatalf("getConfigContent/FN_WINE check failed (%v) on %v", err, output)
	}
}

func TestCanExecuteTriggerBigOutput(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	app := ensureApp(t, rp.ValidApp())
	fn := ensureFn(t, rp.ValidFn(app.ID))
	trigger := ensureTrigger(t, rp.ValidTrigger(app.ID, fn.ID))

	lb, err := LB()
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}
	u := url.URL{
		Scheme: "http",
		Host:   lb,
	}
	u.Path = path.Join(u.Path, "t", app.Name, trigger.Source)

	// Approx 5.3MB output
	body := `{"echoContent": "HelloWorld", "sleepTime": 0, "isDebug": true, "trailerRepeat": 410000}`
	content := bytes.NewBuffer([]byte(body))
	output := &bytes.Buffer{}

	resp, err := callTrigger(ctx, u.String(), content, output, "POST")
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}

	t.Logf("getEchoContent/HelloWorld size %d", len(output.Bytes()))

	echo, err := getEchoContent(output.Bytes())
	if err != nil || echo != "HelloWorld" {
		t.Fatalf("getEchoContent/HelloWorld check failed on %v", output)
	}

	if resp.StatusCode != http.StatusOK {
		t.Fatalf("StatusCode check failed on %v", resp.StatusCode)
	}
}

func TestCanExecuteTriggerTooBigOutput(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	app := ensureApp(t, rp.ValidApp())
	fn := ensureFn(t, rp.ValidFn(app.ID))
	trigger := ensureTrigger(t, rp.ValidTrigger(app.ID, fn.ID))

	lb, err := LB()
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}
	u := url.URL{
		Scheme: "http",
		Host:   lb,
	}
	u.Path = path.Join(u.Path, "t", app.Name, trigger.Source)

	// > 6MB output
	body := `{"echoContent": "HelloWorld", "sleepTime": 0, "isDebug": true, "trailerRepeat": 600000}`
	content := bytes.NewBuffer([]byte(body))
	output := &bytes.Buffer{}

	resp, err := callTrigger(ctx, u.String(), content, output, "POST")
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}

	exp := "{\"message\":\"function response too large\"}\n"
	actual := output.String()

	if !strings.Contains(exp, actual) || len(exp) != len(actual) {
		t.Fatalf("Assertion error.\n\tExpected: %v\n\tActual: %v", exp, output.String())
	}

	if resp.StatusCode != http.StatusBadGateway {
		t.Fatalf("StatusCode check failed on %v", resp.StatusCode)
	}
}

func TestCanExecuteTriggerEmptyOutput(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	app := ensureApp(t, rp.ValidApp())
	fn := ensureFn(t, rp.ValidFn(app.ID))
	trigger := ensureTrigger(t, rp.ValidTrigger(app.ID, fn.ID))

	lb, err := LB()
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}
	u := url.URL{
		Scheme: "http",
		Host:   lb,
	}
	u.Path = path.Join(u.Path, "t", app.Name, trigger.Source)

	// empty body output
	body := `{"sleepTime": 0, "isDebug": true, "isEmptyBody": true}`
	content := bytes.NewBuffer([]byte(body))
	output := &bytes.Buffer{}

	resp, err := callTrigger(ctx, u.String(), content, output, "POST")
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}

	actual := output.String()

	if 0 != len(actual) {
		t.Fatalf("Assertion error.\n\tExpected empty\n\tActual: %v", output.String())
	}

	if resp.StatusCode != http.StatusOK {
		t.Fatalf("StatusCode check failed on %v", resp.StatusCode)
	}
}

func TestBasicTriggerConcurrentExecution(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	app := ensureApp(t, rp.ValidApp())
	fn := ensureFn(t, rp.ValidFn(app.ID))
	trigger := ensureTrigger(t, rp.ValidTrigger(app.ID, fn.ID))

	lb, err := LB()
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}
	u := url.URL{
		Scheme: "http",
		Host:   lb,
	}
	u.Path = path.Join(u.Path, "t", app.Name, trigger.Source)

	results := make(chan error)
	concurrentFuncs := 10
	for i := 0; i < concurrentFuncs; i++ {
		go func() {
			body := `{"echoContent": "HelloWorld", "sleepTime": 0, "isDebug": true}`
			content := bytes.NewBuffer([]byte(body))
			output := &bytes.Buffer{}
			resp, err := callTrigger(ctx, u.String(), content, output, "POST")
			if err != nil {
				results <- fmt.Errorf("Got unexpected error: %v", err)
				return
			}

			echo, err := getEchoContent(output.Bytes())
			if err != nil || echo != "HelloWorld" {
				results <- fmt.Errorf("Assertion error.\n\tActual: %v", output.String())
				return
			}
			if resp.StatusCode != http.StatusOK {
				results <- fmt.Errorf("StatusCode check failed on %v", resp.StatusCode)
				return
			}

			results <- nil
		}()
	}
	for i := 0; i < concurrentFuncs; i++ {
		err := <-results
		if err != nil {
			t.Fatalf("Error in basic concurrency execution test: %v", err)
		}
	}

}

func TestTriggerSaturatedSystem(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
	defer cancel()

	app := ensureApp(t, rp.ValidApp())
	validFn := rp.ValidFn(app.ID)
	validFn.ResourceConfig.Timeout = 1
	validFn.ResourceConfig.Memory = 300

	fn := ensureFn(t, validFn)
	trigger := ensureTrigger(t, rp.ValidTrigger(app.ID, fn.ID))

	lb, err := LB()
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}
	u := url.URL{
		Scheme: "http",
		Host:   lb,
	}
	u.Path = path.Join(u.Path, "t", app.Name, trigger.Source)

	body := `{"echoContent": "HelloWorld", "sleepTime": 0, "isDebug": true}`
	content := bytes.NewBuffer([]byte(body))
	output := &bytes.Buffer{}

	resp, err := callTrigger(ctx, u.String(), content, output, "POST")
	if resp != nil || err == nil || ctx.Err() == nil {
		t.Fatalf("Expected response: %v err:%v", resp, err)
	}
}

func callTrigger(ctx context.Context, u string, content io.Reader, output io.Writer, method string) (*http.Response, error) {
	if method == "" {
		if content == nil {
			method = "GET"
		} else {
			method = "POST"
		}
	}

	req, err := http.NewRequest(method, u, content)
	if err != nil {
		return nil, fmt.Errorf("error running route: %s", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req = req.WithContext(ctx)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("error running route: %s", err)
	}

	io.Copy(output, resp.Body)

	return resp, nil
}

func getAPIURL() (string, *url.URL) {
	apiURL := getEnv("FN_API_URL", "http://localhost:8085")
	u, err := url.Parse(apiURL)
	if err != nil {
		log.Fatalf("Couldn't parse API URL: %s error: %s", apiURL, err)
	}
	return apiURL, u
}

func host() string {
	u, _ := getAPIURL()
	return u
}

const (
	appName   = "systemtestapp"
	routeName = "/systemtestroute"
	image     = "fnproject/fn-test-utils"
	format    = "json"
	memory    = 128
	typ       = "sync"
)

func ensureApp(t *testing.T, app *models.App) *models.App {
	var buf bytes.Buffer
	err := json.NewEncoder(&buf).Encode(app)
	if err != nil {
		t.Fatal("error encoding body", err)
	}

	urlStr := host() + "/v2/apps"
	u, err := url.Parse(urlStr)
	if err != nil {
		t.Fatal("error creating url", urlStr, err)
	}

	req, err := http.NewRequest("POST", u.String(), &buf)
	if err != nil {
		t.Fatal("error creating request", err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal("error creating route", err)
	}

	buf.Reset()
	io.Copy(&buf, resp.Body)
	if resp.StatusCode != 200 {
		t.Fatal("error creating/updating app or otherwise ensuring it exists:", resp.StatusCode, buf.String())
	}

	var appOut models.App
	err = json.NewDecoder(&buf).Decode(&appOut)
	if err != nil {
		t.Fatal("error decoding response")
	}

	return &appOut
}

func ensureFn(t *testing.T, fn *models.Fn) *models.Fn {

	var buf bytes.Buffer
	err := json.NewEncoder(&buf).Encode(fn)
	if err != nil {
		t.Fatal("error encoding body", err)
	}

	urlStr := host() + "/v2/fns"
	u, err := url.Parse(urlStr)
	if err != nil {
		t.Fatal("error creating url", urlStr, err)
	}

	req, err := http.NewRequest("POST", u.String(), &buf)
	if err != nil {
		t.Fatal("error creating request", err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal("error creating route", err)
	}

	buf.Reset()
	io.Copy(&buf, resp.Body)
	if resp.StatusCode != 200 {
		t.Fatal("error creating/updating app or otherwise ensuring it exists:", resp.StatusCode, buf.String())
	}

	var fnOut models.Fn
	err = json.NewDecoder(&buf).Decode(&fnOut)
	if err != nil {
		t.Fatal("error decoding response")
	}

	return &fnOut
}

func ensureTrigger(t *testing.T, trigger *models.Trigger) *models.Trigger {

	var buf bytes.Buffer
	err := json.NewEncoder(&buf).Encode(trigger)
	if err != nil {
		t.Fatal("error encoding body", err)
	}

	urlStr := host() + "/v2/triggers"
	u, err := url.Parse(urlStr)
	if err != nil {
		t.Fatal("error creating url", urlStr, err)
	}

	req, err := http.NewRequest("POST", u.String(), &buf)
	if err != nil {
		t.Fatal("error creating request", err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal("error creating route", err)
	}

	buf.Reset()
	io.Copy(&buf, resp.Body)
	if resp.StatusCode != 200 {
		t.Fatal("error creating/updating app or otherwise ensuring it exists:", resp.StatusCode, buf.String())
	}

	var triggerOut models.Trigger
	err = json.NewDecoder(&buf).Decode(&triggerOut)
	if err != nil {
		t.Fatal("error decoding response")
	}

	return &triggerOut
}
@@ -4,10 +4,8 @@ import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"path"
@@ -18,51 +16,7 @@ import (
	"github.com/fnproject/fn/api/models"
)

// See fn-test-utils for json response
func getEchoContent(respBytes []byte) (string, error) {

	var respJs map[string]interface{}

	err := json.Unmarshal(respBytes, &respJs)
	if err != nil {
		return "", err
	}

	req, ok := respJs["request"].(map[string]interface{})
	if !ok {
		return "", errors.New("unexpected json: request map")
	}

	echo, ok := req["echoContent"].(string)
	if !ok {
		return "", errors.New("unexpected json: echoContent string")
	}

	return echo, nil
}

// See fn-test-utils for json response
func getConfigContent(key string, respBytes []byte) (string, error) {

	var respJs map[string]interface{}

	err := json.Unmarshal(respBytes, &respJs)
	if err != nil {
		return "", err
	}

	cfg, ok := respJs["config"].(map[string]interface{})
	if !ok {
		return "", errors.New("unexpected json: config map")
	}

	val, ok := cfg[key].(string)
	if !ok {
		return "", fmt.Errorf("unexpected json: %s string", key)
	}

	return val, nil
}
// TODO deprecate with routes

func TestCanExecuteFunction(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
@@ -330,29 +284,6 @@ func callFN(ctx context.Context, u string, content io.Reader, output io.Writer,
	return resp, nil
}

func getAPIURL() (string, *url.URL) {
	apiURL := getEnv("FN_API_URL", "http://localhost:8085")
	u, err := url.Parse(apiURL)
	if err != nil {
		log.Fatalf("Couldn't parse API URL: %s error: %s", apiURL, err)
	}
	return apiURL, u
}

func host() string {
	u, _ := getAPIURL()
	return u
}

const (
	appName   = "systemtestapp"
	routeName = "/systemtestroute"
	image     = "fnproject/fn-test-utils"
	format    = "json"
	memory    = 128
	typ       = "sync"
)

func ensureRoute(t *testing.T, rts ...*models.Route) *models.Route {
	var rt *models.Route
	if len(rts) > 0 {
@@ -224,12 +224,12 @@ func SetUpLBNode(ctx context.Context) (*server.Server, error) {

	// Create an LB Agent with a Call Overrider to intercept calls in GetCall(). Overrider in this example
	// scrubs CPU/TmpFsSize and adds FN_CHEESE key/value into extensions.
	lbAgent, err := agent.NewLBAgent(agent.NewCachedDataAccess(cl), nodePool, placer, agent.WithLBCallOverrider(LBCallOverrider))
	lbAgent, err := agent.NewLBAgent(cl, nodePool, placer, agent.WithLBCallOverrider(LBCallOverrider))
	if err != nil {
		return nil, err
	}

	opts = append(opts, server.WithAgent(lbAgent))
	opts = append(opts, server.WithAgent(lbAgent), server.WithReadDataAccess(agent.NewCachedDataAccess(cl)))
	return server.New(ctx, opts...), nil
}

@@ -267,7 +267,6 @@ func SetUpPureRunnerNode(ctx context.Context, nodeNum int) (*server.Server, erro
	innerAgent := agent.New(ds,
		agent.WithConfig(cfg),
		agent.WithDockerDriver(drv),
		agent.WithoutAsyncDequeue(),
		agent.WithCallOverrider(PureRunnerCallOverrider))

	cancelCtx, cancel := context.WithCancel(ctx)