Hybrid plumby (#585)

* fix configuration of agent and server to be future-proof and plumb in the hybrid client agent

* fixes up the tests, turns off /r/ on API nodes

* fix up defaults for runner nodes

* shove the runner async push code down into agent land to use client

* plumb up async-age

* return full call from async dequeue endpoint; since we're storing a whole
call in the MQ we don't need to worry about caching of app/route [for now]
* fast safe shutdown of dequeue looper in runner / tidying of agent
* nice errors for path not found against /r/, /v1/ or other path not found
* removed some stale TODO in agent
* mq backends are only loud mouths in debug mode now

* update tests

* Add caching to hybrid client

* Fix HTTP error handling in hybrid client.

The type switch was on the value rather than a pointer.
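
For illustration, here is a minimal, self-contained sketch of that pitfall (the `apiError` type is made up for this example; it is not the hybrid client's actual error type): an error constructed as a pointer never matches a type-switch case written against the value type.

    package main

    import "fmt"

    // apiError is a hypothetical error type for illustration only. Error is declared
    // on the value receiver so that both apiError and *apiError satisfy error.
    type apiError struct{ code int }

    func (e apiError) Error() string { return fmt.Sprintf("api error %d", e.code) }

    func status(err error) int {
        switch e := err.(type) {
        case apiError: // buggy case: never matches, the dynamic type below is *apiError
            return e.code
        case *apiError: // fixed case: matches the pointer actually stored in the interface
            return e.code
        default:
            return 500
        }
    }

    func main() {
        var err error = &apiError{code: 404} // constructed as a pointer
        fmt.Println(status(err))             // prints 404, via the *apiError case
    }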

* Gofmt.

* Better caching with a nice caching wrapper
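
As a rough, self-contained sketch of what the wrapper does (this uses golang.org/x/sync/singleflight and github.com/patrickmn/go-cache with a made-up `Store` interface standing in for DataAccess; fn's own singleflight package and the real GetApp/GetRoute signatures differ, but the shape is the same: check the cache, collapse concurrent misses into a single fetch, then fill the cache):

    package main

    import (
        "fmt"
        "time"

        cache "github.com/patrickmn/go-cache"
        "golang.org/x/sync/singleflight"
    )

    // Store stands in for the DataAccess interface being wrapped.
    type Store interface {
        GetApp(name string) (string, error)
    }

    type slowStore struct{}

    func (slowStore) GetApp(name string) (string, error) {
        time.Sleep(50 * time.Millisecond) // pretend this is a datastore round trip
        return "app:" + name, nil
    }

    // cachedStore embeds Store, so only the methods worth caching are overridden.
    type cachedStore struct {
        Store
        c  *cache.Cache
        sf singleflight.Group
    }

    func (s *cachedStore) GetApp(name string) (string, error) {
        if v, ok := s.c.Get(name); ok {
            return v.(string), nil
        }
        // singleflight collapses concurrent misses for the same key into one fetch.
        v, err, _ := s.sf.Do(name, func() (interface{}, error) {
            return s.Store.GetApp(name)
        })
        if err != nil {
            return "", err
        }
        s.c.Set(name, v, cache.DefaultExpiration)
        return v.(string), nil
    }

    func main() {
        s := &cachedStore{Store: slowStore{}, c: cache.New(5*time.Second, time.Minute)}
        fmt.Println(s.GetApp("myapp")) // slow path: hits the wrapped Store, fills the cache
        fmt.Println(s.GetApp("myapp")) // fast path: served from the short-lived cache entry
    }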

* Remove datastore cache which is now unused

* Don't need to manually wrap interface methods
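
A minimal sketch of why no manual wrapping is needed (illustrative names only, not fn's types): embedding an interface in a struct promotes every method of the embedded value, so a wrapper only has to define the methods it actually changes, the way CachedDataAccess overrides GetApp/GetRoute and inherits the rest of DataAccess.

    package main

    import "fmt"

    type Greeter interface {
        Hello() string
        Bye() string
    }

    type base struct{}

    func (base) Hello() string { return "hello" }
    func (base) Bye() string   { return "bye" }

    // loud embeds Greeter: Bye is promoted from the embedded value,
    // so only Hello needs an explicit override here.
    type loud struct {
        Greeter
    }

    func (l loud) Hello() string { return l.Greeter.Hello() + "!!!" }

    func main() {
        var g Greeter = loud{Greeter: base{}}
        fmt.Println(g.Hello()) // "hello!!!" (overridden)
        fmt.Println(g.Bye())   // "bye" (promoted, no hand-written wrapper)
    }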

* Go fmt
Reed Allman, 2017-12-12 15:54:55 -08:00, committed by GitHub
parent 05ce2e3868
commit bb92547b95
18 changed files with 433 additions and 375 deletions


@@ -2,9 +2,13 @@ package agent
 
 import (
 	"context"
-	"github.com/fnproject/fn/api/common"
-	"github.com/fnproject/fn/api/models"
 	"io"
+	"time"
+
+	"github.com/fnproject/fn/api/common"
+	"github.com/fnproject/fn/api/common/singleflight"
+	"github.com/fnproject/fn/api/models"
+	"github.com/patrickmn/go-cache"
 )
 
 // DataAccess abstracts the datastore and message queue operations done by the
@@ -19,7 +23,7 @@ type DataAccess interface {
 	GetRoute(ctx context.Context, appName string, routePath string) (*models.Route, error)
 
 	// Enqueue will add a Call to the queue (ultimately forwards to mq.Push).
-	Enqueue(ctx context.Context, mCall *models.Call) (*models.Call, error)
+	Enqueue(ctx context.Context, mCall *models.Call) error
 
 	// Dequeue will query the queue for the next available Call that can be run
 	// by this Agent, and reserve it (ultimately forwards to mq.Reserve).
@@ -31,7 +35,71 @@ type DataAccess interface {
 	// Finish will notify the system that the Call has been processed, and
 	// fulfill the reservation in the queue if the call came from a queue.
-	Finish(ctx context.Context, mCall *models.Call, stderr io.ReadWriteCloser, async bool) error
+	Finish(ctx context.Context, mCall *models.Call, stderr io.Reader, async bool) error
 }
 
+// CachedDataAccess wraps a DataAccess and caches the results of GetApp and GetRoute.
+type CachedDataAccess struct {
+	DataAccess
+	cache        *cache.Cache
+	singleflight singleflight.SingleFlight
+}
+
+func NewCachedDataAccess(da DataAccess) DataAccess {
+	cda := &CachedDataAccess{
+		DataAccess: da,
+		cache:      cache.New(5*time.Second, 1*time.Minute),
+	}
+	return cda
+}
+
+func routeCacheKey(appname, path string) string {
+	return "r:" + appname + "\x00" + path
+}
+
+func appCacheKey(appname string) string {
+	return "a:" + appname
+}
+
+func (da *CachedDataAccess) GetApp(ctx context.Context, appName string) (*models.App, error) {
+	key := appCacheKey(appName)
+	app, ok := da.cache.Get(key)
+	if ok {
+		return app.(*models.App), nil
+	}
+	resp, err := da.singleflight.Do(key,
+		func() (interface{}, error) {
+			return da.DataAccess.GetApp(ctx, appName)
+		})
+	if err != nil {
+		return nil, err
+	}
+	app = resp.(*models.App)
+	da.cache.Set(key, app, cache.DefaultExpiration)
+	return app.(*models.App), nil
+}
+
+func (da *CachedDataAccess) GetRoute(ctx context.Context, appName string, routePath string) (*models.Route, error) {
+	key := routeCacheKey(appName, routePath)
+	r, ok := da.cache.Get(key)
+	if ok {
+		return r.(*models.Route), nil
+	}
+	resp, err := da.singleflight.Do(key,
+		func() (interface{}, error) {
+			return da.DataAccess.GetRoute(ctx, appName, routePath)
+		})
+	if err != nil {
+		return nil, err
+	}
+	r = resp.(*models.Route)
+	da.cache.Set(key, r, cache.DefaultExpiration)
+	return r.(*models.Route), nil
+}
+
 type directDataAccess struct {
@@ -57,8 +125,9 @@ func (da *directDataAccess) GetRoute(ctx context.Context, appName string, routeP
 	return da.ds.GetRoute(ctx, appName, routePath)
 }
 
-func (da *directDataAccess) Enqueue(ctx context.Context, mCall *models.Call) (*models.Call, error) {
-	return da.mq.Push(ctx, mCall)
+func (da *directDataAccess) Enqueue(ctx context.Context, mCall *models.Call) error {
+	_, err := da.mq.Push(ctx, mCall)
+	return err
 	// TODO: Insert a call in the datastore with the 'queued' state
 }
@@ -77,7 +146,7 @@ func (da *directDataAccess) Start(ctx context.Context, mCall *models.Call) error
 	return da.mq.Delete(ctx, mCall)
 }
 
-func (da *directDataAccess) Finish(ctx context.Context, mCall *models.Call, stderr io.ReadWriteCloser, async bool) error {
+func (da *directDataAccess) Finish(ctx context.Context, mCall *models.Call, stderr io.Reader, async bool) error {
 	// this means that we could potentially store an error / timeout status for a
 	// call that ran successfully [by a user's perspective]
 	// TODO: this should be update, really
@@ -90,8 +159,6 @@ func (da *directDataAccess) Finish(ctx context.Context, mCall *models.Call, stde
 		common.Logger(ctx).WithError(err).Error("error uploading log")
 		// note: Not returning err here since the job could have already finished successfully.
 	}
 
-	// NOTE call this after InsertLog or the buffer will get reset
-	stderr.Close()
 	if async {
 		// XXX (reed): delete MQ message, eventually