Hybrid plumby (#585)

* fix configuration of agent and server to be future-proof and plumb in the hybrid client agent

* fixes up the tests, turns off /r/ on api nodes

* fix up defaults for runner nodes

* shove the runner async push code down into agent land to use client

* plumb up async-age

* return full call from async dequeue endpoint; since we're storing a whole
call in the MQ, we don't need to worry about caching of app/route [for now]
* fast safe shutdown of dequeue looper in runner / tidying of agent (see the sketch after this list)
* nice errors for path not found against /r/, /v1/ or any other path
* removed some stale TODOs in agent
* mq backends are only loud mouths in debug mode now
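
For the "fast safe shutdown of dequeue looper" item above, here is a minimal sketch of that pattern under assumed names: a poll loop that exits as soon as its context is cancelled, with the same context also aborting any dequeue that is blocked on the network. The Dequeuer interface, Call struct, and dequeueLoop function below are stand-ins for illustration, not the actual agent or hybrid client code.

    package agent // hypothetical package name for this sketch

    import (
        "context"
        "time"
    )

    // Call stands in for models.Call; only the loop shape matters here.
    type Call struct{ ID string }

    // Dequeuer stands in for the hybrid client's async dequeue call.
    type Dequeuer interface {
        Dequeue(ctx context.Context) (*Call, error)
    }

    // dequeueLoop polls for async work until ctx is cancelled. Because the same
    // ctx is passed into Dequeue, cancellation both stops the loop and unblocks
    // an in-flight dequeue, which is what makes the shutdown fast and safe.
    func dequeueLoop(ctx context.Context, d Dequeuer, handle func(*Call)) {
        ticker := time.NewTicker(1 * time.Second)
        defer ticker.Stop()
        for {
            select {
            case <-ctx.Done():
                return // shut down promptly instead of waiting out the next tick
            case <-ticker.C:
                call, err := d.Dequeue(ctx)
                if err != nil || call == nil {
                    continue // nothing to do, or the dequeue was interrupted
                }
                handle(call)
            }
        }
    }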

* update tests

* Add caching to hybrid client

* Fix HTTP error handling in hybrid client.

The type switch was on the value rather than a pointer.
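
A self-contained illustration of that bug class (ErrCallNotFound and the helpers are made up for the example, not the hybrid client's real types): when a constructor returns a pointer, only a case on the pointer type matches; a case on the value type alone never fires, which is the shape of the bug.

    package hybrid // hypothetical package name for this sketch

    // ErrCallNotFound is a made-up error type used only to show the bug class.
    type ErrCallNotFound struct{ ID string }

    func (e ErrCallNotFound) Error() string { return "call not found: " + e.ID }

    // lookup returns the error as a pointer, which is what the switch must match.
    func lookup(id string) error { return &ErrCallNotFound{ID: id} }

    func isNotFound(err error) bool {
        switch err.(type) {
        case *ErrCallNotFound:
            return true // matches: the dynamic type really is a pointer
        case ErrCallNotFound:
            return true // never fires for errors produced by lookup; switching on the value was the bug
        default:
            return false
        }
    }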

* Gofmt.

* Better caching with a nice caching wrapper

* Remove datastore cache which is now unused

* Don't need to manually wrap interface methods
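
On not needing to manually wrap interface methods: embedding the wrapped interface in the wrapper struct promotes every method automatically, so only the calls worth caching get explicit overrides. A hedged sketch, with a made-up DataAccess interface and a plain map standing in for whatever cache the hybrid client actually uses:

    package hybrid // hypothetical package name for this sketch

    import (
        "context"
        "sync"
    )

    // App and DataAccess are stand-ins; only the wrapper shape matters here.
    type App struct{ Name string }

    type DataAccess interface {
        GetApp(ctx context.Context, name string) (*App, error)
        Enqueue(ctx context.Context, body []byte) error
        // ...plus however many other methods the real interface has
    }

    // cachingDataAccess embeds DataAccess, so any method not redefined below is
    // promoted from the wrapped value; no hand-written one-line forwarders needed.
    type cachingDataAccess struct {
        DataAccess
        mu   sync.Mutex
        apps map[string]*App // no TTL or eviction in this sketch
    }

    func NewCachingDataAccess(da DataAccess) DataAccess {
        return &cachingDataAccess{DataAccess: da, apps: make(map[string]*App)}
    }

    // GetApp is the only override: serve from the cache when possible, otherwise
    // fall through to the wrapped client and remember the result.
    func (c *cachingDataAccess) GetApp(ctx context.Context, name string) (*App, error) {
        c.mu.Lock()
        app, ok := c.apps[name]
        c.mu.Unlock()
        if ok {
            return app, nil
        }

        app, err := c.DataAccess.GetApp(ctx, name)
        if err != nil {
            return nil, err
        }

        c.mu.Lock()
        c.apps[name] = app
        c.mu.Unlock()
        return app, nil
    }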

* Go fmt
Reed Allman authored 2017-12-12 15:54:55 -08:00, committed by GitHub
parent 05ce2e3868
commit bb92547b95
18 changed files with 433 additions and 375 deletions

@@ -206,7 +206,7 @@ func (mq *BoltDbMQ) delayCall(job *models.Call) (*models.Call, error) {
 func (mq *BoltDbMQ) Push(ctx context.Context, job *models.Call) (*models.Call, error) {
     ctx, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": job.ID})
-    log.Println("Pushed to MQ")
+    log.Debugln("Pushed to MQ")
     if job.Delay > 0 {
         return mq.delayCall(job)
@@ -316,7 +316,7 @@ func (mq *BoltDbMQ) Reserve(ctx context.Context) (*models.Call, error) {
     }
     _, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": job.ID})
-    log.Println("Reserved")
+    log.Debugln("Reserved")
     return &job, nil
 }
@@ -326,7 +326,7 @@ func (mq *BoltDbMQ) Reserve(ctx context.Context) (*models.Call, error) {
 func (mq *BoltDbMQ) Delete(ctx context.Context, job *models.Call) error {
     _, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": job.ID})
-    defer log.Println("Deleted")
+    defer log.Debugln("Deleted")
     return mq.db.Update(func(tx *bolt.Tx) error {
         b := tx.Bucket(timeoutName(int(*job.Priority)))

@@ -113,7 +113,7 @@ func (ji *callItem) Less(than btree.Item) bool {
 func (mq *MemoryMQ) Push(ctx context.Context, job *models.Call) (*models.Call, error) {
     _, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": job.ID})
-    log.Println("Pushed to MQ")
+    log.Debugln("Pushed to MQ")
     // It seems to me that using the job ID in the reservation is acceptable since each job can only have one outstanding reservation.
     // job.MsgId = randSeq(20)
@@ -174,7 +174,7 @@ func (mq *MemoryMQ) Reserve(ctx context.Context) (*models.Call, error) {
     }
     _, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": job.ID})
-    log.Println("Reserved")
+    log.Debugln("Reserved")
     return job, mq.pushTimeout(job)
 }
@@ -189,6 +189,6 @@ func (mq *MemoryMQ) Delete(ctx context.Context, job *models.Call) error {
     }
     delete(mq.Timeouts, job.ID)
-    log.Println("Deleted")
+    log.Debugln("Deleted")
     return nil
 }

@@ -211,7 +211,7 @@ func (mq *RedisMQ) delayCall(conn redis.Conn, job *models.Call) (*models.Call, error) {
 func (mq *RedisMQ) Push(ctx context.Context, job *models.Call) (*models.Call, error) {
     _, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": job.ID})
-    defer log.Println("Pushed to MQ")
+    defer log.Debugln("Pushed to MQ")
     conn := mq.pool.Get()
     defer conn.Close()
@@ -281,14 +281,14 @@ func (mq *RedisMQ) Reserve(ctx context.Context) (*models.Call, error) {
     }
     _, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": job.ID})
-    log.Println("Reserved")
+    log.Debugln("Reserved")
     return &job, nil
 }
 func (mq *RedisMQ) Delete(ctx context.Context, job *models.Call) error {
     _, log := common.LoggerWithFields(ctx, logrus.Fields{"call_id": job.ID})
-    defer log.Println("Deleted")
+    defer log.Debugln("Deleted")
     conn := mq.pool.Get()
     defer conn.Close()
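
A note on the Println-to-Debugln change above: with logrus, debug lines are dropped unless the logger level is raised, so these MQ messages now only appear when the server runs at debug level. A tiny standalone illustration, using plain logrus rather than the project's common.LoggerWithFields helper:

    package main

    import "github.com/sirupsen/logrus"

    func main() {
        log := logrus.WithFields(logrus.Fields{"call_id": "abc123"})

        // At the default Info level this is silent, which is the point of the change.
        log.Debugln("Pushed to MQ")

        // Raise the level and the same call prints.
        logrus.SetLevel(logrus.DebugLevel)
        log.Debugln("Pushed to MQ")
    }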