Files
fn-serverless/api/runnerpool/naive_placer.go
Shreya Garge 91f6ef3402 added context for runnerpool interface (#1320)
* added context for runnerpool interface

* added context for runnerpool interface
2018-11-20 17:02:47 +00:00

65 lines
1.4 KiB
Go

package runnerpool
import (
"context"
"sync/atomic"
"time"
"github.com/fnproject/fn/api/models"
"github.com/sirupsen/logrus"
)
// naivePlacer implements Placer with a simple round-robin strategy: each
// placement attempt atomically advances a shared cursor and indexes into
// the pool's current runner list.
type naivePlacer struct {
	cfg     PlacerConfig // copy of the configuration supplied at construction
	rrIndex uint64       // round-robin cursor; advanced atomically across concurrent PlaceCall invocations
}
// NewNaivePlacer builds a Placer that distributes calls round-robin across
// the pool's runners. The cursor is seeded from the wall-clock nanosecond so
// independently created placers don't all start at the same runner.
func NewNaivePlacer(cfg *PlacerConfig) Placer {
	logrus.Infof("Creating new naive runnerpool placer with config=%+v", cfg)
	placer := naivePlacer{
		cfg:     *cfg,
		rrIndex: uint64(time.Now().Nanosecond()),
	}
	return &placer
}
// GetPlacerConfig returns a copy of the configuration this placer was
// created with.
func (sp *naivePlacer) GetPlacerConfig() PlacerConfig {
	return sp.cfg
}
// PlaceCall attempts to place call on one of the pool's runners, cycling
// through the current runner list round-robin and backing off between full
// sweeps, until the call is placed or the tracker reports the attempt is
// done (deadline/ctx expiry). Returns the placement result, the last runner
// pool error if runners could never be fetched, or ErrCallTimeoutServerBusy.
func (sp *naivePlacer) PlaceCall(ctx context.Context, rp RunnerPool, call RunnerCall) error {
	state := NewPlacerTracker(ctx, &sp.cfg, call)
	defer state.HandleDone()

	var runnerPoolErr error
	for {
		var runners []Runner
		// Refresh the runner list each sweep; membership may change between
		// backoff rounds. The error is held, not returned immediately, so we
		// keep retrying until the tracker gives up.
		runners, runnerPoolErr = rp.Runners(ctx, call)
		for j := 0; j < len(runners) && !state.IsDone(); j++ {
			// Advance the shared round-robin cursor atomically, and reduce in
			// uint64 space: converting to int first (int(i)%len) would yield a
			// negative index — and a panic — once the counter exceeds the
			// signed-int range, since Go's % preserves the operand's sign.
			i := atomic.AddUint64(&sp.rrIndex, 1)
			r := runners[i%uint64(len(runners))]
			placed, err := state.TryRunner(r, call)
			if placed {
				// Placed: err is the call's outcome, nil or otherwise.
				return err
			}
		}
		if !state.RetryAllBackoff(len(runners)) {
			break
		}
	}
	if runnerPoolErr != nil {
		// If we haven't been able to place the function and we got an error
		// from the runner pool, return that error (since we don't have
		// enough runners to handle the current load and the runner pool is
		// having trouble).
		state.HandleFindRunnersFailure(runnerPoolErr)
		return runnerPoolErr
	}
	return models.ErrCallTimeoutServerBusy
}