mirror of https://github.com/fnproject/fn.git
* fn: runner status and docker load images

Introduce a function run for pure runner Status calls. Previously, Status gRPC calls returned only the count of active in-flight requests, intended as a simple health check. That is not sufficient, since it does not show whether the agent or Docker itself is healthy. With this change, if a pure runner is configured with a status image, that image is executed through Docker. The call uses zero memory/CPU/tmpsize settings to ensure the resource tracker does not block it.

However, operators might not always have a Docker repository accessible for the status image, or might not want status checks to go over the network. To allow such cases, and more generally to support pre-caching Docker images, this adds a new environment variable, FN_DOCKER_LOAD_FILE. If set, the fn agent at startup loads the images from that file, previously saved with 'docker save', into Docker.
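To illustrate the FN_DOCKER_LOAD_FILE flow described above, here is a minimal sketch of startup-time image loading. It assumes the archive was produced beforehand with 'docker save' and that the docker CLI is on PATH; the agent itself talks to the Docker API rather than shelling out, so treat this as illustrative only, not the agent's actual implementation.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// loadSavedImages mirrors the idea behind FN_DOCKER_LOAD_FILE: if the
// variable points at an archive created with 'docker save', load it into
// the local Docker daemon so no registry pull is needed.
// NOTE: shelling out to the docker CLI is an assumption of this sketch;
// the real agent uses the Docker client library.
func loadSavedImages() error {
	archive := os.Getenv("FN_DOCKER_LOAD_FILE")
	if archive == "" {
		return nil // nothing to preload
	}
	f, err := os.Open(archive)
	if err != nil {
		return fmt.Errorf("cannot open image archive %q: %w", archive, err)
	}
	defer f.Close()

	// Equivalent of 'docker load < archive'.
	cmd := exec.Command("docker", "load")
	cmd.Stdin = f
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if err := loadSavedImages(); err != nil {
		fmt.Fprintln(os.Stderr, "docker load failed:", err)
		os.Exit(1)
	}
}

An operator would prepare the archive once, e.g. 'docker save some/status-image > images.tar' (image name hypothetical), and point FN_DOCKER_LOAD_FILE at that file on each runner host.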
package tests

import (
	"bytes"
	"context"
	"io"
	"net/http"
	"net/url"
	"path"
	"testing"
	"time"

	"github.com/fnproject/fn/api/models"
	"github.com/fnproject/fn/api/runnerpool"
)

// We should not be able to invoke a StatusImage directly.
func TestCannotExecuteStatusImage(t *testing.T) {
	if StatusImage == "" {
		t.Skip("no status image defined")
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	rt := &models.Route{
		Path:   routeName + "yogurt",
		Image:  StatusImage,
		Format: format,
		Memory: memory,
		Type:   typ,
	}

	rt = ensureRoute(t, rt)

	lb, err := LB()
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}
	u := url.URL{
		Scheme: "http",
		Host:   lb,
	}
	u.Path = path.Join(u.Path, "r", appName, rt.Path)

	content := bytes.NewBuffer([]byte(`status`))
	output := &bytes.Buffer{}

	resp, err := callFN(ctx, u.String(), content, output, "POST")
	if err != nil {
		t.Fatalf("Got unexpected error: %v", err)
	}

	if resp.StatusCode != http.StatusBadRequest {
		t.Fatalf("expected status code %v, got %v", http.StatusBadRequest, resp.StatusCode)
	}
}

// myCall is a dummy RunnerCall implementation.
type myCall struct{}

// implements runnerpool.RunnerCall
func (c *myCall) SlotHashId() string                  { return "" }
func (c *myCall) Extensions() map[string]string       { return nil }
func (c *myCall) RequestBody() io.ReadCloser          { return nil }
func (c *myCall) ResponseWriter() http.ResponseWriter { return nil }
func (c *myCall) StdErr() io.ReadWriteCloser          { return nil }
func (c *myCall) Model() *models.Call                 { return nil }

func TestExecuteRunnerStatus(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	var zoo myCall

	pool, err := NewSystemTestNodePool()
	if err != nil {
		t.Fatalf("Creating Node Pool failed %v", err)
	}

	runners, err := pool.Runners(&zoo)
	if err != nil {
		t.Fatalf("Getting Runners from Pool failed %v", err)
	}
	if len(runners) == 0 {
		t.Fatal("Getting Runners from Pool failed: no runners")
	}

	concurrency := 10
	res := make(chan *runnerpool.RunnerStatus, concurrency*len(runners))

	for _, runner := range runners {
		for i := 0; i < concurrency; i++ {
			go func(dest runnerpool.Runner) {
				// t.Fatalf must not be called from a spawned goroutine
				// (FailNow only works on the test goroutine), so record
				// failures with t.Errorf and always send on the channel
				// to keep the receive loop below from blocking.
				status, err := dest.Status(ctx)
				if err != nil {
					t.Errorf("Runners Status failed for %v err=%v", dest.Address(), err)
				}
				if status == nil || status.StatusFailed {
					t.Errorf("Runners Status not OK for %v %v", dest.Address(), status)
				}
				t.Logf("Runner %v got Status=%+v", dest.Address(), status)
				res <- status
			}(runner)
		}
	}

	lookup := make(map[string][]*runnerpool.RunnerStatus)

	for i := 0; i < concurrency*len(runners); i++ {
		status := <-res
		if status == nil {
			continue
		}
		lookup[status.StatusId] = append(lookup[status.StatusId], status)
	}

	// WARNING: possibly flaky test below; the numbers may need to be relaxed.
	// Why 3? Status calls are cached for idleTimeout + gracePeriod = 1.5 secs,
	// which should easily cover all of the queries above: with 3 runners, each
	// serving 10 status calls inside that window, we expect at most 3 distinct
	// status ids (one cached result per runner).
	if len(lookup) > 3 {
		for key, arr := range lookup {
			t.Errorf("key=%v count=%v", key, len(arr))
		}
		t.Fatalf("expected at most 3 distinct status ids, got %v", len(lookup))
	}

	// Wait for the status cache to expire.
	time.Sleep(2 * time.Second)

	// Now we should get fresh data: status ids not seen above.
	for _, dest := range runners {
		status, err := dest.Status(ctx)
		if err != nil {
			t.Fatalf("Runners Status failed for %v err=%v", dest.Address(), err)
		}
		if status == nil || status.StatusFailed {
			t.Fatalf("Runners Status not OK for %v %v", dest.Address(), status)
		}
		t.Logf("Runner %v got Status=%+v", dest.Address(), status)
		if _, ok := lookup[status.StatusId]; ok {
			t.Fatalf("Runners Status did not return fresh status id %v %v", dest.Address(), status)
		}
	}
}