Files
fn-serverless/api/server/server.go
Reed Allman 7fbbd75349 fn, dockerd pid collector & go collector metrics (#837)
* fn, dockerd pid collector & go collector metrics

the prometheus client we're using has a nice collector for process metrics and
for go runtime metrics. these are things we are very interested in
operationally, and recently the benevolent team at opencensus made this
possible again, so this hooks it up for us with added dockerd sugar.

nannying the dockerd we're using should be super useful since that thing likes
to get carried away; it'll be nice to differentiate memory/cpu usage between
dockerd / the host / fn. this will basically only work in a 'dind'
environment, or on a linux host that is running fn outside of docker and is
configured with the permissions to be able to check this. otherwise, it will
simply fail. we also probably want disk i/o and net i/o information for that
as well, or at least it would be interesting to differentiate it from the
host, but this isn't hooked up in the default collectors unfortunately.
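
the hookup itself is small: register the stock process collectors (one for
our own pid, one for dockerd's) and the go collector on a private registry,
then hand that registry to the opencensus prometheus exporter. a condensed
sketch of what WithTracer below ends up doing (error handling and the
OnError hook trimmed):

```
reg := promclient.NewRegistry()
reg.MustRegister(
	promclient.NewProcessCollector(os.Getpid(), "fn"),           // fn_process_*
	promclient.NewProcessCollectorPIDFn(dockerPid(), "dockerd"), // dockerd_process_*
	promclient.NewGoCollector(),                                 // go_*
)
exporter, _ := prometheus.NewExporter(prometheus.Options{Namespace: "fn", Registry: reg})
view.RegisterExporter(exporter)
```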

dockerd:

```
dockerd_process_cpu_seconds_total 520.74
dockerd_process_max_fds 1.048576e+06
dockerd_process_resident_memory_bytes 9.033728e+07
dockerd_process_start_time_seconds 1.52029677322e+09
dockerd_process_virtual_memory_bytes 1.782509568e+09
```

fn:

```
fn_process_cpu_seconds_total 0.14
fn_process_max_fds 1024
fn_process_open_fds 12
fn_process_resident_memory_bytes 2.7348992e+07
fn_process_start_time_seconds 1.52056274238e+09
fn_process_virtual_memory_bytes 7.20068608e+08
```

go:

```
go_gc_duration_seconds{quantile="0"} 4.4194e-05
go_gc_duration_seconds{quantile="0.25"} 9.8118e-05
go_gc_duration_seconds{quantile="0.5"} 0.000105989
go_gc_duration_seconds{quantile="0.75"} 0.000106251
go_gc_duration_seconds{quantile="1"} 0.000157864
go_gc_duration_seconds_sum 0.000512416
go_gc_duration_seconds_count 5
go_goroutines 30
go_memstats_alloc_bytes 3.897696e+06
go_memstats_alloc_bytes_total 1.2916016e+07
go_memstats_buck_hash_sys_bytes 1.45034e+06
go_memstats_frees_total 75399
go_memstats_gc_sys_bytes 450560
go_memstats_heap_alloc_bytes 3.897696e+06
go_memstats_heap_idle_bytes 868352
go_memstats_heap_inuse_bytes 5.750784e+06
go_memstats_heap_objects 29925
go_memstats_heap_released_bytes_total 0
go_memstats_heap_sys_bytes 6.619136e+06
go_memstats_last_gc_time_seconds 1.520562751182639e+09
go_memstats_lookups_total 239
go_memstats_mallocs_total 105324
go_memstats_mcache_inuse_bytes 3472
go_memstats_mcache_sys_bytes 16384
go_memstats_mspan_inuse_bytes 90592
go_memstats_mspan_sys_bytes 98304
go_memstats_next_gc_bytes 6.31304e+06
go_memstats_other_sys_bytes 710548
go_memstats_stack_inuse_bytes 720896
go_memstats_stack_sys_bytes 720896
go_memstats_sys_bytes 1.0066168e+07
```

* cache pid until it stops working
2018-03-13 15:42:43 -07:00

871 lines
24 KiB
Go

package server

import (
	"bufio"
	"bytes"
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"syscall"

	"github.com/fnproject/fn/api/agent"
	"github.com/fnproject/fn/api/agent/hybrid"
	agent_grpc "github.com/fnproject/fn/api/agent/nodepool/grpc"
	"github.com/fnproject/fn/api/common"
	"github.com/fnproject/fn/api/datastore"
	"github.com/fnproject/fn/api/id"
	"github.com/fnproject/fn/api/logs"
	"github.com/fnproject/fn/api/models"
	"github.com/fnproject/fn/api/mqs"
	"github.com/fnproject/fn/api/version"
	"github.com/fnproject/fn/fnext"
	"github.com/gin-gonic/gin"
	zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http"
	promclient "github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/exporter/prometheus"
	"go.opencensus.io/exporter/zipkin"
	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/trace"
)

const (
	EnvLogLevel        = "FN_LOG_LEVEL"
	EnvLogDest         = "FN_LOG_DEST"
	EnvLogPrefix       = "FN_LOG_PREFIX"
	EnvMQURL           = "FN_MQ_URL"
	EnvDBURL           = "FN_DB_URL"
	EnvLOGDBURL        = "FN_LOGSTORE_URL"
	EnvRunnerURL       = "FN_RUNNER_API_URL"
	EnvNPMAddress      = "FN_NPM_ADDRESS"
	EnvRunnerAddresses = "FN_RUNNER_ADDRESSES"
	EnvLBPlacementAlg  = "FN_PLACER"
	EnvNodeType        = "FN_NODE_TYPE"
	EnvPort            = "FN_PORT" // be careful, Gin expects this variable to be "port"
	EnvGRPCPort        = "FN_GRPC_PORT"
	EnvAPICORS         = "FN_API_CORS"
	EnvZipkinURL       = "FN_ZIPKIN_URL"

	// Certificates to communicate with other FN nodes
	EnvCert     = "FN_NODE_CERT"
	EnvCertKey  = "FN_NODE_CERT_KEY"
	EnvCertAuth = "FN_NODE_CERT_AUTHORITY"

	// Defaults
	DefaultLogLevel = "info"
	DefaultLogDest  = "stderr"
	DefaultPort     = 8080
	DefaultGRPCPort = 9190
)

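// ServerNodeType describes which role this fn server plays: full, api, lb, runner, or pure-runner.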
type ServerNodeType int32

const (
	ServerTypeFull ServerNodeType = iota
	ServerTypeAPI
	ServerTypeLB
	ServerTypeRunner
	ServerTypePureRunner
)

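// String returns the name of the node type; unrecognized values print as "full".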
func (s ServerNodeType) String() string {
	switch s {
	default:
		return "full"
	case ServerTypeAPI:
		return "api"
	case ServerTypeLB:
		return "lb"
	case ServerTypeRunner:
		return "runner"
	case ServerTypePureRunner:
		return "pure-runner"
	}
}

type Server struct {
	// TODO this one maybe we have `AddRoute` in extensions?
	Router *gin.Engine

	webListenPort  int
	grpcListenPort int

	agent           agent.Agent
	datastore       models.Datastore
	mq              models.MessageQueue
	logstore        models.LogStore
	nodeType        ServerNodeType
	cert            string
	certKey         string
	certAuthority   string
	appListeners    *appListeners
	rootMiddlewares []fnext.Middleware
	apiMiddlewares  []fnext.Middleware
	promExporter    *prometheus.Exporter

	// Extensions can append to this list of contexts so that cancellations are properly handled.
	extraCtxs []context.Context
}

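// nodeTypeFromString is the inverse of ServerNodeType.String; unrecognized values fall back to ServerTypeFull.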
func nodeTypeFromString(value string) ServerNodeType {
	switch value {
	case "api":
		return ServerTypeAPI
	case "lb":
		return ServerTypeLB
	case "runner":
		return ServerTypeRunner
	case "pure-runner":
		return ServerTypePureRunner
	default:
		return ServerTypeFull
	}
}

// NewFromEnv creates a new Functions server based on env vars.
func NewFromEnv(ctx context.Context, opts ...ServerOption) *Server {
	curDir := pwd()
	var defaultDB, defaultMQ string
	nodeType := nodeTypeFromString(getEnv(EnvNodeType, "")) // default to full
	switch nodeType {
	case ServerTypeLB:         // nothing
	case ServerTypeRunner:     // nothing
	case ServerTypePureRunner: // nothing
	default:
		// only want to activate these for full and api nodes
		defaultDB = fmt.Sprintf("sqlite3://%s/data/fn.db", curDir)
		defaultMQ = fmt.Sprintf("bolt://%s/data/fn.mq", curDir)
	}

	opts = append(opts, WithWebPort(getEnvInt(EnvPort, DefaultPort)))
	opts = append(opts, WithGRPCPort(getEnvInt(EnvGRPCPort, DefaultGRPCPort)))
	opts = append(opts, WithLogLevel(getEnv(EnvLogLevel, DefaultLogLevel)))
	opts = append(opts, WithLogDest(getEnv(EnvLogDest, DefaultLogDest), getEnv(EnvLogPrefix, "")))
	opts = append(opts, WithTracer(getEnv(EnvZipkinURL, ""))) // do this early on, so below can use these
	opts = append(opts, WithDBURL(getEnv(EnvDBURL, defaultDB)))
	opts = append(opts, WithMQURL(getEnv(EnvMQURL, defaultMQ)))
	opts = append(opts, WithLogURL(getEnv(EnvLOGDBURL, "")))
	opts = append(opts, WithRunnerURL(getEnv(EnvRunnerURL, "")))
	opts = append(opts, WithType(nodeType))
	opts = append(opts, WithNodeCert(getEnv(EnvCert, "")))
	opts = append(opts, WithNodeCertKey(getEnv(EnvCertKey, "")))
	opts = append(opts, WithNodeCertAuthority(getEnv(EnvCertAuth, "")))

	// Agent handling depends on node type and several other options so it must be the last processed option.
	// Also we only need to create an agent if this is not an API node.
	if nodeType != ServerTypeAPI {
		opts = append(opts, WithAgentFromEnv())
	} else {
		opts = append(opts, WithLogstoreFromDatastore())
	}

	return New(ctx, opts...)
}

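// pwd returns the current working directory with forward slashes, exiting fatally if it cannot be determined.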
func pwd() string {
	cwd, err := os.Getwd()
	if err != nil {
		logrus.WithError(err).Fatalln("couldn't get working directory, possibly unsupported platform?")
	}
	// Replace backslashes with forward slashes in case this is windows, the URL parser errors on them
	return strings.Replace(cwd, "\\", "/", -1)
}

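// WithWebPort sets the port the HTTP API server listens on.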
func WithWebPort(port int) ServerOption {
	return func(ctx context.Context, s *Server) error {
		s.webListenPort = port
		return nil
	}
}

func WithGRPCPort(port int) ServerOption {
	return func(ctx context.Context, s *Server) error {
		s.grpcListenPort = port
		return nil
	}
}

func WithLogLevel(ll string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		common.SetLogLevel(ll)
		return nil
	}
}

func WithLogDest(dst, prefix string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		common.SetLogDest(dst, prefix)
		return nil
	}
}

func WithDBURL(dbURL string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		if dbURL != "" {
			ds, err := datastore.New(ctx, dbURL)
			if err != nil {
				return err
			}
			s.datastore = ds
		}
		return nil
	}
}

func WithMQURL(mqURL string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		if mqURL != "" {
			mq, err := mqs.New(mqURL)
			if err != nil {
				return err
			}
			s.mq = mq
		}
		return nil
	}
}

func WithLogURL(logstoreURL string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		if ldb := logstoreURL; ldb != "" {
			logDB, err := logs.New(ctx, logstoreURL)
			if err != nil {
				return err
			}
			s.logstore = logDB
		}
		return nil
	}
}

func WithRunnerURL(runnerURL string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		if runnerURL != "" {
			cl, err := hybrid.NewClient(runnerURL)
			if err != nil {
				return err
			}
			s.agent = agent.New(agent.NewCachedDataAccess(cl))
		}
		return nil
	}
}

func WithType(t ServerNodeType) ServerOption {
	return func(ctx context.Context, s *Server) error {
		s.nodeType = t
		return nil
	}
}

func WithNodeCert(cert string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		if cert != "" {
			abscert, err := filepath.Abs(cert)
			if err != nil {
				return fmt.Errorf("Unable to resolve %v: please specify a valid and readable cert file", cert)
			}
			_, err = os.Stat(abscert)
			if err != nil {
				return fmt.Errorf("Cannot stat %v: please specify a valid and readable cert file", abscert)
			}
			s.cert = abscert
		}
		return nil
	}
}

func WithNodeCertKey(key string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		if key != "" {
			abskey, err := filepath.Abs(key)
			if err != nil {
				return fmt.Errorf("Unable to resolve %v: please specify a valid and readable cert key file", key)
			}
			_, err = os.Stat(abskey)
			if err != nil {
				return fmt.Errorf("Cannot stat %v: please specify a valid and readable cert key file", abskey)
			}
			s.certKey = abskey
		}
		return nil
	}
}

func WithNodeCertAuthority(ca string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		if ca != "" {
			absca, err := filepath.Abs(ca)
			if err != nil {
				return fmt.Errorf("Unable to resolve %v: please specify a valid and readable cert authority file", ca)
			}
			_, err = os.Stat(absca)
			if err != nil {
				return fmt.Errorf("Cannot stat %v: please specify a valid and readable cert authority file", absca)
			}
			s.certAuthority = absca
		}
		return nil
	}
}

func WithDatastore(ds models.Datastore) ServerOption {
	return func(ctx context.Context, s *Server) error {
		s.datastore = ds
		return nil
	}
}

func WithMQ(mq models.MessageQueue) ServerOption {
	return func(ctx context.Context, s *Server) error {
		s.mq = mq
		return nil
	}
}

func WithLogstore(ls models.LogStore) ServerOption {
	return func(ctx context.Context, s *Server) error {
		s.logstore = ls
		return nil
	}
}

func WithAgent(agent agent.Agent) ServerOption {
	return func(ctx context.Context, s *Server) error {
		s.agent = agent
		return nil
	}
}

func WithLogstoreFromDatastore() ServerOption {
	return func(ctx context.Context, s *Server) error {
		if s.datastore == nil {
			return errors.New("Need a datastore in order to use it as a logstore")
		}
		if s.logstore == nil {
			s.logstore = s.datastore
		}
		return nil
	}
}

// WithFullAgent is a shorthand for WithAgent(... create a full agent here ...)
func WithFullAgent() ServerOption {
	return func(ctx context.Context, s *Server) error {
		s.nodeType = ServerTypeFull
		if s.logstore == nil {
			s.logstore = s.datastore
		}
		if s.datastore == nil || s.logstore == nil || s.mq == nil {
			return errors.New("Full nodes must configure FN_DB_URL, FN_LOGSTORE_URL, FN_MQ_URL.")
		}
		s.agent = agent.New(agent.NewCachedDataAccess(agent.NewDirectDataAccess(s.datastore, s.logstore, s.mq)))
		return nil
	}
}

// WithAgentFromEnv must be provided as the last server option because it relies
// on all other options being set first.
func WithAgentFromEnv() ServerOption {
	return func(ctx context.Context, s *Server) error {
		switch s.nodeType {
		case ServerTypeAPI:
			return errors.New("Should not initialize an agent for an Fn API node.")
		case ServerTypeRunner:
			runnerURL := getEnv(EnvRunnerURL, "")
			if runnerURL == "" {
				return errors.New("No FN_RUNNER_API_URL provided for an Fn Runner node.")
			}
			cl, err := hybrid.NewClient(runnerURL)
			if err != nil {
				return err
			}
			s.agent = agent.New(agent.NewCachedDataAccess(cl))
		case ServerTypePureRunner:
			if s.datastore != nil {
				return errors.New("Pure runner nodes must not be configured with a datastore (FN_DB_URL).")
			}
			if s.mq != nil {
				return errors.New("Pure runner nodes must not be configured with a message queue (FN_MQ_URL).")
			}
			ds, err := hybrid.NewNopDataStore()
			if err != nil {
				return err
			}
			grpcAddr := fmt.Sprintf(":%d", s.grpcListenPort)
			delegatedAgent := agent.NewSyncOnly(agent.NewCachedDataAccess(ds))
			cancelCtx, cancel := context.WithCancel(ctx)
			prAgent, err := agent.NewPureRunner(cancel, grpcAddr, delegatedAgent, s.cert, s.certKey, s.certAuthority)
			if err != nil {
				return err
			}
			s.agent = prAgent
			s.extraCtxs = append(s.extraCtxs, cancelCtx)
		case ServerTypeLB:
			s.nodeType = ServerTypeLB
			runnerURL := getEnv(EnvRunnerURL, "")
			if runnerURL == "" {
				return errors.New("No FN_RUNNER_API_URL provided for an Fn NuLB node.")
			}
			if s.datastore != nil {
				return errors.New("NuLB nodes must not be configured with a datastore (FN_DB_URL).")
			}
			if s.mq != nil {
				return errors.New("NuLB nodes must not be configured with a message queue (FN_MQ_URL).")
			}
			cl, err := hybrid.NewClient(runnerURL)
			if err != nil {
				return err
			}
			delegatedAgent := agent.New(agent.NewCachedDataAccess(cl))
			npmAddress := getEnv(EnvNPMAddress, "")
			runnerAddresses := getEnv(EnvRunnerAddresses, "")
			var nodePool agent.NodePool
			if npmAddress != "" {
				// TODO refactor DefaultgRPCNodePool as an extension
				nodePool = agent_grpc.DefaultgRPCNodePool(npmAddress, s.cert, s.certKey, s.certAuthority)
			} else if runnerAddresses != "" {
				nodePool = agent_grpc.DefaultStaticNodePool(strings.Split(runnerAddresses, ","))
			} else {
				return errors.New("Must provide either FN_NPM_ADDRESS or FN_RUNNER_ADDRESSES for an Fn NuLB node.")
			}
			// Select the placement algorithm
			var placer agent.Placer
			switch getEnv(EnvLBPlacementAlg, "") {
			case "ch":
				placer = agent.NewCHPlacer()
			default:
				placer = agent.NewNaivePlacer()
			}
			s.agent, err = agent.NewLBAgent(delegatedAgent, nodePool, placer)
			if err != nil {
				return errors.New("LBAgent creation failed")
			}
		default:
			s.nodeType = ServerTypeFull
			if s.logstore == nil { // TODO seems weird?
				s.logstore = s.datastore
			}
			if s.datastore == nil || s.logstore == nil || s.mq == nil {
				return errors.New("Full nodes must configure FN_DB_URL, FN_LOGSTORE_URL, FN_MQ_URL.")
			}
			s.agent = agent.New(agent.NewCachedDataAccess(agent.NewDirectDataAccess(s.datastore, s.logstore, s.mq)))
		}
		return nil
	}
}

// WithExtraCtx appends a context to the list of contexts the server will watch for cancellations / errors / signals.
func WithExtraCtx(extraCtx context.Context) ServerOption {
	return func(ctx context.Context, s *Server) error {
		s.extraCtxs = append(s.extraCtxs, extraCtx)
		return nil
	}
}

// New creates a new Functions server with the opts given. For convenience, users may
// prefer to use NewFromEnv but New is more flexible if needed.
func New(ctx context.Context, opts ...ServerOption) *Server {
	ctx, span := trace.StartSpan(ctx, "server_init")
	defer span.End()
	log := common.Logger(ctx)

	s := &Server{
		Router: gin.New(),
		// Add default ports
		webListenPort:  DefaultPort,
		grpcListenPort: DefaultGRPCPort,
		// Almost everything else is configured through opts (see NewFromEnv for ex.) or below
	}

	for _, opt := range opts {
		if opt == nil {
			continue
		}
		err := opt(ctx, s)
		if err != nil {
			log.WithError(err).Fatal("Error during server opt initialization.")
		}
	}

	// Check that WithAgent options have been processed correctly.
	switch s.nodeType {
	case ServerTypeAPI:
		if s.agent != nil {
			log.Fatal("Incorrect configuration, API nodes must not have an agent initialized.")
		}
	default:
		if s.agent == nil {
			log.Fatal("Incorrect configuration, non-API nodes must have an agent initialized.")
		}
	}

	setMachineID()
	s.Router.Use(loggerWrap, traceWrap, panicWrap) // TODO should be opts
	optionalCorsWrap(s.Router)                     // TODO should be an opt
	s.bindHandlers(ctx)

	s.appListeners = new(appListeners)
	s.datastore = fnext.NewDatastore(s.datastore, s.appListeners)
	return s
}

// TODO this should be a 'plugin' most likely
func WithTracer(zipkinURL string) ServerOption {
	return func(ctx context.Context, s *Server) error {
		var (
			// TODO add server identifier to this crap
			//debugMode = false
			//serviceName = "fnserver"
			//serviceHostPort = "localhost:8080" // meh
			zipkinHTTPEndpoint = zipkinURL
			// ex: "http://zipkin:9411/api/v2/spans"
		)

		if zipkinHTTPEndpoint != "" {
			reporter := zipkinhttp.NewReporter(zipkinURL, zipkinhttp.MaxBacklog(10000))
			exporter := zipkin.NewExporter(reporter, nil)
			trace.RegisterExporter(exporter)
			logrus.WithFields(logrus.Fields{"url": zipkinHTTPEndpoint}).Info("exporting spans to zipkin")
			// TODO don't do this. testing parity.
			trace.SetDefaultSampler(trace.AlwaysSample())
		}

		reg := promclient.NewRegistry()
		reg.MustRegister(promclient.NewProcessCollector(os.Getpid(), "fn"),
			promclient.NewProcessCollectorPIDFn(dockerPid(), "dockerd"),
			promclient.NewGoCollector(),
		)

		exporter, err := prometheus.NewExporter(prometheus.Options{
			Namespace: "fn",
			Registry:  reg,
			OnError:   func(err error) { logrus.WithError(err).Error("opencensus prometheus exporter err") },
		})
		if err != nil {
			logrus.Fatal(err)
		}
		s.promExporter = exporter
		view.RegisterExporter(exporter)
		return nil
	}
}

// TODO plumbing considerations, we've put the S pipe next to the chandelier...
func dockerPid() func() (int, error) {
	// prometheus' process collector only works on linux anyway. let them do the
	// process detection; if we return an error here we just get 0 metrics and it
	// does not log / blow up (that's fine!). it's also likely we hit permissions
	// errors here for many installations, we want to do similar and ignore (we
	// mainly want this for prod).
	var pid int
	return func() (int, error) {
		if pid != 0 {
			// make sure it's still docker's pid.
			if isDockerPid("/proc/" + strconv.Itoa(pid) + "/status") {
				return pid, nil
			}
			pid = 0 // reset to go search
		}

		err := filepath.Walk("/proc", func(path string, info os.FileInfo, err error) error {
			if err != nil || pid != 0 {
				// we get permission errors digging around in here, ignore them and press on
				return nil
			}
			// /proc/<pid>/status
			if strings.Count(path, "/") == 3 && strings.Contains(path, "/status") {
				if isDockerPid(path) {
					// extract pid from path
					pid, _ = strconv.Atoi(path[6:strings.LastIndex(path, "/")])
					return io.EOF // end the search
				}
			}
			// keep searching
			return nil
		})
		if err == io.EOF { // used as sentinel
			err = nil
		}
		return pid, err
	}
}

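// isDockerPid reports whether the /proc status file at path belongs to a process named dockerd.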
func isDockerPid(path string) bool {
	// first line of status file is: "Name: <name>"
	f, err := os.Open(path)
	if err != nil {
		return false
	}
	defer f.Close()

	// scan first line only
	scanner := bufio.NewScanner(f)
	scanner.Scan()
	return strings.HasSuffix(scanner.Text(), "dockerd")
}

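// setMachineID seeds the id generator with this host's ipv4 address and port so generated ids are unique per node.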
func setMachineID() {
	port := uint16(getEnvInt(EnvPort, DefaultPort))
	addr := whoAmI().To4()
	if addr == nil {
		addr = net.ParseIP("127.0.0.1").To4()
		logrus.Warn("could not find non-local ipv4 address to use, using '127.0.0.1' for ids, if this is a cluster beware of duplicate ids!")
	}
	id.SetMachineIdHost(addr, port)
}

// whoAmI searches for a non-local address on any network interface, returning
// the first one it finds. it could be expanded to search eth0 or en0 only but
// to date this has been unnecessary.
func whoAmI() net.IP {
	ints, _ := net.Interfaces()
	for _, i := range ints {
		if i.Name == "docker0" || i.Name == "lo" {
			// not perfect
			continue
		}
		addrs, _ := i.Addrs()
		for _, a := range addrs {
			ip, _, err := net.ParseCIDR(a.String())
			if a.Network() == "ip+net" && err == nil && ip.To4() != nil {
				if !bytes.Equal(ip, net.ParseIP("127.0.0.1")) {
					return ip
				}
			}
		}
	}
	return nil
}

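// extractFields collects the handler name and any route params from the gin context for structured logging.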
func extractFields(c *gin.Context) logrus.Fields {
	fields := logrus.Fields{"action": path.Base(c.HandlerName())}
	for _, param := range c.Params {
		fields[param.Key] = param.Value
	}
	return fields
}

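// Start runs the server, blocking until an interrupt or SIGTERM is received or a watched context is done.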
func (s *Server) Start(ctx context.Context) {
	newctx, cancel := contextWithSignal(ctx, os.Interrupt, syscall.SIGTERM)
	s.startGears(newctx, cancel)
}

func (s *Server) startGears(ctx context.Context, cancel context.CancelFunc) {
	// By default it serves on :8080 unless a
	// FN_PORT environment variable was defined.
	listen := fmt.Sprintf(":%d", s.webListenPort)

	const runHeader = `
        ______
       / ____/___
      / /_  / __ \
     / __/ / / / /
    /_/   /_/ /_/`
	fmt.Println(runHeader)
	fmt.Printf(" v%s\n\n", version.Version)

	logrus.WithField("type", s.nodeType).Infof("Fn serving on `%v`", listen)

	installChildReaper()

	server := http.Server{
		Addr:    listen,
		Handler: &ochttp.Handler{Handler: s.Router},
		// TODO we should set read/write timeouts
	}

	go func() {
		err := server.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			logrus.WithError(err).Error("server error")
			cancel()
		} else {
			logrus.Info("server stopped")
		}
	}()

	// listen for signals, listener errors, or cancellations on all registered contexts.
	s.extraCtxs = append(s.extraCtxs, ctx)
	cases := make([]reflect.SelectCase, len(s.extraCtxs))
	for i, ctx := range s.extraCtxs {
		cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ctx.Done())}
	}
	nth, recv, wasSend := reflect.Select(cases)
	if wasSend {
		logrus.WithFields(logrus.Fields{
			"ctxNumber":     nth,
			"receivedValue": recv.String(),
		}).Debug("Stopping because of received value from done context.")
	} else {
		logrus.WithFields(logrus.Fields{
			"ctxNumber": nth,
		}).Debug("Stopping because of closed channel from done context.")
	}

	// TODO: do not wait forever during graceful shutdown (add graceful shutdown timeout)
	if err := server.Shutdown(context.Background()); err != nil {
		logrus.WithError(err).Error("server shutdown error")
	}

	if s.agent != nil {
		err := s.agent.Close() // after we stop taking requests, wait for all tasks to finish
		if err != nil {
			logrus.WithError(err).Error("Failed to close the agent")
		}
	}
}

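// bindHandlers registers the HTTP routes appropriate for this node type.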
func (s *Server) bindHandlers(ctx context.Context) {
	engine := s.Router
	// now for extendible middleware
	engine.Use(s.rootMiddlewareWrapper())

	engine.GET("/", handlePing)
	engine.GET("/version", handleVersion)
	// TODO: move under v1 ?
	if s.promExporter != nil {
		engine.GET("/metrics", gin.WrapH(s.promExporter))
	}
	profilerSetup(engine, "/debug")

	// Pure runners don't serve any HTTP routes; they speak grpc instead
	if s.nodeType != ServerTypePureRunner {
		if s.nodeType != ServerTypeRunner {
			v1 := engine.Group("/v1")
			v1.Use(setAppNameInCtx)
			v1.Use(s.apiMiddlewareWrapper())
			v1.GET("/apps", s.handleAppList)
			v1.POST("/apps", s.handleAppCreate)

			{
				apps := v1.Group("/apps/:app")
				apps.Use(appNameCheck)
				apps.GET("", s.handleAppGet)
				apps.PATCH("", s.handleAppUpdate)
				apps.DELETE("", s.handleAppDelete)

				apps.GET("/routes", s.handleRouteList)
				apps.POST("/routes", s.handleRoutesPostPutPatch)
				apps.GET("/routes/:route", s.handleRouteGet)
				apps.PATCH("/routes/*route", s.handleRoutesPostPutPatch)
				apps.PUT("/routes/*route", s.handleRoutesPostPutPatch)
				apps.DELETE("/routes/*route", s.handleRouteDelete)

				apps.GET("/calls", s.handleCallList)
				apps.GET("/calls/:call", s.handleCallGet)
				apps.GET("/calls/:call/log", s.handleCallLogGet)
			}

			{
				runner := v1.Group("/runner")
				runner.PUT("/async", s.handleRunnerEnqueue)
				runner.GET("/async", s.handleRunnerDequeue)
				runner.POST("/start", s.handleRunnerStart)
				runner.POST("/finish", s.handleRunnerFinish)
			}
		}

		if s.nodeType != ServerTypeAPI {
			runner := engine.Group("/r")
			runner.Use(appNameCheck)
			runner.Any("/:app", s.handleFunctionCall)
			runner.Any("/:app/*route", s.handleFunctionCall)
		}
	}

	engine.NoRoute(func(c *gin.Context) {
		var err error
		switch {
		case s.nodeType == ServerTypeAPI && strings.HasPrefix(c.Request.URL.Path, "/r/"):
			err = models.ErrInvokeNotSupported
		case s.nodeType == ServerTypeRunner && strings.HasPrefix(c.Request.URL.Path, "/v1/"):
			err = models.ErrAPINotSupported
		default:
			var e models.APIError = models.ErrPathNotFound
			err = models.NewAPIError(e.Code(), fmt.Errorf("%v: %s", e.Error(), c.Request.URL.Path))
		}
		handleErrorResponse(c, err)
	})
}

// implements fnext.ExtServer
func (s *Server) Datastore() models.Datastore {
	return s.datastore
}

// pageParams returns the unescaped ?cursor and ?per_page values,
// clamping perPage to 0 < perPage <= 100 and defaulting to 30 if unset or invalid.
// parsing errors are ignored and fall back to the defaults.
func pageParams(c *gin.Context, base64d bool) (cursor string, perPage int) {
	cursor = c.Query("cursor")
	if base64d {
		cbytes, _ := base64.RawURLEncoding.DecodeString(cursor)
		cursor = string(cbytes)
	}

	perPage, _ = strconv.Atoi(c.Query("per_page"))
	if perPage > 100 {
		perPage = 100
	} else if perPage <= 0 {
		perPage = 30
	}
	return cursor, perPage
}

type appResponse struct {
	Message string      `json:"message"`
	App     *models.App `json:"app"`
}

type appsResponse struct {
	Message    string        `json:"message"`
	NextCursor string        `json:"next_cursor"`
	Apps       []*models.App `json:"apps"`
}

type routeResponse struct {
	Message string        `json:"message"`
	Route   *models.Route `json:"route"`
}

type routesResponse struct {
	Message    string          `json:"message"`
	NextCursor string          `json:"next_cursor"`
	Routes     []*models.Route `json:"routes"`
}

type callResponse struct {
	Message string       `json:"message"`
	Call    *models.Call `json:"call"`
}

type callsResponse struct {
	Message    string         `json:"message"`
	NextCursor string         `json:"next_cursor"`
	Calls      []*models.Call `json:"calls"`
}