all: rename hot containers to hot functions (#465)

C Cirello
2016-12-29 20:07:41 +01:00
committed by GitHub
parent e968576d96
commit 2f0520521c
12 changed files with 73 additions and 73 deletions

View File

@@ -292,7 +292,7 @@ These are the high level roadmap goals. See [milestones](https://github.com/iron
  * Initial release of base framework
  * Lambda support
* Alpha 2 - December 2016
-  * Streaming input for hot containers #214
+  * Streaming input for hot functions #214
  * Logging endpoint(s) for per function debugging #263
* Beta 1 - January 2017
  * Smart Load Balancer #151

View File

@@ -11,7 +11,7 @@ import (
var errInvalidProtocol = errors.New("Invalid Protocol")

-// ContainerIO defines the interface used to talk to a hot container.
+// ContainerIO defines the interface used to talk to a hot function.
// Internally, a protocol must know when to alternate between stdin and stdout.
// It returns any protocol error, if present.
type ContainerIO interface {
@@ -22,7 +22,7 @@ type ContainerIO interface {
// Protocol defines all protocols that operates a ContainerIO.
type Protocol string

-// Hot container protocols
+// hot function protocols
const (
	Default Protocol = models.FormatDefault
	HTTP    Protocol = models.FormatHTTP
@@ -43,7 +43,7 @@ func New(p Protocol, in io.Writer, out io.Reader) (ContainerIO, error) {
}

// IsStreamable says whether the given protocol can be used for streaming into
-// hot containers.
+// hot functions.
func IsStreamable(p string) (bool, error) {
	proto, err := New(Protocol(p), nil, nil)
	if err != nil {
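For reference, the pattern above — a format is streamable exactly when a hot function protocol exists for it — can be sketched standalone like this (illustrative names; the real `IsStreamable` delegates to `New` rather than switching inline):

```go
package main

import (
	"errors"
	"fmt"
)

type Protocol string

const (
	Default Protocol = "default"
	HTTP    Protocol = "http"
)

var errInvalidProtocol = errors.New("Invalid Protocol")

// isStreamable: only formats with a hot function protocol can stream
// tasks through a long-lived container's stdin/stdout.
func isStreamable(p Protocol) (bool, error) {
	switch p {
	case HTTP:
		return true, nil // long-lived stdin/stdout with request framing
	case Default:
		return false, nil // cold: one container per task, no framing needed
	default:
		return false, errInvalidProtocol
	}
}

func main() {
	for _, f := range []Protocol{Default, HTTP, "bogus"} {
		ok, err := isStreamable(f)
		fmt.Printf("format=%q streamable=%v err=%v\n", f, ok, err)
	}
}
```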

View File

@@ -14,26 +14,26 @@ import (
"github.com/iron-io/runner/drivers" "github.com/iron-io/runner/drivers"
) )
-// Hot containers - theory of operation
+// hot functions - theory of operation
//
-// A function is converted into a hot container if its `Format` is either
+// A function is converted into a hot function if its `Format` is either
// a streamable format/protocol. At the very first task request a hot
-// container shall be started and run it. Each hot container has an internal
+// container shall be started and run it. Each hot function has an internal
// clock that actually halts the container if it goes idle long enough. In the
// absence of workload, it just stops the whole clockwork.
//
-// Internally, the hot container uses a modified Config whose Stdin and Stdout
+// Internally, the hot function uses a modified Config whose Stdin and Stdout
// are bound to an internal pipe. This internal pipe is fed with incoming tasks
// Stdin and feeds incoming tasks with Stdout.
//
-// Each execution is the alternation of feeding hot containers stdin with tasks
+// Each execution is the alternation of feeding hot functions stdin with tasks
// stdin, and reading the answer back from containers stdout. For all `Format`s
// we send embedded into the message metadata to help the container to know when
// to stop reading from its stdin and Functions expect the container to do the
// same. Refer to api/runner/protocol.go for details of these communications.
//
-// Hot Containers implementation relies in two moving parts (drawn below):
-// htcntrmgr and htcntr. Refer to their respective comments for
+// hot functions implementation relies in two moving parts (drawn below):
+// htfnmgr and htfn. Refer to their respective comments for
// details.
// │
// Incoming
@@ -55,15 +55,15 @@ import (
// ▼ ▼ ▼ ▼
// ┌───────────┐ ┌───────────┐ ┌───────────┐ ┌───────────┐
// │ Hot │ │ Hot │ │ Hot │ │ Cold │
-// │ Container │ │ Container │ │ Container │ │ Container │
+// │ Function │ │ Function │ │ Function │ │ Function │
// └───────────┘ └───────────┘ └───────────┘ └───────────┘
// Timeout
// Terminate
// (internal clock)

const (
-	// Terminate hot container after this timeout
-	htcntrScaleDownTimeout = 30 * time.Second
+	// Terminate hot function after this timeout
+	htfnScaleDownTimeout = 30 * time.Second
)

// RunTask helps sending a task.Request into the common concurrency stream.
@@ -78,11 +78,11 @@ func RunTask(tasks chan task.Request, ctx context.Context, cfg *task.Config) (dr
// StartWorkers operates the common concurrency stream, ie, it will process all
// IronFunctions tasks, either sync or async. In the process, it also dispatches
-// the workload to either regular or hot containers.
+// the workload to either regular or hot functions.
func StartWorkers(ctx context.Context, rnr *Runner, tasks <-chan task.Request) {
	var wg sync.WaitGroup
	defer wg.Wait()
-	var hcmgr htcntrmgr
+	var hcmgr htfnmgr
	for {
		select {
@@ -105,15 +105,15 @@ func StartWorkers(ctx context.Context, rnr *Runner, tasks <-chan task.Request) {
	}
}

-// htcntrmgr is the intermediate between the common concurrency stream and
-// hot containers. All hot containers share a single task.Request stream per
-// function (chn), but each function may have more than one hot container (hc).
-type htcntrmgr struct {
+// htfnmgr is the intermediate between the common concurrency stream and
+// hot functions. All hot functions share a single task.Request stream per
+// function (chn), but each function may have more than one hot function (hc).
+type htfnmgr struct {
	chn map[string]chan task.Request
-	hc  map[string]*htcntrsvr
+	hc  map[string]*htfnsvr
}

-func (h *htcntrmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config) chan task.Request {
+func (h *htfnmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config) chan task.Request {
	isStream, err := protocol.IsStreamable(cfg.Format)
	if err != nil {
		logrus.WithError(err).Info("could not detect container IO protocol")
@@ -124,7 +124,7 @@ func (h *htcntrmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config)
	if h.chn == nil {
		h.chn = make(map[string]chan task.Request)
-		h.hc = make(map[string]*htcntrsvr)
+		h.hc = make(map[string]*htfnsvr)
	}

	// TODO(ccirello): re-implement this without memory allocation (fmt.Sprint)
@@ -133,9 +133,9 @@ func (h *htcntrmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config)
	if !ok {
		h.chn[fn] = make(chan task.Request)
		tasks = h.chn[fn]
-		svr := newhtcntrsvr(ctx, cfg, rnr, tasks)
+		svr := newhtfnsvr(ctx, cfg, rnr, tasks)
		if err := svr.launch(ctx); err != nil {
-			logrus.WithError(err).Error("cannot start hot container supervisor")
+			logrus.WithError(err).Error("cannot start hot function supervisor")
			return nil
		}
		h.hc[fn] = svr
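The lazy per-function pipe created above can be reduced to this standalone sketch (illustrative names and a string task type; where the stub goroutine sits, the real code starts a `htfnsvr` supervisor via `newhtfnsvr` and `launch`):

```go
package main

import (
	"fmt"
	"time"
)

// manager is a stripped-down stand-in for htfnmgr: one task channel per
// function, created lazily on the first request for that function.
type manager struct {
	chn map[string]chan string
}

func (m *manager) getPipe(fn string) chan string {
	if m.chn == nil {
		m.chn = make(map[string]chan string)
	}
	tasks, ok := m.chn[fn]
	if !ok {
		tasks = make(chan string)
		m.chn[fn] = tasks
		// Stand-in for the supervisor: just drain the channel.
		go func() {
			for t := range tasks {
				fmt.Println(fn, "got", t)
			}
		}()
	}
	return tasks
}

func main() {
	var m manager
	m.getPipe("myapp/hello") <- "task-1"
	m.getPipe("myapp/hello") <- "task-2" // same pipe is reused
	time.Sleep(50 * time.Millisecond)    // demo only: let the prints flush
}
```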
@@ -144,11 +144,11 @@ func (h *htcntrmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config)
	return tasks
}

-// htcntrsvr is part of htcntrmgr, abstracted apart for simplicity, its only
-// purpose is to test for hot containers saturation and try starting as many as
+// htfnsvr is part of htfnmgr, abstracted apart for simplicity, its only
+// purpose is to test for hot functions saturation and try starting as many as
// needed. In case of absence of workload, it will stop trying to start new hot
// containers.
-type htcntrsvr struct {
+type htfnsvr struct {
	cfg     *task.Config
	rnr     *Runner
	tasksin <-chan task.Request
@@ -156,8 +156,8 @@ type htcntrsvr struct {
	maxc    chan struct{}
}

-func newhtcntrsvr(ctx context.Context, cfg *task.Config, rnr *Runner, tasks <-chan task.Request) *htcntrsvr {
-	svr := &htcntrsvr{
+func newhtfnsvr(ctx context.Context, cfg *task.Config, rnr *Runner, tasks <-chan task.Request) *htfnsvr {
+	svr := &htfnsvr{
		cfg:     cfg,
		rnr:     rnr,
		tasksin: tasks,
@@ -166,23 +166,23 @@ func newhtcntrsvr(ctx context.Context, cfg *task.Config, rnr *Runner, tasks <-ch
	}

	// This pipe will take all incoming tasks and just forward them to the
-	// started hot containers. The catch here is that it feeds a buffered
+	// started hot functions. The catch here is that it feeds a buffered
	// channel from an unbuffered one. And this buffered channel is
-	// then used to determine the presence of running hot containers.
-	// If no hot container is available, tasksout will fill up to its
+	// then used to determine the presence of running hot functions.
+	// If no hot function is available, tasksout will fill up to its
	// capacity and pipe() will start them.
	go svr.pipe(ctx)
	return svr
}

-func (svr *htcntrsvr) pipe(ctx context.Context) {
+func (svr *htfnsvr) pipe(ctx context.Context) {
	for {
		select {
		case t := <-svr.tasksin:
			svr.tasksout <- t
			if len(svr.tasksout) > 0 {
				if err := svr.launch(ctx); err != nil {
-					logrus.WithError(err).Error("cannot start more hot containers")
+					logrus.WithError(err).Error("cannot start more hot functions")
				}
			}
		case <-ctx.Done():
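The buffered-channel saturation trick that `pipe` and `launch` implement can be sketched in isolation (illustrative types; the real worker runs a container instead of printing, and `maxc`'s capacity is the route's `max_concurrency`):

```go
package main

import (
	"fmt"
	"time"
)

// Forward tasks from an unbuffered channel into a buffered one; any
// backlog left in the buffered channel means every running hot function
// is busy, so try to launch one more, bounded by the capacity of maxc.
type server struct {
	tasksin  <-chan string
	tasksout chan string
	maxc     chan struct{} // capacity == max concurrency
}

func (svr *server) pipe() {
	for t := range svr.tasksin {
		svr.tasksout <- t
		if len(svr.tasksout) > 0 { // nobody picked it up yet: saturated
			svr.launch()
		}
	}
}

func (svr *server) launch() {
	select {
	case svr.maxc <- struct{}{}: // a free slot: start another worker
		go func() {
			defer func() { <-svr.maxc }() // release the slot on exit
			for t := range svr.tasksout {
				fmt.Println("hot function ran:", t)
			}
		}()
	default: // already at max concurrency; do nothing
	}
}

func main() {
	in := make(chan string)
	svr := &server{tasksin: in, tasksout: make(chan string, 1), maxc: make(chan struct{}, 2)}
	go svr.pipe()
	in <- "task-1"
	in <- "task-2"
	time.Sleep(100 * time.Millisecond) // demo only: let workers drain
}
```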
@@ -191,10 +191,10 @@ func (svr *htcntrsvr) pipe(ctx context.Context) {
	}
}

-func (svr *htcntrsvr) launch(ctx context.Context) error {
+func (svr *htfnsvr) launch(ctx context.Context) error {
	select {
	case svr.maxc <- struct{}{}:
-		hc, err := newhtcntr(
+		hc, err := newhtfn(
			svr.cfg,
			protocol.Protocol(svr.cfg.Format),
			svr.tasksout,
@@ -213,10 +213,10 @@ func (svr *htcntrsvr) launch(ctx context.Context) error {
	return nil
}

-// htcntr actually interfaces an incoming task from the common concurrency
+// htfn actually interfaces an incoming task from the common concurrency
// stream into a long lived container. If idle long enough, it will stop. It
// uses route configuration to determine which protocol to use.
-type htcntr struct {
+type htfn struct {
	cfg   *task.Config
	proto protocol.ContainerIO
	tasks <-chan task.Request
@@ -233,7 +233,7 @@ type htcntr struct {
	rnr *Runner
}

-func newhtcntr(cfg *task.Config, proto protocol.Protocol, tasks <-chan task.Request, rnr *Runner) (*htcntr, error) {
+func newhtfn(cfg *task.Config, proto protocol.Protocol, tasks <-chan task.Request, rnr *Runner) (*htfn, error) {
	stdinr, stdinw := io.Pipe()
	stdoutr, stdoutw := io.Pipe()
@@ -242,7 +242,7 @@ func newhtcntr(cfg *task.Config, proto protocol.Protocol, tasks <-chan task.Requ
		return nil, err
	}

-	hc := &htcntr{
+	hc := &htfn{
		cfg:   cfg,
		proto: p,
		tasks: tasks,
@@ -259,14 +259,14 @@ func newhtcntr(cfg *task.Config, proto protocol.Protocol, tasks <-chan task.Requ
	return hc, nil
}

-func (hc *htcntr) serve(ctx context.Context) {
+func (hc *htfn) serve(ctx context.Context) {
	lctx, cancel := context.WithCancel(ctx)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
-			inactivity := time.After(htcntrScaleDownTimeout)
+			inactivity := time.After(htfnScaleDownTimeout)
			select {
			case <-lctx.Done():
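The internal clock referenced by `htfnScaleDownTimeout` boils down to re-arming a `time.After` on every loop iteration; a self-contained sketch (shorter timeout for demonstration):

```go
package main

import (
	"fmt"
	"time"
)

const scaleDownTimeout = 3 * time.Second // the real code uses 30s

// serve handles tasks until the hot function has been idle for
// scaleDownTimeout, then shuts down; receiving a task resets the clock.
func serve(tasks <-chan string) {
	for {
		inactivity := time.After(scaleDownTimeout)
		select {
		case t, ok := <-tasks:
			if !ok {
				return
			}
			fmt.Println("ran task:", t)
		case <-inactivity:
			fmt.Println("idle too long, terminating hot function")
			return
		}
	}
}

func main() {
	tasks := make(chan string)
	done := make(chan struct{})
	go func() { serve(tasks); close(done) }()
	tasks <- "hello"
	<-done // serve exits once the idle timeout fires
}
```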
@@ -309,10 +309,10 @@ func (hc *htcntr) serve(ctx context.Context) {
	// Stderr, on the other hand, can be written by anything any time:
	// failure between requests, failures inside requests and messages send
	// right after stdout has been finished being transmitted. Thus, with
-	// hot containers, there is not a 1:1 relation between stderr and tasks.
+	// hot functions, there is not a 1:1 relation between stderr and tasks.
	//
	// Still, we do pass - at protocol level - a Task-ID header, from which
-	// the application running inside the hot container can use to identify
+	// the application running inside the hot function can use to identify
	// its own stderr output.
	errr, errw := io.Pipe()
	cfg.Stderr = errw
@@ -334,12 +334,12 @@ func (hc *htcntr) serve(ctx context.Context) {
	result, err := hc.rnr.Run(lctx, &cfg)
	if err != nil {
-		logrus.WithError(err).Error("hot container failure detected")
+		logrus.WithError(err).Error("hot function failure detected")
	}
	cancel()
	errw.Close()
	wg.Wait()
-	logrus.WithField("result", result).Info("hot container terminated")
+	logrus.WithField("result", result).Info("hot function terminated")
}

func runTaskReq(rnr *Runner, wg *sync.WaitGroup, t task.Request) {

View File

@@ -15,7 +15,7 @@ If you are a developer using IronFunctions through the API, this section is for
* [Packaging functions](packaging.md)
* [Open Function Format](function-format.md)
* [API Reference](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/iron-io/functions/master/docs/swagger.yml)
-* [Hot containers](hot-containers.md)
+* [Hot functions](hot-functions.md)

## For Operators

View File

@@ -39,7 +39,7 @@ curl -H "Content-Type: application/json" -X POST -d '{
#### name (string)

`name` is a property that references an unique app.

App names are immutable. When updating apps with `PATCH` requests, keep in mind that although you
are able to update an app's configuration set, you cannot really rename it.
@@ -47,7 +47,7 @@ are able to update an app's configuration set, you cannot really rename it.
#### config (object)

`config` is a map of values passed to the route runtime in the form of
environment variables.

Note: Route level configuration overrides app level configuration.
@@ -138,16 +138,16 @@ Note: Route level configuration overrides app level configuration.
#### format (string)

-`format` defines if the function is running or not in `hot container` mode.
+`format` defines if the function is running or not in `hot function` mode.

-To define the function execution as `hot container` you set it as one of the following formats:
+To define the function execution as `hot function` you set it as one of the following formats:

- `"http"`

-### 'Hot Container' Only Properties
+### 'Hot function' Only Properties

-This properties are only used if the function is in `hot container` mode
+This properties are only used if the function is in `hot function` mode

#### max_concurrency (string)

-This property defines the maximum amount of concurrent hot containers instances the function should have (per IronFunction node).
+This property defines the maximum amount of concurrent hot functions instances the function should have (per IronFunction node).
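A hedged example of setting these two properties when creating a route, assuming a local daemon and the `/v1/apps/{app}/routes` endpoint from swagger.yml (app name, path and image are placeholders):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// Register a route in hot function mode by setting "format" and
// "max_concurrency"; field names follow the docs above, but treat this
// as an illustration, not a verified client.
func main() {
	body := []byte(`{"route":{"path":"/hello","image":"iron/hello","format":"http","max_concurrency":5}}`)
	res, err := http.Post("http://127.0.0.1:8080/v1/apps/myapp/routes", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status) // expect 200 on success
}
```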

View File

@@ -55,17 +55,17 @@ during functions execution.
`build` (optional) is an array of local shell calls which are used to help
building the function.

-## Hot containers
+## Hot functions

-Hot containers support also adds two extra options to this configuration file.
+hot functions support also adds two extra options to this configuration file.

`format` (optional) is one of the streaming formats covered at [function-format.md](function-format.md).

-`max_concurrency` (optional) is the maximum of hot containers per node to be
+`max_concurrency` (optional) is the maximum of hot functions per node to be
started for a certain function. It defaults to one per function. If you
understand you need more processing power, make sure to raise this number.
Keep in mind that if there is not available memory to execute the configured
-workload, it will fail to start new hot containers.
+workload, it will fail to start new hot functions.

## Testing functions

View File

@@ -1,4 +1,4 @@
-# Hot containers
+# Hot functions

IronFunctions is built on top of container technologies, for each incoming
workload, it spins a new container, feed it with the payload and sends the
@@ -8,8 +8,8 @@ container. You may refer to [this blog](https://medium.com/travis-on-docker/the-
In the case you need faster start times for your function, you may use a hot
container instead.

-Hot containers are started once and kept alive while there is incoming workload.
-Thus, it means that once you decide to use a hot container, you must be able to
+hot functions are started once and kept alive while there is incoming workload.
+Thus, it means that once you decide to use a hot function, you must be able to
tell the moment it should reading from standard input to start writing to
standard output.
@@ -17,9 +17,9 @@ Currently, IronFunctions implements a HTTP-like protocol to operate hot
containers, but instead of communication through a TCP/IP port, it uses standard
input/output.

-## Implementing a hot container
+## Implementing a hot function

-In the [examples directory](https://github.com/iron-io/functions/blob/master/examples/hotcontainers/http/func.go), there is one simple implementation of a hot container
+In the [examples directory](https://github.com/iron-io/functions/blob/master/examples/hotcontainers/http/func.go), there is one simple implementation of a hot function
which we are going to get in the details here.

The basic cycle comprises three steps: read standard input up to a previosly
@@ -75,9 +75,9 @@ res.Write(os.Stdout)
Rinse and repeat for each incoming workload.
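Condensed, the cycle looks like this — a minimal sketch, not the repo's exact example; the `Task-Id` header follows the runner comments (net/http normalizes the casing):

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

// Loop forever: parse one HTTP request from stdin, write one HTTP
// response to stdout. Content-Length framing tells IronFunctions where
// each response ends.
func main() {
	in := bufio.NewReader(os.Stdin)
	for {
		req, err := http.ReadRequest(in)
		if err != nil {
			return // stdin closed: the hot function is shutting down
		}
		body := fmt.Sprintf("Hello, task %s\n", req.Header.Get("Task-Id"))
		res := http.Response{
			StatusCode:    http.StatusOK,
			ProtoMajor:    1,
			ProtoMinor:    1,
			Body:          io.NopCloser(bytes.NewBufferString(body)),
			ContentLength: int64(len(body)),
		}
		res.Write(os.Stdout)
		req.Body.Close()
	}
}
```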
-## Deploying a hot container
+## Deploying a hot function

-Once your functions is adapted to be handled as hot container, you must tell
+Once your functions is adapted to be handled as hot function, you must tell
IronFunctions daemon that this function is now ready to be reused across
requests:
@@ -99,5 +99,5 @@ requests:
`format` (mandatory) either "default" or "http". If "http", then it is a hot
container.

-`max_concurrency` (optional) - the number of simultaneous hot containers for
+`max_concurrency` (optional) - the number of simultaneous hot functions for
this functions. This is a per-node configuration option. Default: 1

View File

@@ -6,7 +6,7 @@ swagger: '2.0'
info:
  title: IronFunctions
  description: The open source serverless platform.
-  version: "0.1.28"
+  version: "0.1.29"
# the domain of the service
host: "127.0.0.1:8080"
# array of all schemes that your API supports
@@ -370,7 +370,7 @@ definitions:
      max_concurrency:
        type: integer
        format: int32
-        description: Maximum number of hot containers concurrency
+        description: Maximum number of hot functions concurrency
      config:
        type: object
        description: Route configuration - overrides application configuration

View File

@@ -77,13 +77,13 @@ func initFn() cli.Command {
		},
		cli.StringFlag{
			Name:        "format",
-			Usage:       "hot container IO format - json or http",
+			Usage:       "hot function IO format - json or http",
			Destination: &a.format,
			Value:       "",
		},
		cli.IntFlag{
			Name:        "max-concurrency",
-			Usage:       "maximum concurrency for hot container",
+			Usage:       "maximum concurrency for hot function",
			Destination: &a.maxConcurrency,
			Value:       1,
		},

View File

@@ -66,12 +66,12 @@ func routes() cli.Command {
		},
		cli.StringFlag{
			Name:  "format,f",
-			Usage: "hot container IO format - json or http",
+			Usage: "hot function IO format - json or http",
			Value: "",
		},
		cli.IntFlag{
			Name:  "max-concurrency",
-			Usage: "maximum concurrency for hot container",
+			Usage: "maximum concurrency for hot function",
			Value: 1,
		},
		cli.DurationFlag{