diff --git a/README.md b/README.md index 2a66bf3e9..666e53d56 100644 --- a/README.md +++ b/README.md @@ -292,7 +292,7 @@ These are the high level roadmap goals. See [milestones](https://github.com/iron * Initial release of base framework * Lambda support * Alpha 2 - December 2016 - * Streaming input for hot containers #214 + * Streaming input for hot functions #214 * Logging endpoint(s) for per function debugging #263 * Beta 1 - January 2017 * Smart Load Balancer #151 diff --git a/api/runner/protocol/factory.go b/api/runner/protocol/factory.go index abd1997f1..ae5d72a7e 100644 --- a/api/runner/protocol/factory.go +++ b/api/runner/protocol/factory.go @@ -11,7 +11,7 @@ import ( var errInvalidProtocol = errors.New("Invalid Protocol") -// ContainerIO defines the interface used to talk to a hot container. +// ContainerIO defines the interface used to talk to a hot function. // Internally, a protocol must know when to alternate between stdin and stdout. // It returns any protocol error, if present. type ContainerIO interface { @@ -22,7 +22,7 @@ type ContainerIO interface { // Protocol defines all protocols that operates a ContainerIO. type Protocol string -// Hot container protocols +// hot function protocols const ( Default Protocol = models.FormatDefault HTTP Protocol = models.FormatHTTP @@ -43,7 +43,7 @@ func New(p Protocol, in io.Writer, out io.Reader) (ContainerIO, error) { } // IsStreamable says whether the given protocol can be used for streaming into -// hot containers. +// hot functions. 
func IsStreamable(p string) (bool, error) { proto, err := New(Protocol(p), nil, nil) if err != nil { diff --git a/api/runner/worker.go b/api/runner/worker.go index 462c3f869..e97db5461 100644 --- a/api/runner/worker.go +++ b/api/runner/worker.go @@ -14,26 +14,26 @@ import ( "github.com/iron-io/runner/drivers" ) -// Hot containers - theory of operation +// hot functions - theory of operation // -// A function is converted into a hot container if its `Format` is either +// A function is converted into a hot function if its `Format` is either // a streamable format/protocol. At the very first task request a hot -// container shall be started and run it. Each hot container has an internal +// container shall be started and run it. Each hot function has an internal // clock that actually halts the container if it goes idle long enough. In the // absence of workload, it just stops the whole clockwork. // -// Internally, the hot container uses a modified Config whose Stdin and Stdout +// Internally, the hot function uses a modified Config whose Stdin and Stdout // are bound to an internal pipe. This internal pipe is fed with incoming tasks // Stdin and feeds incoming tasks with Stdout. // -// Each execution is the alternation of feeding hot containers stdin with tasks +// Each execution is the alternation of feeding hot functions stdin with tasks // stdin, and reading the answer back from containers stdout. For all `Format`s // we send embedded into the message metadata to help the container to know when // to stop reading from its stdin and Functions expect the container to do the // same. Refer to api/runner/protocol.go for details of these communications. // -// Hot Containers implementation relies in two moving parts (drawn below): -// htcntrmgr and htcntr. Refer to their respective comments for +// hot functions implementation relies in two moving parts (drawn below): +// htfnmgr and htfn. Refer to their respective comments for // details. 
// │ // Incoming @@ -55,15 +55,15 @@ import ( // ▼ ▼ ▼ ▼ // ┌───────────┐ ┌───────────┐ ┌───────────┐ ┌───────────┐ // │ Hot │ │ Hot │ │ Hot │ │ Cold │ -// │ Container │ │ Container │ │ Container │ │ Container │ +// │ Function │ │ Function │ │ Function │ │ Function │ // └───────────┘ └───────────┘ └───────────┘ └───────────┘ // Timeout // Terminate // (internal clock) const ( - // Terminate hot container after this timeout - htcntrScaleDownTimeout = 30 * time.Second + // Terminate hot function after this timeout + htfnScaleDownTimeout = 30 * time.Second ) // RunTask helps sending a task.Request into the common concurrency stream. @@ -78,11 +78,11 @@ func RunTask(tasks chan task.Request, ctx context.Context, cfg *task.Config) (dr // StartWorkers operates the common concurrency stream, ie, it will process all // IronFunctions tasks, either sync or async. In the process, it also dispatches -// the workload to either regular or hot containers. +// the workload to either regular or hot functions. func StartWorkers(ctx context.Context, rnr *Runner, tasks <-chan task.Request) { var wg sync.WaitGroup defer wg.Wait() - var hcmgr htcntrmgr + var hcmgr htfnmgr for { select { @@ -105,15 +105,15 @@ func StartWorkers(ctx context.Context, rnr *Runner, tasks <-chan task.Request) { } } -// htcntrmgr is the intermediate between the common concurrency stream and -// hot containers. All hot containers share a single task.Request stream per -// function (chn), but each function may have more than one hot container (hc). -type htcntrmgr struct { +// htfnmgr is the intermediate between the common concurrency stream and +// hot functions. All hot functions share a single task.Request stream per +// function (chn), but each function may have more than one hot function (hc). 
+type htfnmgr struct { chn map[string]chan task.Request - hc map[string]*htcntrsvr + hc map[string]*htfnsvr } -func (h *htcntrmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config) chan task.Request { +func (h *htfnmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config) chan task.Request { isStream, err := protocol.IsStreamable(cfg.Format) if err != nil { logrus.WithError(err).Info("could not detect container IO protocol") @@ -124,7 +124,7 @@ func (h *htcntrmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config) if h.chn == nil { h.chn = make(map[string]chan task.Request) - h.hc = make(map[string]*htcntrsvr) + h.hc = make(map[string]*htfnsvr) } // TODO(ccirello): re-implement this without memory allocation (fmt.Sprint) @@ -133,9 +133,9 @@ func (h *htcntrmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config) if !ok { h.chn[fn] = make(chan task.Request) tasks = h.chn[fn] - svr := newhtcntrsvr(ctx, cfg, rnr, tasks) + svr := newhtfnsvr(ctx, cfg, rnr, tasks) if err := svr.launch(ctx); err != nil { - logrus.WithError(err).Error("cannot start hot container supervisor") + logrus.WithError(err).Error("cannot start hot function supervisor") return nil } h.hc[fn] = svr @@ -144,11 +144,11 @@ func (h *htcntrmgr) getPipe(ctx context.Context, rnr *Runner, cfg *task.Config) return tasks } -// htcntrsvr is part of htcntrmgr, abstracted apart for simplicity, its only -// purpose is to test for hot containers saturation and try starting as many as +// htfnsvr is part of htfnmgr, abstracted apart for simplicity, its only +// purpose is to test for hot functions saturation and try starting as many as // needed. In case of absence of workload, it will stop trying to start new hot // containers. 
-type htcntrsvr struct { +type htfnsvr struct { cfg *task.Config rnr *Runner tasksin <-chan task.Request @@ -156,8 +156,8 @@ type htcntrsvr struct { maxc chan struct{} } -func newhtcntrsvr(ctx context.Context, cfg *task.Config, rnr *Runner, tasks <-chan task.Request) *htcntrsvr { - svr := &htcntrsvr{ +func newhtfnsvr(ctx context.Context, cfg *task.Config, rnr *Runner, tasks <-chan task.Request) *htfnsvr { + svr := &htfnsvr{ cfg: cfg, rnr: rnr, tasksin: tasks, @@ -166,23 +166,23 @@ func newhtcntrsvr(ctx context.Context, cfg *task.Config, rnr *Runner, tasks <-ch } // This pipe will take all incoming tasks and just forward them to the - // started hot containers. The catch here is that it feeds a buffered + // started hot functions. The catch here is that it feeds a buffered // channel from an unbuffered one. And this buffered channel is - // then used to determine the presence of running hot containers. - // If no hot container is available, tasksout will fill up to its + // then used to determine the presence of running hot functions. + // If no hot function is available, tasksout will fill up to its // capacity and pipe() will start them. 
go svr.pipe(ctx) return svr } -func (svr *htcntrsvr) pipe(ctx context.Context) { +func (svr *htfnsvr) pipe(ctx context.Context) { for { select { case t := <-svr.tasksin: svr.tasksout <- t if len(svr.tasksout) > 0 { if err := svr.launch(ctx); err != nil { - logrus.WithError(err).Error("cannot start more hot containers") + logrus.WithError(err).Error("cannot start more hot functions") } } case <-ctx.Done(): @@ -191,10 +191,10 @@ func (svr *htcntrsvr) pipe(ctx context.Context) { } } -func (svr *htcntrsvr) launch(ctx context.Context) error { +func (svr *htfnsvr) launch(ctx context.Context) error { select { case svr.maxc <- struct{}{}: - hc, err := newhtcntr( + hc, err := newhtfn( svr.cfg, protocol.Protocol(svr.cfg.Format), svr.tasksout, @@ -213,10 +213,10 @@ func (svr *htcntrsvr) launch(ctx context.Context) error { return nil } -// htcntr actually interfaces an incoming task from the common concurrency +// htfn actually interfaces an incoming task from the common concurrency // stream into a long lived container. If idle long enough, it will stop. It // uses route configuration to determine which protocol to use. 
-type htcntr struct { +type htfn struct { cfg *task.Config proto protocol.ContainerIO tasks <-chan task.Request @@ -233,7 +233,7 @@ type htcntr struct { rnr *Runner } -func newhtcntr(cfg *task.Config, proto protocol.Protocol, tasks <-chan task.Request, rnr *Runner) (*htcntr, error) { +func newhtfn(cfg *task.Config, proto protocol.Protocol, tasks <-chan task.Request, rnr *Runner) (*htfn, error) { stdinr, stdinw := io.Pipe() stdoutr, stdoutw := io.Pipe() @@ -242,7 +242,7 @@ func newhtcntr(cfg *task.Config, proto protocol.Protocol, tasks <-chan task.Requ return nil, err } - hc := &htcntr{ + hc := &htfn{ cfg: cfg, proto: p, tasks: tasks, @@ -259,14 +259,14 @@ func newhtcntr(cfg *task.Config, proto protocol.Protocol, tasks <-chan task.Requ return hc, nil } -func (hc *htcntr) serve(ctx context.Context) { +func (hc *htfn) serve(ctx context.Context) { lctx, cancel := context.WithCancel(ctx) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() for { - inactivity := time.After(htcntrScaleDownTimeout) + inactivity := time.After(htfnScaleDownTimeout) select { case <-lctx.Done(): @@ -309,10 +309,10 @@ func (hc *htcntr) serve(ctx context.Context) { // Stderr, on the other hand, can be written by anything any time: // failure between requests, failures inside requests and messages send // right after stdout has been finished being transmitted. Thus, with - // hot containers, there is not a 1:1 relation between stderr and tasks. + // hot functions, there is not a 1:1 relation between stderr and tasks. // // Still, we do pass - at protocol level - a Task-ID header, from which - // the application running inside the hot container can use to identify + // the application running inside the hot function can use to identify // its own stderr output. 
errr, errw := io.Pipe() cfg.Stderr = errw @@ -334,12 +334,12 @@ func (hc *htcntr) serve(ctx context.Context) { result, err := hc.rnr.Run(lctx, &cfg) if err != nil { - logrus.WithError(err).Error("hot container failure detected") + logrus.WithError(err).Error("hot function failure detected") } cancel() errw.Close() wg.Wait() - logrus.WithField("result", result).Info("hot container terminated") + logrus.WithField("result", result).Info("hot function terminated") } func runTaskReq(rnr *Runner, wg *sync.WaitGroup, t task.Request) { diff --git a/docs/README.md b/docs/README.md index ae801040e..e2fe36fda 100644 --- a/docs/README.md +++ b/docs/README.md @@ -15,7 +15,7 @@ If you are a developer using IronFunctions through the API, this section is for * [Packaging functions](packaging.md) * [Open Function Format](function-format.md) * [API Reference](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/iron-io/functions/master/docs/swagger.yml) -* [Hot containers](hot-containers.md) +* [Hot functions](hot-functions.md) ## For Operators diff --git a/docs/definitions.md b/docs/definitions.md index 8a63cfb2a..8ad9b113e 100644 --- a/docs/definitions.md +++ b/docs/definitions.md @@ -39,7 +39,7 @@ curl -H "Content-Type: application/json" -X POST -d '{ #### name (string) -`name` is a property that references an unique app. +`name` is a property that references an unique app. App names are immutable. When updating apps with `PATCH` requests, keep in mind that although you are able to update an app's configuration set, you cannot really rename it. @@ -47,7 +47,7 @@ are able to update an app's configuration set, you cannot really rename it. #### config (object) `config` is a map of values passed to the route runtime in the form of -environment variables. +environment variables. Note: Route level configuration overrides app level configuration. @@ -138,16 +138,16 @@ Note: Route level configuration overrides app level configuration. 
#### format (string) -`format` defines if the function is running or not in `hot container` mode. +`format` defines if the function is running or not in `hot function` mode. -To define the function execution as `hot container` you set it as one of the following formats: +To define the function execution as `hot function` you set it as one of the following formats: - `"http"` -### 'Hot Container' Only Properties +### 'Hot Function' Only Properties -This properties are only used if the function is in `hot container` mode +These properties are only used if the function is in `hot function` mode #### max_concurrency (string) -This property defines the maximum amount of concurrent hot containers instances the function should have (per IronFunction node). \ No newline at end of file +This property defines the maximum amount of concurrent hot function instances the function should have (per IronFunction node). \ No newline at end of file diff --git a/docs/function-file.md b/docs/function-file.md index 2d00281e5..7bd2facde 100644 --- a/docs/function-file.md +++ b/docs/function-file.md @@ -55,17 +55,17 @@ during functions execution. `build` (optional) is an array of local shell calls which are used to help building the function. -## Hot containers +## Hot functions -Hot containers support also adds two extra options to this configuration file. +Hot functions support also adds two extra options to this configuration file. `format` (optional) is one of the streaming formats covered at [function-format.md](function-format.md). -`max_concurrency` (optional) is the maximum of hot containers per node to be +`max_concurrency` (optional) is the maximum of hot functions per node to be started for a certain function. It defaults to one per function. If you understand you need more processing power, make sure to raise this number. Keep in mind that if there is not available memory to execute the configured -workload, it will fail to start new hot containers. 
+workload, it will fail to start new hot functions. ## Testing functions diff --git a/docs/hot-containers.md b/docs/hot-functions.md similarity index 85% rename from docs/hot-containers.md rename to docs/hot-functions.md index 0e271e4c6..d314b2c44 100644 --- a/docs/hot-containers.md +++ b/docs/hot-functions.md @@ -1,4 +1,4 @@ -# Hot containers +# Hot functions IronFunctions is built on top of container technologies, for each incoming workload, it spins a new container, feed it with the payload and sends the container. You may refer to [this blog](https://medium.com/travis-on-docker/the- In the case you need faster start times for your function, you may use a hot container instead. -Hot containers are started once and kept alive while there is incoming workload. -Thus, it means that once you decide to use a hot container, you must be able to +Hot functions are started once and kept alive while there is incoming workload. +Thus, it means that once you decide to use a hot function, you must be able to tell the moment it should reading from standard input to start writing to standard output. @@ -17,9 +17,9 @@ Currently, IronFunctions implements a HTTP-like protocol to operate hot containers, but instead of communication through a TCP/IP port, it uses standard input/output. -## Implementing a hot container +## Implementing a hot function -In the [examples directory](https://github.com/iron-io/functions/blob/master/examples/hotcontainers/http/func.go), there is one simple implementation of a hot container +In the [examples directory](https://github.com/iron-io/functions/blob/master/examples/hotcontainers/http/func.go), there is one simple implementation of a hot function which we are going to get in the details here. The basic cycle comprises three steps: read standard input up to a previosly @@ -75,9 +75,9 @@ res.Write(os.Stdout) Rinse and repeat for each incoming workload. 
-## Deploying a hot container +## Deploying a hot function -Once your functions is adapted to be handled as hot container, you must tell +Once your functions is adapted to be handled as hot function, you must tell IronFunctions daemon that this function is now ready to be reused across requests: @@ -99,5 +99,5 @@ requests: `format` (mandatory) either "default" or "http". If "http", then it is a hot container. -`max_concurrency` (optional) - the number of simultaneous hot containers for +`max_concurrency` (optional) - the number of simultaneous hot functions for this functions. This is a per-node configuration option. Default: 1 \ No newline at end of file diff --git a/docs/swagger.yml b/docs/swagger.yml index 74588d261..03c8db511 100644 --- a/docs/swagger.yml +++ b/docs/swagger.yml @@ -6,7 +6,7 @@ swagger: '2.0' info: title: IronFunctions description: The open source serverless platform. - version: "0.1.28" + version: "0.1.29" # the domain of the service host: "127.0.0.1:8080" # array of all schemes that your API supports @@ -370,7 +370,7 @@ definitions: max_concurrency: type: integer format: int32 - description: Maximum number of hot containers concurrency + description: Maximum number of hot functions concurrency config: type: object description: Route configuration - overrides application configuration diff --git a/examples/hotcontainers/http/func.go b/examples/hotfunctions/http/func.go similarity index 100% rename from examples/hotcontainers/http/func.go rename to examples/hotfunctions/http/func.go diff --git a/examples/hotcontainers/http/hotroute.json b/examples/hotfunctions/http/hotroute.json similarity index 100% rename from examples/hotcontainers/http/hotroute.json rename to examples/hotfunctions/http/hotroute.json diff --git a/fn/init.go b/fn/init.go index e6fd64b1e..621080092 100644 --- a/fn/init.go +++ b/fn/init.go @@ -77,13 +77,13 @@ func initFn() cli.Command { }, cli.StringFlag{ Name: "format", - Usage: "hot container IO format - json or http", + 
Usage: "hot function IO format - json or http", Destination: &a.format, Value: "", }, cli.IntFlag{ Name: "max-concurrency", - Usage: "maximum concurrency for hot container", + Usage: "maximum concurrency for hot function", Destination: &a.maxConcurrency, Value: 1, }, diff --git a/fn/routes.go b/fn/routes.go index f695b0389..21fd97dca 100644 --- a/fn/routes.go +++ b/fn/routes.go @@ -66,12 +66,12 @@ func routes() cli.Command { }, cli.StringFlag{ Name: "format,f", - Usage: "hot container IO format - json or http", + Usage: "hot function IO format - json or http", Value: "", }, cli.IntFlag{ Name: "max-concurrency", - Usage: "maximum concurrency for hot container", + Usage: "maximum concurrency for hot function", Value: 1, }, cli.DurationFlag{