mirror of
https://github.com/fnproject/fn.git
synced 2022-10-28 21:29:17 +03:00
* Initial stab at the protocol * initial protocol sketch for node pool manager * Added http header frame as a message * Force the use of WithAgent variants when creating a server * adds grpc models for node pool manager plus go deps * Naming things is really hard * Merge (and optionally purge) details received by the NPM * WIP: starting to add the runner-side functionality of the new data plane * WIP: Basic startup of grpc server for pure runner. Needs proper certs. * Go fmt * Initial agent for LB nodes. * Agent implementation for LB nodes. * Pass keys and certs to LB node agent. * Remove accidentally left reference to env var. * Add env variables for certificate files * stub out the capacity and group membership server channels * implement server-side runner manager service * removes unused variable * fixes build error * splits up GetCall and GetLBGroupId * Change LB node agent to use TLS connection. * Encode call model as JSON to send to runner node. * Use hybrid client in LB node agent. This should provide access to get app and route information for the call from an API node. * More error handling on the pure runner side * Tentative fix for GetCall problem: set deadlines correctly when reserving slot * Connect loop for LB agent to runner nodes. * Extract runner connection function in LB agent. * drops committed capacity counts * Bugfix - end state tracker only in submit * Do logs properly * adds first pass of tracking capacity metrics in agent * maked memory capacity metric uint64 * maked memory capacity metric uint64 * removes use of old capacity field * adds remove capacity call * merges overwritten reconnect logic * First pass of a NPM Provide a service that talks to a (simulated) CP. 
- Receive incoming capacity assertions from LBs for LBGs - expire LB requests after a short period - ask the CP to add runners to a LBG - note runner set changes and readvertise - scale down by marking runners as "draining" - shut off draining runners after some cool-down period * add capacity update on schedule * Send periodic capcacity metrics Sending capcacity metrics to node pool manager * splits grpc and api interfaces for capacity manager * failure to advertise capacity shouldn't panic * Add some instructions for starting DP/CP parts. * Create the poolmanager server with TLS * Use logrus * Get npm compiling with cert fixups. * Fix: pure runner should not start async processing * brings runner, nulb and npm together * Add field to acknowledgment to record slot allocation latency; fix a bug too * iterating on pool manager locking issue * raises timeout of placement retry loop * Fix up NPM Improve logging Ensure that channels etc. are actually initialised in the structure creation! * Update the docs - runners GRPC port is 9120 * Bugfix: return runner pool accurately. * Double locking * Note purges as LBs stop talking to us * Get the purging of old LBs working. * Tweak: on restart, load runner set before making scaling decisions. * more agent synchronization improvements * Deal with teh CP pulling out active hosts from under us. * lock at lbgroup level * Send request and receive response from runner. * Add capacity check right before slot reservation * Pass the full Call into the receive loop. 
* Wait for the data from the runner before finishing * force runner list refresh every time * Don't init db and mq for pure runners * adds shutdown of npm * fixes broken log line * Extract an interface for the Predictor used by the NPM * purge drained connections from npm * Refactor of the LB agent into the agent package * removes capacitytest wip * Fix undefined err issue * updating README for poolmanager set up * ues retrying dial for lb to npm connections * Rename lb_calls to lb_agent now that all functionality is there * Use the right deadline and errors in LBAgent * Make stream error flag per-call rather than global otherwise the whole runner is damaged by one call dropping * abstracting gRPCNodePool * Make stream error flag per-call rather than global otherwise the whole runner is damaged by one call dropping * Add some init checks for LB and pure runner nodes * adding some useful debug * Fix default db and mq for lb node * removes unreachable code, fixes typo * Use datastore as logstore in API nodes. This fixes a bug caused by trying to insert logs into a nil logstore. It was nil because it wasn't being set for API nodes. * creates placement abstraction and moves capacity APIs to NodePool * removed TODO, added logging * Dial reconnections for LB <-> runners LB grpc connections to runners are established using a backoff stategy in event of reconnections, this allows to let the LB up even in case one of the runners go away and reconnect to it as soon as it is back. * Add a status call to the Runner protocol Stub at the moment. To be used for things like draindown, health checks. * Remove comment. * makes assign/release capacity lockless * Fix hanging issue in lb agent when connections drop * Add the CH hash from fnlb Select this with FN_PLACER=ch when launching the LB. 
* small improvement for locking on reloadLBGmembership * Stabilise the list of Runenrs returned by NodePool The NodePoolManager makes some attempt to keep the list of runner nodes advertised as stable as possible. Let's preserve this effort in the client side. The main point of this is to attempt to keep the same runner at the same inxed in the []Runner returned by NodePool.Runners(lbgid); the ch algorithm likes it when this is the case. * Factor out a generator function for the Runners so that mocks can be injected * temporarily allow lbgroup to be specified in HTTP header, while we sort out changes to the model * fixes bug with nil runners * Initial work for mocking things in tests * fix for anonymouse go routine error * fixing lb_test to compile * Refactor: internal objects for gRPCNodePool are now injectable, with defaults for the real world case * Make GRPC port configurable, fix weird handling of web port too * unit test reload Members * check on runner creation failure * adding nullRunner in case of failure during runner creation * Refactored capacity advertisements/aggregations. Made grpc advertisement post asynchronous and non-blocking. * make capacityEntry private * Change the runner gRPC bind address. This uses the existing `whoAmI` function, so that the gRPC server works when the runner is running on a different host. * Add support for multiple fixed runners to pool mgr * Added harness for dataplane system tests, minor refactors * Add Dockerfiles for components, along with docs. * Doc fix: second runner needs a different name. 
* Let us have three runners in system tests, why not * The first system test running a function in API/LB/PureRunner mode * Add unit test for Advertiser logic * Fix issue with Pure Runner not sending the last data frame * use config in models.Call as a temporary mechanism to override lb group ID * make gofmt happy * Updates documentation for how to configure lb groups for an app/route * small refactor unit test * Factor NodePool into its own package * Lots of fixes to Pure Runner - concurrency woes with errors and cancellations * New dataplane with static runnerpool (#813) Added static node pool as default implementation * moved nullRunner to grpc package * remove duplication in README * fix go vet issues * Fix server initialisation in api tests * Tiny logging changes in pool manager. Using `WithError` instead of `Errorf` when appropriate. * Change some log levels in the pure runner * fixing readme * moves multitenant compute documentation * adds introduction to multitenant readme * Proper triggering of system tests in makefile * Fix insructions about starting up the components * Change db file for system tests to avoid contention in parallel tests * fixes revisions from merge * Fix merge issue with handling of reserved slot * renaming nulb to lb in the doc and images folder * better TryExec sleep logic clean shutdown In this change we implement a better way to deal with the sleep inside the for loop during the attempt for placing a call. Plus we added a clean way to shutdown the connections with external component when we shut down the server. * System_test mysql port set mysql port for system test to a different value to the one set for the api tests to avoid conflicts as they can run in parallel. 
* change the container name for system-test * removes flaky test TestRouteRunnerExecution pending resolution by issue #796 * amend remove_containers to remove new added containers * Rework capacity reservation logic at a higher level for now * LB agent implements Submit rather than delegating. * Fix go vet linting errors * Changed a couple of error levels * Fix formatting * removes commmented out test * adds snappy to vendor directory * updates Gopkg and vendor directories, removing snappy and addhing siphash * wait for db containers to come up before starting the tests * make system tests start API node on 8085 to avoid port conflict with api_tests * avoid port conflicts with api_test.sh which are run in parallel * fixes postgres port conflict and issue with removal of old containers * Remove spurious println
410 lines
12 KiB
Go
410 lines
12 KiB
Go
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
|
|
|
|
package grpc
|
|
|
|
import (
|
|
"fmt"
|
|
"net"
|
|
"sync"
|
|
|
|
"golang.org/x/net/context"
|
|
"google.golang.org/grpc/codes"
|
|
"google.golang.org/grpc/credentials"
|
|
"google.golang.org/grpc/grpclog"
|
|
"google.golang.org/grpc/naming"
|
|
"google.golang.org/grpc/status"
|
|
)
|
|
|
|
// Address represents a server the client connects to.
// This is the EXPERIMENTAL API and may be changed or extended in the future.
type Address struct {
	// Addr is the server address on which a connection will be established.
	Addr string
	// Metadata is the information associated with Addr, which may be used
	// to make load balancing decision.
	Metadata interface{}
}
|
|
|
|
// BalancerConfig specifies the configurations for Balancer.
type BalancerConfig struct {
	// DialCreds is the transport credential the Balancer implementation can
	// use to dial to a remote load balancer server. The Balancer implementations
	// can ignore this if it does not need to talk to another party securely.
	DialCreds credentials.TransportCredentials
	// Dialer is the custom dialer the Balancer implementation can use to dial
	// to a remote load balancer server. The Balancer implementations
	// can ignore this if it doesn't need to talk to remote balancer.
	Dialer func(context.Context, string) (net.Conn, error)
}
|
|
|
|
// BalancerGetOptions configures a Get call.
// This is the EXPERIMENTAL API and may be changed or extended in the future.
type BalancerGetOptions struct {
	// BlockingWait specifies whether Get should block when there is no
	// connected address.
	BlockingWait bool
}
|
|
|
|
// Balancer chooses network addresses for RPCs.
// This is the EXPERIMENTAL API and may be changed or extended in the future.
type Balancer interface {
	// Start does the initialization work to bootstrap a Balancer. For example,
	// this function may start the name resolution and watch the updates. It will
	// be called when dialing.
	Start(target string, config BalancerConfig) error
	// Up informs the Balancer that gRPC has a connection to the server at
	// addr. It returns down which is called once the connection to addr gets
	// lost or closed.
	// TODO: It is not clear how to construct and take advantage of the meaningful error
	// parameter for down. Need realistic demands to guide.
	Up(addr Address) (down func(error))
	// Get gets the address of a server for the RPC corresponding to ctx.
	// i) If it returns a connected address, gRPC internals issues the RPC on the
	// connection to this address;
	// ii) If it returns an address on which the connection is under construction
	// (initiated by Notify(...)) but not connected, gRPC internals
	//  * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
	//  Shutdown state;
	//  or
	//  * issues RPC on the connection otherwise.
	// iii) If it returns an address on which the connection does not exist, gRPC
	// internals treats it as an error and will fail the corresponding RPC.
	//
	// Therefore, the following is the recommended rule when writing a custom Balancer.
	// If opts.BlockingWait is true, it should return a connected address or
	// block if there is no connected address. It should respect the timeout or
	// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
	// RPCs), it should return an address it has notified via Notify(...) immediately
	// instead of blocking.
	//
	// The function returns put which is called once the rpc has completed or failed.
	// put can collect and report RPC stats to a remote load balancer.
	//
	// This function should only return the errors Balancer cannot recover by itself.
	// gRPC internals will fail the RPC if an error is returned.
	Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
	// Notify returns a channel that is used by gRPC internals to watch the addresses
	// gRPC needs to connect. The addresses might be from a name resolver or remote
	// load balancer. gRPC internals will compare it with the existing connected
	// addresses. If the address Balancer notified is not in the existing connected
	// addresses, gRPC starts to connect the address. If an address in the existing
	// connected addresses is not in the notification list, the corresponding connection
	// is shutdown gracefully. Otherwise, there are no operations to take. Note that
	// the Address slice must be the full list of the Addresses which should be connected.
	// It is NOT delta.
	Notify() <-chan []Address
	// Close shuts down the balancer.
	Close() error
}
|
|
|
|
// downErr implements net.Error. It is constructed by gRPC internals and passed
// to the down call of Balancer to describe why a connection went away.
type downErr struct {
	timeout   bool // whether the failure was a timeout
	temporary bool // whether the failure is considered transient
	desc      string
}

func (d downErr) Error() string   { return d.desc }
func (d downErr) Timeout() bool   { return d.timeout }
func (d downErr) Temporary() bool { return d.temporary }

// downErrorf builds a downErr whose description is produced from the given
// Printf-style format and arguments.
func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
	d := downErr{timeout: timeout, temporary: temporary}
	d.desc = fmt.Sprintf(format, a...)
	return d
}
|
|
|
|
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
|
|
// the name resolution updates and updates the addresses available correspondingly.
|
|
func RoundRobin(r naming.Resolver) Balancer {
|
|
return &roundRobin{r: r}
|
|
}
|
|
|
|
// addrInfo pairs an Address with the connectivity state last reported for it
// via the Up/down callbacks.
type addrInfo struct {
	addr      Address
	connected bool
}
|
|
|
|
// roundRobin is the Balancer implementation returned by RoundRobin (and
// embedded by pickFirst). mu guards all mutable fields below it.
type roundRobin struct {
	r      naming.Resolver
	w      naming.Watcher
	addrs  []*addrInfo // all the addresses the client should potentially connect
	mu     sync.Mutex
	addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
	next   int            // index of the next address to return for Get()
	waitCh chan struct{}  // the channel to block when there is no connected address available
	done   bool           // The Balancer is closed.
}
|
|
|
|
func (rr *roundRobin) watchAddrUpdates() error {
|
|
updates, err := rr.w.Next()
|
|
if err != nil {
|
|
grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
|
|
return err
|
|
}
|
|
rr.mu.Lock()
|
|
defer rr.mu.Unlock()
|
|
for _, update := range updates {
|
|
addr := Address{
|
|
Addr: update.Addr,
|
|
Metadata: update.Metadata,
|
|
}
|
|
switch update.Op {
|
|
case naming.Add:
|
|
var exist bool
|
|
for _, v := range rr.addrs {
|
|
if addr == v.addr {
|
|
exist = true
|
|
grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
|
|
break
|
|
}
|
|
}
|
|
if exist {
|
|
continue
|
|
}
|
|
rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
|
|
case naming.Delete:
|
|
for i, v := range rr.addrs {
|
|
if addr == v.addr {
|
|
copy(rr.addrs[i:], rr.addrs[i+1:])
|
|
rr.addrs = rr.addrs[:len(rr.addrs)-1]
|
|
break
|
|
}
|
|
}
|
|
default:
|
|
grpclog.Errorln("Unknown update.Op ", update.Op)
|
|
}
|
|
}
|
|
// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
|
|
open := make([]Address, len(rr.addrs))
|
|
for i, v := range rr.addrs {
|
|
open[i] = v.addr
|
|
}
|
|
if rr.done {
|
|
return ErrClientConnClosing
|
|
}
|
|
select {
|
|
case <-rr.addrCh:
|
|
default:
|
|
}
|
|
rr.addrCh <- open
|
|
return nil
|
|
}
|
|
|
|
func (rr *roundRobin) Start(target string, config BalancerConfig) error {
|
|
rr.mu.Lock()
|
|
defer rr.mu.Unlock()
|
|
if rr.done {
|
|
return ErrClientConnClosing
|
|
}
|
|
if rr.r == nil {
|
|
// If there is no name resolver installed, it is not needed to
|
|
// do name resolution. In this case, target is added into rr.addrs
|
|
// as the only address available and rr.addrCh stays nil.
|
|
rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
|
|
return nil
|
|
}
|
|
w, err := rr.r.Resolve(target)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
rr.w = w
|
|
rr.addrCh = make(chan []Address, 1)
|
|
go func() {
|
|
for {
|
|
if err := rr.watchAddrUpdates(); err != nil {
|
|
return
|
|
}
|
|
}
|
|
}()
|
|
return nil
|
|
}
|
|
|
|
// Up sets the connected state of addr and sends notification if there are pending
|
|
// Get() calls.
|
|
func (rr *roundRobin) Up(addr Address) func(error) {
|
|
rr.mu.Lock()
|
|
defer rr.mu.Unlock()
|
|
var cnt int
|
|
for _, a := range rr.addrs {
|
|
if a.addr == addr {
|
|
if a.connected {
|
|
return nil
|
|
}
|
|
a.connected = true
|
|
}
|
|
if a.connected {
|
|
cnt++
|
|
}
|
|
}
|
|
// addr is only one which is connected. Notify the Get() callers who are blocking.
|
|
if cnt == 1 && rr.waitCh != nil {
|
|
close(rr.waitCh)
|
|
rr.waitCh = nil
|
|
}
|
|
return func(err error) {
|
|
rr.down(addr, err)
|
|
}
|
|
}
|
|
|
|
// down unsets the connected state of addr.
|
|
func (rr *roundRobin) down(addr Address, err error) {
|
|
rr.mu.Lock()
|
|
defer rr.mu.Unlock()
|
|
for _, a := range rr.addrs {
|
|
if addr == a.addr {
|
|
a.connected = false
|
|
break
|
|
}
|
|
}
|
|
}
|
|
|
|
// Get returns the next addr in the rotation.
|
|
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
|
|
var ch chan struct{}
|
|
rr.mu.Lock()
|
|
if rr.done {
|
|
rr.mu.Unlock()
|
|
err = ErrClientConnClosing
|
|
return
|
|
}
|
|
|
|
if len(rr.addrs) > 0 {
|
|
if rr.next >= len(rr.addrs) {
|
|
rr.next = 0
|
|
}
|
|
next := rr.next
|
|
for {
|
|
a := rr.addrs[next]
|
|
next = (next + 1) % len(rr.addrs)
|
|
if a.connected {
|
|
addr = a.addr
|
|
rr.next = next
|
|
rr.mu.Unlock()
|
|
return
|
|
}
|
|
if next == rr.next {
|
|
// Has iterated all the possible address but none is connected.
|
|
break
|
|
}
|
|
}
|
|
}
|
|
if !opts.BlockingWait {
|
|
if len(rr.addrs) == 0 {
|
|
rr.mu.Unlock()
|
|
err = status.Errorf(codes.Unavailable, "there is no address available")
|
|
return
|
|
}
|
|
// Returns the next addr on rr.addrs for failfast RPCs.
|
|
addr = rr.addrs[rr.next].addr
|
|
rr.next++
|
|
rr.mu.Unlock()
|
|
return
|
|
}
|
|
// Wait on rr.waitCh for non-failfast RPCs.
|
|
if rr.waitCh == nil {
|
|
ch = make(chan struct{})
|
|
rr.waitCh = ch
|
|
} else {
|
|
ch = rr.waitCh
|
|
}
|
|
rr.mu.Unlock()
|
|
for {
|
|
select {
|
|
case <-ctx.Done():
|
|
err = ctx.Err()
|
|
return
|
|
case <-ch:
|
|
rr.mu.Lock()
|
|
if rr.done {
|
|
rr.mu.Unlock()
|
|
err = ErrClientConnClosing
|
|
return
|
|
}
|
|
|
|
if len(rr.addrs) > 0 {
|
|
if rr.next >= len(rr.addrs) {
|
|
rr.next = 0
|
|
}
|
|
next := rr.next
|
|
for {
|
|
a := rr.addrs[next]
|
|
next = (next + 1) % len(rr.addrs)
|
|
if a.connected {
|
|
addr = a.addr
|
|
rr.next = next
|
|
rr.mu.Unlock()
|
|
return
|
|
}
|
|
if next == rr.next {
|
|
// Has iterated all the possible address but none is connected.
|
|
break
|
|
}
|
|
}
|
|
}
|
|
// The newly added addr got removed by Down() again.
|
|
if rr.waitCh == nil {
|
|
ch = make(chan struct{})
|
|
rr.waitCh = ch
|
|
} else {
|
|
ch = rr.waitCh
|
|
}
|
|
rr.mu.Unlock()
|
|
}
|
|
}
|
|
}
|
|
|
|
// Notify returns the channel on which full address-list updates are delivered
// (see Balancer.Notify). It is nil when no resolver is in use.
func (rr *roundRobin) Notify() <-chan []Address {
	return rr.addrCh
}
|
|
|
|
func (rr *roundRobin) Close() error {
|
|
rr.mu.Lock()
|
|
defer rr.mu.Unlock()
|
|
if rr.done {
|
|
return errBalancerClosed
|
|
}
|
|
rr.done = true
|
|
if rr.w != nil {
|
|
rr.w.Close()
|
|
}
|
|
if rr.waitCh != nil {
|
|
close(rr.waitCh)
|
|
rr.waitCh = nil
|
|
}
|
|
if rr.addrCh != nil {
|
|
close(rr.addrCh)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
// returns the only address Up by resetTransport().
type pickFirst struct {
	*roundRobin
}
|
|
|
|
func pickFirstBalancerV1(r naming.Resolver) Balancer {
|
|
return &pickFirst{&roundRobin{r: r}}
|
|
}
|