fn: cancellations in WaitAsyncResource (#694)

* fn: cancellations in WaitAsyncResource

Added go context with cancel to wait async resource. Although
today, the only case for cancellation is shutdown, this cleans
up agent shutdown a little bit.

* fn: locked broadcast to avoid missed wake-ups

* fn: removed ctx arg to WaitAsyncResource and startDequeuer

This is confusing and unnecessary.
This commit is contained in:
Tolga Ceylan
2018-01-17 16:08:54 -08:00
committed by GitHub
parent 7c74c2fe88
commit 5a7778a656
6 changed files with 41 additions and 24 deletions

View File

@@ -24,7 +24,7 @@ const (
// A simple resource (memory, cpu, disk, etc.) tracker for scheduling.
// TODO: add cpu, disk, network IO for future
type ResourceTracker interface {
WaitAsyncResource() chan struct{}
WaitAsyncResource() (chan struct{}, context.CancelFunc)
// returns a closed channel if the resource can never be met.
GetResourceToken(ctx context.Context, memory uint64, cpuQuota uint64, isAsync bool) <-chan ResourceToken
}
@@ -184,21 +184,32 @@ func (a *resourceTracker) GetResourceToken(ctx context.Context, memory uint64, c
// WaitAsyncResource will send a signal on the returned channel when RAM and CPU in-use
// in the async area is less than high water mark
func (a *resourceTracker) WaitAsyncResource() chan struct{} {
ch := make(chan struct{})
func (a *resourceTracker) WaitAsyncResource() (chan struct{}, context.CancelFunc) {
ch := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(context.Background())
c := a.cond
myCancel := func() {
cancel()
c.L.Lock()
c.Broadcast()
c.L.Unlock()
}
go func() {
c.L.Lock()
for a.ramAsyncUsed >= a.ramAsyncHWMark || a.cpuAsyncUsed >= a.cpuAsyncHWMark {
for (a.ramAsyncUsed >= a.ramAsyncHWMark || a.cpuAsyncUsed >= a.cpuAsyncHWMark) && ctx.Err() == nil {
c.Wait()
}
c.L.Unlock()
ch <- struct{}{}
// TODO this could leak forever (only in shutdown, blech)
if ctx.Err() == nil {
ch <- struct{}{}
}
}()
return ch
return ch, myCancel
}
func minUint64(a, b uint64) uint64 {