Mirror of https://github.com/fnproject/fn.git, synced 2022-10-28 21:29:17 +03:00
fn: remove async+sync separation in resource tracker (#1254)
This simplifies the resource tracker. Originally we split CPU/memory into two pools, with 20% reserved specifically for sync calls so that async calls could not dominate the system. However, the resource tracker should not handle that kind of call prioritization. Given the improvements to the evictor, I think we can remove this code from the resource tracker for the time being.
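A minimal, self-contained Go sketch of the accounting shift, using the field names that appear in the diff below. The 20% sync reservation figure comes from this commit message, and the high-water-mark comparison in asyncFits is illustrative only: the real resourceTracker guards these fields with a sync.Cond and hands out tokens over channels.

package main

import "fmt"

// Illustrative sketch only; field names mirror the diff below.

// before: two pools per resource, a sync reservation plus an async pool.
type before struct {
	ramSyncTotal, ramSyncUsed   uint64 // ~20% reserved for sync calls
	ramAsyncTotal, ramAsyncUsed uint64 // the remainder, for async calls
}

// after: one shared pool; only the async high-water mark survives.
type after struct {
	ramTotal, ramUsed uint64 // single pool for sync and async alike
	ramAsyncHWMark    uint64 // async callers wait above this mark
}

// asyncFits shows the surviving bit of async throttling: async work is
// held back once usage crosses the high-water mark, but there is no
// longer a dedicated sync pool to fall back on.
func (a *after) asyncFits(ram uint64) bool {
	return a.ramUsed+ram <= a.ramAsyncHWMark
}

func main() {
	a := &after{ramTotal: 4 << 30, ramAsyncHWMark: 1 << 30}
	fmt.Println(a.asyncFits(1 << 30)) // true: mark not yet reached
	a.ramUsed = a.ramAsyncHWMark
	fmt.Println(a.asyncFits(1)) // false: async would wait here
}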
@@ -10,16 +10,12 @@ import (
 func setTrackerTestVals(tr *resourceTracker, vals *trackerVals) {
 	tr.cond.L.Lock()
 
-	tr.ramSyncTotal = vals.mst
-	tr.ramSyncUsed = vals.msu
-	tr.ramAsyncTotal = vals.mat
-	tr.ramAsyncUsed = vals.mau
+	tr.ramTotal = vals.mt
+	tr.ramUsed = vals.mu
 	tr.ramAsyncHWMark = vals.mam
 
-	tr.cpuSyncTotal = vals.cst
-	tr.cpuSyncUsed = vals.csu
-	tr.cpuAsyncTotal = vals.cat
-	tr.cpuAsyncUsed = vals.cau
+	tr.cpuTotal = vals.ct
+	tr.cpuUsed = vals.cu
 	tr.cpuAsyncHWMark = vals.cam
 
 	tr.cond.L.Unlock()
@@ -30,16 +26,12 @@ func getTrackerTestVals(tr *resourceTracker, vals *trackerVals) {
 
 	tr.cond.L.Lock()
 
-	vals.mst = tr.ramSyncTotal
-	vals.msu = tr.ramSyncUsed
-	vals.mat = tr.ramAsyncTotal
-	vals.mau = tr.ramAsyncUsed
+	vals.mt = tr.ramTotal
+	vals.mu = tr.ramUsed
 	vals.mam = tr.ramAsyncHWMark
 
-	vals.cst = tr.cpuSyncTotal
-	vals.csu = tr.cpuSyncUsed
-	vals.cat = tr.cpuAsyncTotal
-	vals.cau = tr.cpuAsyncUsed
+	vals.ct = tr.cpuTotal
+	vals.cu = tr.cpuUsed
 	vals.cam = tr.cpuAsyncHWMark
 
 	tr.cond.L.Unlock()
@@ -47,31 +39,23 @@ func getTrackerTestVals(tr *resourceTracker, vals *trackerVals) {
 
 // helper to debug print (fields correspond to resourceTracker CPU/MEM fields)
 type trackerVals struct {
-	mst uint64
-	msu uint64
-	mat uint64
-	mau uint64
+	mt uint64
+	mu uint64
 	mam uint64
-	cst uint64
-	csu uint64
-	cat uint64
-	cau uint64
+	ct uint64
+	cu uint64
 	cam uint64
 }
 
 func (vals *trackerVals) setDefaults() {
-	// set these to known vals (4GB total: 1GB sync, 3GB async)
-	vals.mst = 1 * Mem1GB
-	vals.msu = 0
-	vals.mat = 3 * Mem1GB
-	vals.mau = 0
+	// set these to known vals (4GB total: 1GB async hw mark)
+	vals.mt = 4 * Mem1GB
+	vals.mu = 0
 	vals.mam = 1 * Mem1GB
 
-	// let's assume 10 CPUs (2 CPU sync, 8 CPU async)
-	vals.cst = 2000
-	vals.csu = 0
-	vals.cat = 8000
-	vals.cau = 0
+	// let's assume 10 CPUs (6 CPU async hw mark)
+	vals.ct = 10000
+	vals.cu = 0
 	vals.cam = 6000
 }
 
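The new defaults keep one pool per resource plus an async high-water mark. Assuming the unit conventions the comments imply (Mem1GB as a one-gigabyte byte count, CPU in milli-CPUs so that 1000 equals one full CPU; both are assumptions, Mem1GB below is a stand-in, not the package constant), a quick sanity check of the thresholds:

package main

import "fmt"

// Stand-in for the package constant; assumed to be a byte count.
const Mem1GB uint64 = 1 << 30

func main() {
	mt, mam := 4*Mem1GB, 1*Mem1GB // total RAM pool, async hw mark
	ct, cam := 10000, 6000        // 10 CPUs and hw mark, in milli-CPUs
	fmt.Printf("async RAM mark at %d%% of the pool\n", mam*100/mt) // 25%
	fmt.Printf("async CPU mark at %d%% of the pool\n", cam*100/ct) // 60%
}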
@@ -104,17 +88,17 @@ func TestResourceAsyncWait(t *testing.T) {
 	tr := trI.(*resourceTracker)
 
 	getTrackerTestVals(tr, &vals)
-	if vals.mst <= 0 || vals.msu != 0 || vals.mat <= 0 || vals.mau != 0 || vals.mam <= 0 {
+	if vals.mt <= 0 || vals.mu != 0 || vals.mam <= 0 {
 		t.Fatalf("faulty init MEM %#v", vals)
 	}
-	if vals.cst <= 0 || vals.csu != 0 || vals.cat <= 0 || vals.cau != 0 || vals.cam <= 0 {
+	if vals.ct <= 0 || vals.cu != 0 || vals.cam <= 0 {
 		t.Fatalf("faulty init CPU %#v", vals)
 	}
 
 	vals.setDefaults()
 
 	// should block & wait
-	vals.mau = vals.mam
+	vals.mu = vals.mam
 	setTrackerTestVals(tr, &vals)
 
 	ctx1, cancel1 := context.WithCancel(context.Background())
@@ -128,7 +112,7 @@ func TestResourceAsyncWait(t *testing.T) {
 	}
 
 	// should not block & wait
-	vals.mau = 0
+	vals.mu = 0
 	setTrackerTestVals(tr, &vals)
 
 	select {
@@ -143,7 +127,7 @@ func TestResourceAsyncWait(t *testing.T) {
 	defer cancel2()
 
 	// should block & wait
-	vals.cau = vals.cam
+	vals.cu = vals.cam
 	setTrackerTestVals(tr, &vals)
 
 	select {
@@ -153,7 +137,7 @@ func TestResourceAsyncWait(t *testing.T) {
 	}
 
 	// should not block & wait
-	vals.cau = 0
+	vals.cu = 0
 	setTrackerTestVals(tr, &vals)
 
 	select {
@@ -172,14 +156,14 @@ func TestResourceGetSimple(t *testing.T) {
 	vals.setDefaults()
 
 	// let's make it like CPU and MEM are 100% full
-	vals.mau = vals.mat
-	vals.cau = vals.cat
+	vals.mu = vals.mt
+	vals.cu = vals.ct
 
 	setTrackerTestVals(tr, &vals)
 
 	// ask for 4GB and 10 CPU
 	ctx, cancel := context.WithCancel(context.Background())
-	ch := trI.GetResourceToken(ctx, 4*1024, 1000, false, false)
+	ch := trI.GetResourceToken(ctx, 4*1024, 1000, false)
 	defer cancel()
 
 	_, err := fetchToken(ch)
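This hunk carries the API change the rest of the diff repeats: GetResourceToken drops its isAsync flag, leaving (ctx, memory, CPU, non-blocking). A before/after sketch with hypothetical stand-in types; the parameter meanings (memory in MB, CPU in milli-CPUs) are assumptions taken from the tests, not a documented contract:

package main

import (
	"context"
	"fmt"
)

// Hypothetical stand-ins, only to show the call-site change.
type resourceToken struct{ err error }

func (t resourceToken) Error() error { return t.err }
func (t resourceToken) Close()       {}

type tracker struct{}

// Post-change shape: the isAsync flag that used to sit before isNB is gone.
func (tracker) GetResourceToken(ctx context.Context, memMB, cpuMilli uint64, isNB bool) <-chan resourceToken {
	ch := make(chan resourceToken, 1)
	ch <- resourceToken{} // always succeeds in this sketch
	return ch
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// before: ch := trI.GetResourceToken(ctx, 4*1024, 1000, false /*async*/, false /*nb*/)
	tok := <-tracker{}.GetResourceToken(ctx, 4*1024, 1000, false)
	if tok.Error() == nil {
		defer tok.Close() // closing the token returns its resources
	}
	fmt.Println("token acquired")
}

The buffered channel only mimics the happy path; the real tracker can hold the send until resources free up, which is what the blocking variants of these tests exercise.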
@@ -198,7 +182,7 @@ func TestResourceGetSimple(t *testing.T) {
 
 	// ask for another 4GB and 10 CPU
 	ctx, cancel = context.WithCancel(context.Background())
-	ch = trI.GetResourceToken(ctx, 4*1024, 1000, false, false)
+	ch = trI.GetResourceToken(ctx, 4*1024, 1000, false)
 	defer cancel()
 
 	_, err = fetchToken(ch)
@@ -218,10 +202,10 @@ func TestResourceGetSimple(t *testing.T) {
 
 	// POOLS should all be empty now
 	getTrackerTestVals(tr, &vals)
-	if vals.msu != 0 || vals.mau != 0 {
+	if vals.mu != 0 {
 		t.Fatalf("faulty state MEM %#v", vals)
 	}
-	if vals.csu != 0 || vals.cau != 0 {
+	if vals.cu != 0 {
 		t.Fatalf("faulty state CPU %#v", vals)
 	}
 }
@@ -235,14 +219,14 @@ func TestResourceGetSimpleNB(t *testing.T) {
 	vals.setDefaults()
 
 	// let's make it like CPU and MEM are 100% full
-	vals.mau = vals.mat
-	vals.cau = vals.cat
+	vals.mu = vals.mt
+	vals.cu = vals.ct
 
 	setTrackerTestVals(tr, &vals)
 
 	// ask for 4GB and 10 CPU
 	ctx, cancel := context.WithCancel(context.Background())
-	ch := trI.GetResourceToken(ctx, 4*1024, 1000, false, true)
+	ch := trI.GetResourceToken(ctx, 4*1024, 1000, true)
 	defer cancel()
 
 	tok := <-ch
@@ -254,14 +238,14 @@ func TestResourceGetSimpleNB(t *testing.T) {
 	vals.setDefaults()
 	setTrackerTestVals(tr, &vals)
 
-	tok1 := <-trI.GetResourceToken(ctx, 4*1024, 1000, false, true)
+	tok1 := <-trI.GetResourceToken(ctx, 4*1024, 1000, true)
 	if tok1.Error() != nil {
 		t.Fatalf("empty system should hand out token")
 	}
 
 	// ask for another 4GB and 10 CPU
 	ctx, cancel = context.WithCancel(context.Background())
-	ch = trI.GetResourceToken(ctx, 4*1024, 1000, false, true)
+	ch = trI.GetResourceToken(ctx, 4*1024, 1000, true)
 	defer cancel()
 
 	tok = <-ch
@@ -272,7 +256,7 @@ func TestResourceGetSimpleNB(t *testing.T) {
 	// close means, giant token resources released
 	tok1.Close()
 
-	tok = <-trI.GetResourceToken(ctx, 4*1024, 1000, false, true)
+	tok = <-trI.GetResourceToken(ctx, 4*1024, 1000, true)
 	if tok.Error() != nil {
 		t.Fatalf("empty system should hand out token")
 	}
@@ -281,137 +265,10 @@ func TestResourceGetSimpleNB(t *testing.T) {
 
 	// POOLS should all be empty now
 	getTrackerTestVals(tr, &vals)
-	if vals.msu != 0 || vals.mau != 0 {
+	if vals.mu != 0 {
 		t.Fatalf("faulty state MEM %#v", vals)
 	}
-	if vals.csu != 0 || vals.cau != 0 {
+	if vals.cu != 0 {
 		t.Fatalf("faulty state CPU %#v", vals)
 	}
 }
-
-func TestResourceGetCombo(t *testing.T) {
-
-	var vals trackerVals
-	trI := NewResourceTracker(nil)
-	tr := trI.(*resourceTracker)
-
-	vals.setDefaults()
-	setTrackerTestVals(tr, &vals)
-
-	// impossible request
-	ctx, cancel := context.WithCancel(context.Background())
-	ch := trI.GetResourceToken(ctx, 20*1024, 20000, false, false)
-	_, err := fetchToken(ch)
-	if err == nil {
-		t.Fatalf("impossible request should never return (error here)")
-	}
-
-	cancel()
-	ctx, cancel = context.WithCancel(context.Background())
-
-	// let's use up 2 GB of 3GB async pool
-	ch = trI.GetResourceToken(ctx, 2*1024, 10, true, false)
-	tok1, err := fetchToken(ch)
-	if err != nil {
-		t.Fatalf("empty async system should hand out token1")
-	}
-
-	cancel()
-	ctx, cancel = context.WithCancel(context.Background())
-
-	// remaining 1 GB async
-	ch = trI.GetResourceToken(ctx, 1*1024, 11, true, false)
-	tok2, err := fetchToken(ch)
-	if err != nil {
-		t.Fatalf("empty async system should hand out token2")
-	}
-
-	cancel()
-	ctx, cancel = context.WithCancel(context.Background())
-
-	// NOW ASYNC POOL IS FULL
-	// SYNC POOL HAS 1GB
-
-	// we no longer can get async token
-	ch = trI.GetResourceToken(ctx, 1*1024, 12, true, false)
-	_, err = fetchToken(ch)
-	if err == nil {
-		t.Fatalf("full async system should not hand out a token")
-	}
-
-	cancel()
-	ctx, cancel = context.WithCancel(context.Background())
-
-	// but we should get 1GB sync token
-	ch = trI.GetResourceToken(ctx, 1*1024, 13, false, false)
-	tok3, err := fetchToken(ch)
-	if err != nil {
-		t.Fatalf("empty sync system should hand out token3")
-	}
-
-	cancel()
-	ctx, cancel = context.WithCancel(context.Background())
-
-	// NOW ASYNC AND SYNC POOLS ARE FULL
-
-	// this should fail
-	ch = trI.GetResourceToken(ctx, 1*1024, 14, false, false)
-	_, err = fetchToken(ch)
-	if err == nil {
-		t.Fatalf("full system should not hand out a token")
-	}
-
-	cancel()
-	ctx, cancel = context.WithCancel(context.Background())
-
-	// now let's free up some async pool, release tok2 (1GB)
-	tok2.Close()
-
-	// NOW ASYNC POOL HAS 1GB FREE
-	// SYNC POOL IS FULL
-
-	// async pool should provide this
-	ch = trI.GetResourceToken(ctx, 1*1024, 15, false, false)
-	tok4, err := fetchToken(ch)
-	if err != nil {
-		t.Fatalf("async system should hand out token4")
-	}
-
-	cancel()
-	ctx, cancel = context.WithCancel(context.Background())
-
-	// NOW ASYNC AND SYNC POOLS ARE FULL
-
-	tok4.Close()
-	tok3.Close()
-
-	// NOW ASYNC POOL HAS 1GB FREE
-	// SYNC POOL HAS 1GB FREE
-
-	// now, we ask for 2GB sync token, it should be provided from both async+sync pools
-	ch = trI.GetResourceToken(ctx, 2*1024, 16, false, false)
-	tok5, err := fetchToken(ch)
-	if err != nil {
-		t.Fatalf("async+sync system should hand out token5")
-	}
-
-	cancel()
-
-	// NOW ASYNC AND SYNC POOLS ARE FULL
-
-	tok1.Close()
-	tok5.Close()
-
-	// attempt to close tok2 twice.. This should be OK.
-	tok2.Close()
-
-	// POOLS should all be empty now
-	getTrackerTestVals(tr, &vals)
-	if vals.msu != 0 || vals.mau != 0 {
-		t.Fatalf("faulty state MEM %#v", vals)
-	}
-	if vals.csu != 0 || vals.cau != 0 {
-		t.Fatalf("faulty state CPU %#v", vals)
-	}
-
-}