// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	. "runtime"
	"testing"
	"time"
)

// TestGCCPULimiter exercises the GC CPU limiter's leaky-bucket state machine
// using a mock clock: GC CPU time fills the bucket, mutator CPU time drains
// it, and the limiter turns on when the bucket hits capacity. Fill, overflow,
// update scheduling, GC start/finish transitions, and capacity resizing are
// all checked against exactly-computed expected values, so the statements
// below are strictly order-dependent — each call advances the shared mock
// clock and mutates limiter state that later assertions rely on.
func TestGCCPULimiter(t *testing.T) {
	const procs = 14

	// Create mock time. advance moves the mock clock forward by d and
	// returns the new absolute tick count, which is what the limiter's
	// API consumes as "now".
	ticks := int64(0)
	advance := func(d time.Duration) int64 {
		t.Helper()
		ticks += int64(d)
		return ticks
	}

	// assistTime computes the CPU time for assists using frac of GOMAXPROCS
	// over the wall-clock duration d.
	assistTime := func(d time.Duration, frac float64) int64 {
		t.Helper()
		return int64(frac * float64(d) * procs)
	}

	l := NewGCCPULimiter(ticks, procs)

	// Do the whole test twice to make sure state doesn't leak across.
	var baseOverflow uint64 // Track total overflow across iterations.
	for i := 0; i < 2; i++ {
		t.Logf("Iteration %d", i+1)

		// The bucket must start empty at its full configured capacity
		// (the second iteration re-checks that the reset at the bottom
		// of the loop restored this state).
		if l.Capacity() != procs*CapacityPerProc {
			t.Fatalf("unexpected capacity: %d", l.Capacity())
		}
		if l.Fill() != 0 {
			t.Fatalf("expected empty bucket to start")
		}

		// Test filling the bucket with just mutator time.
		// Outside of a GC, pure mutator time must never accumulate fill,
		// no matter how much wall-clock time passes.

		l.Update(advance(10 * time.Millisecond))
		l.Update(advance(1 * time.Second))
		l.Update(advance(1 * time.Hour))
		if l.Fill() != 0 {
			t.Fatalf("expected empty bucket from only accumulating mutator time, got fill of %d cpu-ns", l.Fill())
		}

		// Test needUpdate: it should report true only once at least a full
		// update period has elapsed since the last Update call.

		if l.NeedUpdate(advance(GCCPULimiterUpdatePeriod / 2)) {
			t.Fatal("need update even though updated half a period ago")
		}
		if !l.NeedUpdate(advance(GCCPULimiterUpdatePeriod)) {
			t.Fatal("doesn't need update even though updated 1.5 periods ago")
		}
		l.Update(advance(0))
		if l.NeedUpdate(advance(0)) {
			t.Fatal("need update even though just updated")
		}

		// Test transitioning the bucket to enable the GC.
		// The 109ms is presumably an arbitrary stretch of mutator-only time
		// before the GC starts (it must not add fill, per the check above);
		// the 2ms+1µs between start and finish is the mark setup / STW-like
		// window, which is charged to the bucket at full procs utilization.

		l.StartGCTransition(true, advance(109*time.Millisecond))
		l.FinishGCTransition(advance(2*time.Millisecond + 1*time.Microsecond))

		if expect := uint64((2*time.Millisecond + 1*time.Microsecond) * procs); l.Fill() != expect {
			t.Fatalf("expected fill of %d, got %d cpu-ns", expect, l.Fill())
		}

		// Test passing time without assists during a GC. Specifically, just enough to drain the bucket to
		// exactly procs nanoseconds (easier to get to because of rounding).
		//
		// During a GC with no assists, the bucket fills at gcBackgroundUtilization
		// and drains at (1-gcBackgroundUtilization), so the wall-clock window
		// needed to bring the fill down to n is derived as follows:
		//
		//   fill + (window * procs * gcBackgroundUtilization - window * procs * (1-gcBackgroundUtilization)) = n
		//   fill = n - (window * procs * gcBackgroundUtilization - window * procs * (1-gcBackgroundUtilization))
		//   fill = n + window * procs * ((1-gcBackgroundUtilization) - gcBackgroundUtilization)
		//   fill = n + window * procs * (1-2*gcBackgroundUtilization)
		//   window = (fill - n) / (procs * (1-2*gcBackgroundUtilization))
		//
		// And here we want n=procs:
		factor := (1 / (1 - 2*GCBackgroundUtilization))
		fill := (2*time.Millisecond + 1*time.Microsecond) * procs
		l.Update(advance(time.Duration(factor * float64(fill-procs) / procs)))
		if l.Fill() != procs {
			t.Fatalf("expected fill %d cpu-ns from draining after a GC started, got fill of %d cpu-ns", procs, l.Fill())
		}

		// Drain to zero for the rest of the test. Advancing by twice the
		// total capacity guarantees the bucket bottoms out regardless of
		// the background fill rate.
		l.Update(advance(2 * procs * CapacityPerProc))
		if l.Fill() != 0 {
			t.Fatalf("expected empty bucket from draining, got fill of %d cpu-ns", l.Fill())
		}

		// Test filling up the bucket with 50% total GC work (so, not moving the bucket at all).
		// assist (0.5-bg) + background (bg) = exactly half of all CPU, which is
		// the break-even point where fill and drain cancel out.
		l.AddAssistTime(assistTime(10*time.Millisecond, 0.5-GCBackgroundUtilization))
		l.Update(advance(10 * time.Millisecond))
		if l.Fill() != 0 {
			t.Fatalf("expected empty bucket from 50%% GC work, got fill of %d cpu-ns", l.Fill())
		}

		// Test adding to the bucket overall with 100% GC work.
		// With no mutator time at all, the full wall-clock window is charged:
		// fill grows by procs CPU-ns per wall-clock ns.
		l.AddAssistTime(assistTime(time.Millisecond, 1.0-GCBackgroundUtilization))
		l.Update(advance(time.Millisecond))
		if expect := uint64(procs * time.Millisecond); l.Fill() != expect {
			t.Errorf("expected %d fill from 100%% GC CPU, got fill of %d cpu-ns", expect, l.Fill())
		}
		if l.Limiting() {
			t.Errorf("limiter is enabled after filling bucket but shouldn't be")
		}
		if t.Failed() {
			t.FailNow()
		}

		// Test filling the bucket exactly full.
		// The previous step left 1ms worth of fill, so CapacityPerProc-1ms
		// more of 100% GC work tops it off precisely; the limiter must
		// switch on at exactly full, with no overflow yet.
		l.AddAssistTime(assistTime(CapacityPerProc-time.Millisecond, 1.0-GCBackgroundUtilization))
		l.Update(advance(CapacityPerProc - time.Millisecond))
		if l.Fill() != l.Capacity() {
			t.Errorf("expected bucket filled to capacity %d, got %d", l.Capacity(), l.Fill())
		}
		if !l.Limiting() {
			t.Errorf("limiter is not enabled after filling bucket but should be")
		}
		if l.Overflow() != 0+baseOverflow {
			t.Errorf("bucket filled exactly should not have overflow, found %d", l.Overflow())
		}
		if t.Failed() {
			t.FailNow()
		}

		// Test adding with a delta of exactly zero. That is, GC work is exactly 50% of all resources.
		// Specifically, the limiter should still be on, and no overflow should accumulate.
		l.AddAssistTime(assistTime(1*time.Second, 0.5-GCBackgroundUtilization))
		l.Update(advance(1 * time.Second))
		if l.Fill() != l.Capacity() {
			t.Errorf("expected bucket filled to capacity %d, got %d", l.Capacity(), l.Fill())
		}
		if !l.Limiting() {
			t.Errorf("limiter is not enabled after filling bucket but should be")
		}
		if l.Overflow() != 0+baseOverflow {
			t.Errorf("bucket filled exactly should not have overflow, found %d", l.Overflow())
		}
		if t.Failed() {
			t.FailNow()
		}

		// Drain the bucket by half: zero assist time over a CapacityPerProc
		// window leaves only background GC vs. mutator time, which nets out
		// to half the capacity drained. The limiter must switch back off.
		l.AddAssistTime(assistTime(CapacityPerProc, 0))
		l.Update(advance(CapacityPerProc))
		if expect := l.Capacity() / 2; l.Fill() != expect {
			t.Errorf("failed to drain to %d, got fill %d", expect, l.Fill())
		}
		if l.Limiting() {
			t.Errorf("limiter is enabled after draining bucket but shouldn't be")
		}
		if t.Failed() {
			t.FailNow()
		}

		// Test overfilling the bucket: 100% GC work over a full
		// CapacityPerProc window adds a whole capacity's worth on top of a
		// half-full bucket, so half a capacity must land in Overflow().
		l.AddAssistTime(assistTime(CapacityPerProc, 1.0-GCBackgroundUtilization))
		l.Update(advance(CapacityPerProc))
		if l.Fill() != l.Capacity() {
			t.Errorf("failed to fill to capacity %d, got fill %d", l.Capacity(), l.Fill())
		}
		if !l.Limiting() {
			t.Errorf("limiter is not enabled after overfill but should be")
		}
		if expect := uint64(CapacityPerProc * procs / 2); l.Overflow() != expect+baseOverflow {
			t.Errorf("bucket overfilled should have overflow %d, found %d", expect, l.Overflow())
		}
		if t.Failed() {
			t.FailNow()
		}

		// Test ending the cycle with some assists left over.
		// StartGCTransition(false, ...) ends the mark phase; the trailing
		// 1ms of 100% GC work can't fit in the full bucket, so it adds
		// another procs*1ms to overflow.
		l.AddAssistTime(assistTime(1*time.Millisecond, 1.0-GCBackgroundUtilization))
		l.StartGCTransition(false, advance(1*time.Millisecond))
		if l.Fill() != l.Capacity() {
			t.Errorf("failed to maintain fill to capacity %d, got fill %d", l.Capacity(), l.Fill())
		}
		if !l.Limiting() {
			t.Errorf("limiter is not enabled after overfill but should be")
		}
		if expect := uint64((CapacityPerProc/2 + time.Millisecond) * procs); l.Overflow() != expect+baseOverflow {
			t.Errorf("bucket overfilled should have overflow %d, found %d", expect, l.Overflow())
		}
		if t.Failed() {
			t.FailNow()
		}

		// Make sure the STW adds to the bucket: the 5ms between transition
		// start and finish is charged at full procs utilization, which all
		// spills into overflow since the bucket is already full.
		l.FinishGCTransition(advance(5 * time.Millisecond))
		if l.Fill() != l.Capacity() {
			t.Errorf("failed to maintain fill to capacity %d, got fill %d", l.Capacity(), l.Fill())
		}
		if !l.Limiting() {
			t.Errorf("limiter is not enabled after overfill but should be")
		}
		if expect := uint64((CapacityPerProc/2 + 6*time.Millisecond) * procs); l.Overflow() != expect+baseOverflow {
			t.Errorf("bucket overfilled should have overflow %d, found %d", expect, l.Overflow())
		}
		if t.Failed() {
			t.FailNow()
		}

		// Resize procs up and make sure limiting stops: the fill stays the
		// same in absolute terms but is now below the larger capacity.
		expectFill := l.Capacity()
		l.ResetCapacity(advance(0), procs+10)
		if l.Fill() != expectFill {
			t.Errorf("failed to maintain fill at old capacity %d, got fill %d", expectFill, l.Fill())
		}
		if l.Limiting() {
			t.Errorf("limiter is enabled after resetting capacity higher")
		}
		if expect := uint64((CapacityPerProc/2 + 6*time.Millisecond) * procs); l.Overflow() != expect+baseOverflow {
			t.Errorf("bucket overflow %d should have remained constant, found %d", expect, l.Overflow())
		}
		if t.Failed() {
			t.FailNow()
		}

		// Resize procs down and make sure limiting begins again.
		// Also make sure resizing doesn't affect overflow. This isn't
		// a case where we want to report overflow, because we're not
		// actively doing work to achieve it. It's that we have fewer
		// CPU resources now.
		l.ResetCapacity(advance(0), procs-10)
		if l.Fill() != l.Capacity() {
			t.Errorf("failed lower fill to new capacity %d, got fill %d", l.Capacity(), l.Fill())
		}
		if !l.Limiting() {
			t.Errorf("limiter is disabled after resetting capacity lower")
		}
		if expect := uint64((CapacityPerProc/2 + 6*time.Millisecond) * procs); l.Overflow() != expect+baseOverflow {
			t.Errorf("bucket overflow %d should have remained constant, found %d", expect, l.Overflow())
		}
		if t.Failed() {
			t.FailNow()
		}

		// Get back to a zero state. The top of the loop will double check.
		l.ResetCapacity(advance(CapacityPerProc*procs), procs)

		// Track total overflow for future iterations.
		// Overflow is cumulative for the lifetime of the limiter, so the
		// second pass must assert against the running total, not zero.
		baseOverflow += uint64((CapacityPerProc/2 + 6*time.Millisecond) * procs)
	}
}