github.com/hhrutter/nomad@v0.6.0-rc2.0.20170723054333-80c4b03f0705/client/gc_test.go (about) 1 package client 2 3 import ( 4 "testing" 5 "time" 6 7 "github.com/hashicorp/nomad/client/stats" 8 "github.com/hashicorp/nomad/nomad/mock" 9 "github.com/hashicorp/nomad/nomad/structs" 10 ) 11 12 func gcConfig() *GCConfig { 13 return &GCConfig{ 14 DiskUsageThreshold: 80, 15 InodeUsageThreshold: 70, 16 Interval: 1 * time.Minute, 17 ReservedDiskMB: 0, 18 MaxAllocs: 100, 19 } 20 } 21 22 func TestIndexedGCAllocPQ(t *testing.T) { 23 t.Parallel() 24 pq := NewIndexedGCAllocPQ() 25 26 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 27 _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) 28 _, ar3 := testAllocRunnerFromAlloc(mock.Alloc(), false) 29 _, ar4 := testAllocRunnerFromAlloc(mock.Alloc(), false) 30 31 pq.Push(ar1) 32 pq.Push(ar2) 33 pq.Push(ar3) 34 pq.Push(ar4) 35 36 allocID := pq.Pop().allocRunner.Alloc().ID 37 if allocID != ar1.Alloc().ID { 38 t.Fatalf("expected alloc %v, got %v", allocID, ar1.Alloc().ID) 39 } 40 41 allocID = pq.Pop().allocRunner.Alloc().ID 42 if allocID != ar2.Alloc().ID { 43 t.Fatalf("expected alloc %v, got %v", allocID, ar1.Alloc().ID) 44 } 45 46 allocID = pq.Pop().allocRunner.Alloc().ID 47 if allocID != ar3.Alloc().ID { 48 t.Fatalf("expected alloc %v, got %v", allocID, ar1.Alloc().ID) 49 } 50 51 allocID = pq.Pop().allocRunner.Alloc().ID 52 if allocID != ar4.Alloc().ID { 53 t.Fatalf("expected alloc %v, got %v", allocID, ar1.Alloc().ID) 54 } 55 56 gcAlloc := pq.Pop() 57 if gcAlloc != nil { 58 t.Fatalf("expected nil, got %v", gcAlloc) 59 } 60 } 61 62 // MockAllocCounter implements AllocCounter interface. 63 type MockAllocCounter struct { 64 allocs int 65 } 66 67 func (m *MockAllocCounter) NumAllocs() int { 68 return m.allocs 69 } 70 71 type MockStatsCollector struct { 72 availableValues []uint64 73 usedPercents []float64 74 inodePercents []float64 75 index int 76 } 77 78 func (m *MockStatsCollector) Collect() error { 79 return nil 80 } 81 82 func (m *MockStatsCollector) Stats() *stats.HostStats { 83 if len(m.availableValues) == 0 { 84 return nil 85 } 86 87 available := m.availableValues[m.index] 88 usedPercent := m.usedPercents[m.index] 89 inodePercent := m.inodePercents[m.index] 90 91 if m.index < len(m.availableValues)-1 { 92 m.index = m.index + 1 93 } 94 return &stats.HostStats{ 95 AllocDirStats: &stats.DiskStats{ 96 Available: available, 97 UsedPercent: usedPercent, 98 InodesUsedPercent: inodePercent, 99 }, 100 } 101 } 102 103 func TestAllocGarbageCollector_MarkForCollection(t *testing.T) { 104 t.Parallel() 105 logger := testLogger() 106 gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) 107 108 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 109 if err := gc.MarkForCollection(ar1); err != nil { 110 t.Fatalf("err: %v", err) 111 } 112 113 gcAlloc := gc.allocRunners.Pop() 114 if gcAlloc == nil || gcAlloc.allocRunner != ar1 { 115 t.Fatalf("bad gcAlloc: %v", gcAlloc) 116 } 117 } 118 119 func TestAllocGarbageCollector_Collect(t *testing.T) { 120 t.Parallel() 121 logger := testLogger() 122 gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) 123 124 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 125 _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) 126 if err := gc.MarkForCollection(ar1); err != nil { 127 t.Fatalf("err: %v", err) 128 } 129 if err := gc.MarkForCollection(ar2); err != nil { 130 t.Fatalf("err: %v", err) 131 } 132 133 // Fake that ar.Run() exits 134 close(ar1.waitCh) 135 close(ar2.waitCh) 136 137 if err := gc.Collect(ar1.Alloc().ID); err != nil { 138 t.Fatalf("err: %v", err) 139 } 140 gcAlloc := gc.allocRunners.Pop() 141 if gcAlloc == nil || gcAlloc.allocRunner != ar2 { 142 t.Fatalf("bad gcAlloc: %v", gcAlloc) 143 } 144 } 145 146 func TestAllocGarbageCollector_CollectAll(t *testing.T) { 147 t.Parallel() 148 logger := testLogger() 149 gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) 150 151 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 152 _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) 153 if err := gc.MarkForCollection(ar1); err != nil { 154 t.Fatalf("err: %v", err) 155 } 156 if err := gc.MarkForCollection(ar2); err != nil { 157 t.Fatalf("err: %v", err) 158 } 159 160 if err := gc.CollectAll(); err != nil { 161 t.Fatalf("err: %v", err) 162 } 163 gcAlloc := gc.allocRunners.Pop() 164 if gcAlloc != nil { 165 t.Fatalf("bad gcAlloc: %v", gcAlloc) 166 } 167 } 168 169 func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) { 170 t.Parallel() 171 logger := testLogger() 172 statsCollector := &MockStatsCollector{} 173 conf := gcConfig() 174 conf.ReservedDiskMB = 20 175 gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) 176 177 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 178 close(ar1.waitCh) 179 _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) 180 close(ar2.waitCh) 181 if err := gc.MarkForCollection(ar1); err != nil { 182 t.Fatalf("err: %v", err) 183 } 184 if err := gc.MarkForCollection(ar2); err != nil { 185 t.Fatalf("err: %v", err) 186 } 187 188 // Make stats collector report 200MB free out of which 20MB is reserved 189 statsCollector.availableValues = []uint64{200 * MB} 190 statsCollector.usedPercents = []float64{0} 191 statsCollector.inodePercents = []float64{0} 192 193 alloc := mock.Alloc() 194 alloc.Resources.DiskMB = 150 195 if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil { 196 t.Fatalf("err: %v", err) 197 } 198 199 // When we have enough disk available and don't need to do any GC so we 200 // should have two ARs in the GC queue 201 for i := 0; i < 2; i++ { 202 if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil { 203 t.Fatalf("err: %v", gcAlloc) 204 } 205 } 206 } 207 208 func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) { 209 t.Parallel() 210 logger := testLogger() 211 statsCollector := &MockStatsCollector{} 212 conf := gcConfig() 213 conf.ReservedDiskMB = 20 214 gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) 215 216 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 217 close(ar1.waitCh) 218 _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) 219 close(ar2.waitCh) 220 if err := gc.MarkForCollection(ar1); err != nil { 221 t.Fatalf("err: %v", err) 222 } 223 if err := gc.MarkForCollection(ar2); err != nil { 224 t.Fatalf("err: %v", err) 225 } 226 227 // Make stats collector report 80MB and 175MB free in subsequent calls 228 statsCollector.availableValues = []uint64{80 * MB, 80 * MB, 175 * MB} 229 statsCollector.usedPercents = []float64{0, 0, 0} 230 statsCollector.inodePercents = []float64{0, 0, 0} 231 232 alloc := mock.Alloc() 233 alloc.Resources.DiskMB = 150 234 if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil { 235 t.Fatalf("err: %v", err) 236 } 237 238 // We should be GC-ing one alloc 239 if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil { 240 t.Fatalf("err: %v", gcAlloc) 241 } 242 243 if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil { 244 t.Fatalf("gcAlloc: %v", gcAlloc) 245 } 246 } 247 248 func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) { 249 t.Parallel() 250 logger := testLogger() 251 statsCollector := &MockStatsCollector{} 252 conf := gcConfig() 253 conf.ReservedDiskMB = 20 254 gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) 255 256 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 257 close(ar1.waitCh) 258 _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) 259 close(ar2.waitCh) 260 if err := gc.MarkForCollection(ar1); err != nil { 261 t.Fatalf("err: %v", err) 262 } 263 if err := gc.MarkForCollection(ar2); err != nil { 264 t.Fatalf("err: %v", err) 265 } 266 267 // Make stats collector report 80MB and 95MB free in subsequent calls 268 statsCollector.availableValues = []uint64{80 * MB, 80 * MB, 95 * MB} 269 statsCollector.usedPercents = []float64{0, 0, 0} 270 statsCollector.inodePercents = []float64{0, 0, 0} 271 272 alloc := mock.Alloc() 273 alloc.Resources.DiskMB = 150 274 if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil { 275 t.Fatalf("err: %v", err) 276 } 277 278 // We should be GC-ing all the alloc runners 279 if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil { 280 t.Fatalf("gcAlloc: %v", gcAlloc) 281 } 282 } 283 284 func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) { 285 t.Parallel() 286 logger := testLogger() 287 statsCollector := &MockStatsCollector{} 288 conf := gcConfig() 289 conf.ReservedDiskMB = 20 290 gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) 291 292 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 293 close(ar1.waitCh) 294 _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) 295 close(ar2.waitCh) 296 if err := gc.MarkForCollection(ar1); err != nil { 297 t.Fatalf("err: %v", err) 298 } 299 if err := gc.MarkForCollection(ar2); err != nil { 300 t.Fatalf("err: %v", err) 301 } 302 303 alloc := mock.Alloc() 304 alloc.Resources.DiskMB = 150 305 if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil { 306 t.Fatalf("err: %v", err) 307 } 308 309 // We should be GC-ing one alloc 310 if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil { 311 t.Fatalf("err: %v", gcAlloc) 312 } 313 314 if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil { 315 t.Fatalf("gcAlloc: %v", gcAlloc) 316 } 317 } 318 319 func TestAllocGarbageCollector_MakeRoomForAllocations_MaxAllocs(t *testing.T) { 320 t.Parallel() 321 const ( 322 liveAllocs = 3 323 maxAllocs = 6 324 gcAllocs = 4 325 gcAllocsLeft = 1 326 ) 327 328 logger := testLogger() 329 statsCollector := &MockStatsCollector{ 330 availableValues: []uint64{10 * 1024 * MB}, 331 usedPercents: []float64{0}, 332 inodePercents: []float64{0}, 333 } 334 allocCounter := &MockAllocCounter{allocs: liveAllocs} 335 conf := gcConfig() 336 conf.MaxAllocs = maxAllocs 337 gc := NewAllocGarbageCollector(logger, statsCollector, allocCounter, conf) 338 339 for i := 0; i < gcAllocs; i++ { 340 _, ar := testAllocRunnerFromAlloc(mock.Alloc(), false) 341 close(ar.waitCh) 342 if err := gc.MarkForCollection(ar); err != nil { 343 t.Fatalf("error marking alloc for gc: %v", err) 344 } 345 } 346 347 if err := gc.MakeRoomFor([]*structs.Allocation{mock.Alloc(), mock.Alloc()}); err != nil { 348 t.Fatalf("error making room for 2 new allocs: %v", err) 349 } 350 351 // There should be gcAllocsLeft alloc runners left to be collected 352 if n := len(gc.allocRunners.index); n != gcAllocsLeft { 353 t.Fatalf("expected %d remaining GC-able alloc runners but found %d", gcAllocsLeft, n) 354 } 355 } 356 357 func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) { 358 t.Parallel() 359 logger := testLogger() 360 statsCollector := &MockStatsCollector{} 361 conf := gcConfig() 362 conf.ReservedDiskMB = 20 363 gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) 364 365 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 366 close(ar1.waitCh) 367 _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) 368 close(ar2.waitCh) 369 if err := gc.MarkForCollection(ar1); err != nil { 370 t.Fatalf("err: %v", err) 371 } 372 if err := gc.MarkForCollection(ar2); err != nil { 373 t.Fatalf("err: %v", err) 374 } 375 376 statsCollector.availableValues = []uint64{1000} 377 statsCollector.usedPercents = []float64{20} 378 statsCollector.inodePercents = []float64{10} 379 380 if err := gc.keepUsageBelowThreshold(); err != nil { 381 t.Fatalf("err: %v", err) 382 } 383 384 // We shouldn't GC any of the allocs since the used percent values are below 385 // threshold 386 for i := 0; i < 2; i++ { 387 if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil { 388 t.Fatalf("err: %v", gcAlloc) 389 } 390 } 391 } 392 393 func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) { 394 t.Parallel() 395 logger := testLogger() 396 statsCollector := &MockStatsCollector{} 397 conf := gcConfig() 398 conf.ReservedDiskMB = 20 399 gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) 400 401 _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) 402 close(ar1.waitCh) 403 _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) 404 close(ar2.waitCh) 405 if err := gc.MarkForCollection(ar1); err != nil { 406 t.Fatalf("err: %v", err) 407 } 408 if err := gc.MarkForCollection(ar2); err != nil { 409 t.Fatalf("err: %v", err) 410 } 411 412 statsCollector.availableValues = []uint64{1000, 800} 413 statsCollector.usedPercents = []float64{85, 60} 414 statsCollector.inodePercents = []float64{50, 30} 415 416 if err := gc.keepUsageBelowThreshold(); err != nil { 417 t.Fatalf("err: %v", err) 418 } 419 420 // We should be GC-ing only one of the alloc runners since the second time 421 // used percent returns a number below threshold. 422 if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil { 423 t.Fatalf("err: %v", gcAlloc) 424 } 425 426 if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil { 427 t.Fatalf("gcAlloc: %v", gcAlloc) 428 } 429 } 430 431 func TestAllocGarbageCollector_MaxAllocsThreshold(t *testing.T) { 432 t.Parallel() 433 const ( 434 liveAllocs = 3 435 maxAllocs = 6 436 gcAllocs = 4 437 gcAllocsLeft = 1 438 ) 439 440 logger := testLogger() 441 statsCollector := &MockStatsCollector{ 442 availableValues: []uint64{1000}, 443 usedPercents: []float64{0}, 444 inodePercents: []float64{0}, 445 } 446 allocCounter := &MockAllocCounter{allocs: liveAllocs} 447 conf := gcConfig() 448 conf.MaxAllocs = 4 449 gc := NewAllocGarbageCollector(logger, statsCollector, allocCounter, conf) 450 451 for i := 0; i < gcAllocs; i++ { 452 _, ar := testAllocRunnerFromAlloc(mock.Alloc(), false) 453 close(ar.waitCh) 454 if err := gc.MarkForCollection(ar); err != nil { 455 t.Fatalf("error marking alloc for gc: %v", err) 456 } 457 } 458 459 if err := gc.keepUsageBelowThreshold(); err != nil { 460 t.Fatalf("error gc'ing: %v", err) 461 } 462 463 // We should have gc'd down to MaxAllocs 464 if n := len(gc.allocRunners.index); n != gcAllocsLeft { 465 t.Fatalf("expected remaining gc allocs (%d) to equal %d", n, gcAllocsLeft) 466 } 467 }