github.com/djenriquez/nomad-1@v0.8.1/client/gc_test.go

package client

import (
	"fmt"
	"testing"
	"time"

	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/stats"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
)

func gcConfig() *GCConfig {
	return &GCConfig{
		DiskUsageThreshold:  80,
		InodeUsageThreshold: 70,
		Interval:            1 * time.Minute,
		ReservedDiskMB:      0,
		MaxAllocs:           100,
	}
}

func TestIndexedGCAllocPQ(t *testing.T) {
	t.Parallel()
	pq := NewIndexedGCAllocPQ()

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	_, ar3 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	_, ar4 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)

	pq.Push(ar1)
	pq.Push(ar2)
	pq.Push(ar3)
	pq.Push(ar4)

	allocID := pq.Pop().allocRunner.Alloc().ID
	if allocID != ar1.Alloc().ID {
		t.Fatalf("expected alloc %v, got %v", ar1.Alloc().ID, allocID)
	}

	allocID = pq.Pop().allocRunner.Alloc().ID
	if allocID != ar2.Alloc().ID {
		t.Fatalf("expected alloc %v, got %v", ar2.Alloc().ID, allocID)
	}

	allocID = pq.Pop().allocRunner.Alloc().ID
	if allocID != ar3.Alloc().ID {
		t.Fatalf("expected alloc %v, got %v", ar3.Alloc().ID, allocID)
	}

	allocID = pq.Pop().allocRunner.Alloc().ID
	if allocID != ar4.Alloc().ID {
		t.Fatalf("expected alloc %v, got %v", ar4.Alloc().ID, allocID)
	}

	gcAlloc := pq.Pop()
	if gcAlloc != nil {
		t.Fatalf("expected nil, got %v", gcAlloc)
	}
}

// MockAllocCounter implements the AllocCounter interface.
type MockAllocCounter struct {
	allocs int
}

func (m *MockAllocCounter) NumAllocs() int {
	return m.allocs
}

type MockStatsCollector struct {
	availableValues []uint64
	usedPercents    []float64
	inodePercents   []float64
	index           int
}

func (m *MockStatsCollector) Collect() error {
	return nil
}

func (m *MockStatsCollector) Stats() *stats.HostStats {
	if len(m.availableValues) == 0 {
		return nil
	}

	available := m.availableValues[m.index]
	usedPercent := m.usedPercents[m.index]
	inodePercent := m.inodePercents[m.index]

	if m.index < len(m.availableValues)-1 {
		m.index = m.index + 1
	}
	return &stats.HostStats{
		AllocDirStats: &stats.DiskStats{
			Available:         available,
			UsedPercent:       usedPercent,
			InodesUsedPercent: inodePercent,
		},
	}
}
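
// Compile-time checks that the mocks satisfy the interfaces they stand in
// for. AllocCounter is the interface named in the comment above;
// stats.NodeStatsCollector is assumed to be the stats-side interface that
// NewAllocGarbageCollector accepts, and may be named differently in this
// package's actual API.
var (
	_ AllocCounter             = (*MockAllocCounter)(nil)
	_ stats.NodeStatsCollector = (*MockStatsCollector)(nil)
)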

func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
	t.Parallel()
	logger := testLogger()
	gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	gc.MarkForCollection(ar1)

	gcAlloc := gc.allocRunners.Pop()
	if gcAlloc == nil || gcAlloc.allocRunner != ar1 {
		t.Fatalf("bad gcAlloc: %v", gcAlloc)
	}
}

func TestAllocGarbageCollector_Collect(t *testing.T) {
	t.Parallel()
	logger := testLogger()
	gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	gc.MarkForCollection(ar1)
	gc.MarkForCollection(ar2)

	// Fake that ar.Run() exits
	close(ar1.waitCh)
	close(ar2.waitCh)

	gc.Collect(ar1.Alloc().ID)
	gcAlloc := gc.allocRunners.Pop()
	if gcAlloc == nil || gcAlloc.allocRunner != ar2 {
		t.Fatalf("bad gcAlloc: %v", gcAlloc)
	}
}

func TestAllocGarbageCollector_CollectAll(t *testing.T) {
	t.Parallel()
	logger := testLogger()
	gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	gc.MarkForCollection(ar1)
	gc.MarkForCollection(ar2)

	gc.CollectAll()
	gcAlloc := gc.allocRunners.Pop()
	if gcAlloc != nil {
		t.Fatalf("bad gcAlloc: %v", gcAlloc)
	}
}

func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) {
	t.Parallel()
	logger := testLogger()
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar1.waitCh)
	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar2.waitCh)
	gc.MarkForCollection(ar1)
	gc.MarkForCollection(ar2)

	// Make the stats collector report 200MB free, of which 20MB is reserved
	statsCollector.availableValues = []uint64{200 * MB}
	statsCollector.usedPercents = []float64{0}
	statsCollector.inodePercents = []float64{0}

	alloc := mock.Alloc()
	alloc.Resources.DiskMB = 150
	if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Since there is enough disk available, no GC is needed and both alloc
	// runners should still be in the GC queue
	for i := 0; i < 2; i++ {
		if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
			t.Fatalf("err: %v", gcAlloc)
		}
	}
}

func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
	t.Parallel()
	logger := testLogger()
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar1.waitCh)
	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar2.waitCh)
	gc.MarkForCollection(ar1)
	gc.MarkForCollection(ar2)

	// Make the stats collector report 80MB free on the first two calls and
	// 175MB on the third
	statsCollector.availableValues = []uint64{80 * MB, 80 * MB, 175 * MB}
	statsCollector.usedPercents = []float64{0, 0, 0}
	statsCollector.inodePercents = []float64{0, 0, 0}

	alloc := mock.Alloc()
	alloc.Resources.DiskMB = 150
	if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should be GC-ing one alloc
	if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
		t.Fatalf("err: %v", gcAlloc)
	}

	if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil {
		t.Fatalf("gcAlloc: %v", gcAlloc)
	}
}
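
// A note on the arithmetic in the MakeRoomFor tests above and below: the
// incoming alloc asks for 150MB and another 20MB is reserved, so the initial
// 80MB reading forces garbage collection. In the partial test a later
// reading of 175MB is enough for the new alloc, so only one runner is
// collected; in the test below the readings never rise above 95MB, so both
// queued runners end up collected.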

func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
	t.Parallel()
	logger := testLogger()
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar1.waitCh)
	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar2.waitCh)
	gc.MarkForCollection(ar1)
	gc.MarkForCollection(ar2)

	// Make the stats collector report 80MB free on the first two calls and
	// 95MB on the third
	statsCollector.availableValues = []uint64{80 * MB, 80 * MB, 95 * MB}
	statsCollector.usedPercents = []float64{0, 0, 0}
	statsCollector.inodePercents = []float64{0, 0, 0}

	alloc := mock.Alloc()
	alloc.Resources.DiskMB = 150
	if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should be GC-ing all the alloc runners
	if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil {
		t.Fatalf("gcAlloc: %v", gcAlloc)
	}
}

func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) {
	t.Parallel()
	logger := testLogger()
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar1.waitCh)
	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar2.waitCh)
	gc.MarkForCollection(ar1)
	gc.MarkForCollection(ar2)

	alloc := mock.Alloc()
	alloc.Resources.DiskMB = 150
	if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should be GC-ing one alloc
	if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
		t.Fatalf("err: %v", gcAlloc)
	}

	if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil {
		t.Fatalf("gcAlloc: %v", gcAlloc)
	}
}
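
// In the fallback test above the mock collector is given no values, so
// Stats() returns nil and MakeRoomFor cannot consult disk usage at all; the
// expectation is that it still frees space by collecting a queued alloc,
// leaving exactly one runner behind.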

// TestAllocGarbageCollector_MaxAllocs asserts that when making room for new
// allocs, terminal allocs are GC'd until old_allocs + new_allocs <= limit
func TestAllocGarbageCollector_MaxAllocs(t *testing.T) {
	t.Parallel()
	server, serverAddr := testServer(t, nil)
	defer server.Shutdown()
	testutil.WaitForLeader(t, server.RPC)

	const maxAllocs = 6
	client := TestClient(t, func(c *config.Config) {
		c.GCMaxAllocs = maxAllocs
		c.GCDiskUsageThreshold = 100
		c.GCInodeUsageThreshold = 100
		c.GCParallelDestroys = 1
		c.GCInterval = time.Hour

		c.RPCHandler = server
		c.Servers = []string{serverAddr}
		c.ConsulConfig.ClientAutoJoin = new(bool) // squelch logs
	})
	defer client.Shutdown()
	waitTilNodeReady(client, t)

	callN := 0
	assertAllocs := func(expectedAll, expectedDestroyed int) {
		// Wait for allocs to be started
		callN++
		client.logger.Printf("[TEST] %d -- Waiting for %d total allocs, %d GC'd", callN, expectedAll, expectedDestroyed)
		testutil.WaitForResult(func() (bool, error) {
			all, destroyed := 0, 0
			for _, ar := range client.getAllocRunners() {
				all++
				if ar.IsDestroyed() {
					destroyed++
				}
			}
			return all == expectedAll && destroyed == expectedDestroyed, fmt.Errorf(
				"expected %d allocs (found %d); expected %d destroyed (found %d)",
				expectedAll, all, expectedDestroyed, destroyed,
			)
		}, func(err error) {
			client.logger.Printf("[TEST] %d -- FAILED to find %d total allocs, %d GC'd!", callN, expectedAll, expectedDestroyed)
			t.Fatalf("%d alloc state: %v", callN, err)
		})
		client.logger.Printf("[TEST] %d -- Found %d total allocs, %d GC'd!", callN, expectedAll, expectedDestroyed)
	}

	// Create a job
	state := server.State()
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	job.TaskGroups[0].Tasks[0].Config["run_for"] = "30s"
	nodeID := client.Node().ID
	if err := state.UpsertJob(98, job); err != nil {
		t.Fatalf("error upserting job: %v", err)
	}
	if err := state.UpsertJobSummary(99, mock.JobSummary(job.ID)); err != nil {
		t.Fatalf("error upserting job summary: %v", err)
	}

	newAlloc := func() *structs.Allocation {
		alloc := mock.Alloc()
		alloc.JobID = job.ID
		alloc.Job = job
		alloc.NodeID = nodeID
		return alloc
	}

	// Create the allocations
	allocs := make([]*structs.Allocation, 7)
	for i := 0; i < len(allocs); i++ {
		allocs[i] = newAlloc()
	}

	// Upsert a copy of the allocs as modifying the originals later would
	// cause a race
	{
		allocsCopy := make([]*structs.Allocation, len(allocs))
		for i, a := range allocs {
			allocsCopy[i] = a.Copy()
		}
		if err := state.UpsertAllocs(100, allocsCopy); err != nil {
			t.Fatalf("error upserting initial allocs: %v", err)
		}
	}

	// 7 total, 0 GC'd
	assertAllocs(7, 0)

	// Set the first few as terminal so they're marked for gc
	const terminalN = 4
	for i := 0; i < terminalN; i++ {
		// Copy the alloc so the pointers aren't shared
		alloc := allocs[i].Copy()
		alloc.DesiredStatus = structs.AllocDesiredStatusStop
		allocs[i] = alloc
	}
	if err := state.UpsertAllocs(101, allocs[:terminalN]); err != nil {
		t.Fatalf("error upserting stopped allocs: %v", err)
	}

	// 7 total, 1 GC'd to get down to the limit of 6
	assertAllocs(7, 1)

	// Add one more alloc
	if err := state.UpsertAllocs(102, []*structs.Allocation{newAlloc()}); err != nil {
		t.Fatalf("error upserting new alloc: %v", err)
	}

	// 8 total, 2 GC'd to get down to the limit of 6.
	// If this fails it may be because the gc's Run and MakeRoomFor methods
	// are gc'ing concurrently; the gc's run loop may have to be disabled if
	// this test is flaky.
	assertAllocs(8, 2)

	// Add new allocs to cause the gc of old terminal ones
	newAllocs := make([]*structs.Allocation, 4)
	for i := 0; i < len(newAllocs); i++ {
		newAllocs[i] = newAlloc()
	}
	if err := state.UpsertAllocs(200, newAllocs); err != nil {
		t.Fatalf("error upserting %d new allocs: %v", len(newAllocs), err)
	}

	// 12 total, 4 GC'd total because all other allocs are alive
	assertAllocs(12, 4)
}
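
// The two tests below exercise keepUsageBelowThreshold against the gcConfig
// thresholds (80% disk, 70% inodes). Readings of 20%/10% are under both
// limits, so nothing is collected; a reading of 85% disk breaches the
// threshold and one runner is collected, after which the next reading
// (60%/30%) is back under the limits and collection stops.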

func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
	t.Parallel()
	logger := testLogger()
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar1.waitCh)
	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar2.waitCh)
	gc.MarkForCollection(ar1)
	gc.MarkForCollection(ar2)

	statsCollector.availableValues = []uint64{1000}
	statsCollector.usedPercents = []float64{20}
	statsCollector.inodePercents = []float64{10}

	if err := gc.keepUsageBelowThreshold(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We shouldn't GC any of the allocs since the used percent values are
	// below the thresholds
	for i := 0; i < 2; i++ {
		if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
			t.Fatalf("err: %v", gcAlloc)
		}
	}
}

func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
	t.Parallel()
	logger := testLogger()
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar1.waitCh)
	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
	close(ar2.waitCh)
	gc.MarkForCollection(ar1)
	gc.MarkForCollection(ar2)

	statsCollector.availableValues = []uint64{1000, 800}
	statsCollector.usedPercents = []float64{85, 60}
	statsCollector.inodePercents = []float64{50, 30}

	if err := gc.keepUsageBelowThreshold(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should be GC-ing only one of the alloc runners since the second
	// used percent reading is below the threshold
	if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
		t.Fatalf("err: %v", gcAlloc)
	}

	if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil {
		t.Fatalf("gcAlloc: %v", gcAlloc)
	}
}
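
// MockStatsCollector clamps its index at the last configured value, so once
// a value series is exhausted every further Stats call repeats the final
// entry. A minimal sketch verifying that behavior (an illustrative addition,
// not part of the original suite):
func TestMockStatsCollector_RepeatsLastValue(t *testing.T) {
	t.Parallel()
	m := &MockStatsCollector{
		availableValues: []uint64{100, 200},
		usedPercents:    []float64{10, 20},
		inodePercents:   []float64{1, 2},
	}
	m.Stats() // returns index 0, advances to index 1
	m.Stats() // returns index 1; index is now clamped at the end
	if got := m.Stats().AllocDirStats.Available; got != 200 {
		t.Fatalf("expected last value 200 to repeat, got %v", got)
	}
}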