package compactor

import (
	"fmt"
	"strconv"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/grafana/pyroscope/pkg/metastore/compaction"
	"github.com/grafana/pyroscope/pkg/test"
)

// testConfig drives the planner in most tests below: a job becomes
// ready for a tenant/shard/level queue once it accumulates MaxBlocks
// entries for that level.
var testConfig = Config{
	Levels: []LevelConfig{
		{MaxBlocks: 3},
		{MaxBlocks: 2},
		{MaxBlocks: 2},
	},
}

// TestPlan_same_level verifies that same-level blocks are grouped into
// jobs by compaction key (tenant/shard/level), that jobs are emitted in
// the order the keys became ready, and that planning is idempotent:
// a fresh plan over the same queue re-produces previously planned jobs
// in addition to new ones.
func TestPlan_same_level(t *testing.T) {
	c := NewCompactor(testConfig, nil, nil, nil)

	var i int // The index is used outside the loop.
	for _, e := range []compaction.BlockEntry{
		{Tenant: "A", Shard: 0, Level: 0},
		{Tenant: "B", Shard: 2, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0},
		{Tenant: "B", Shard: 2, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0}, // TA-S1-L0 is ready
		{Tenant: "B", Shard: 2, Level: 0}, // TB-S2-L0
		{Tenant: "A", Shard: 0, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0},
		{Tenant: "A", Shard: 0, Level: 0}, // TA-S0-L0
		{Tenant: "B", Shard: 2, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0},
	} {
		e.Index = uint64(i)
		e.ID = strconv.Itoa(i)
		c.enqueue(e)
		i++
	}

	// Jobs are expected in the order their keys reached MaxBlocks (3)
	// above; block IDs within a job are the enqueue indices of the key.
	expected := []*jobPlan{
		{
			compactionKey: compactionKey{tenant: "A", shard: 1, level: 0},
			name:          "ffba6b12acb007e6-TA-S1-L0",
			blocks:        []string{"2", "3", "5"},
		},
		{
			compactionKey: compactionKey{tenant: "B", shard: 2, level: 0},
			name:          "3860b3ec2cf5bfa3-TB-S2-L0",
			blocks:        []string{"1", "4", "6"},
		},
		{
			compactionKey: compactionKey{tenant: "A", shard: 0, level: 0},
			name:          "6a1fee35d1568267-TA-S0-L0",
			blocks:        []string{"0", "7", "9"},
		},
	}

	p := &plan{compactor: c, blocks: newBlockIter()}
	planned := make([]*jobPlan, 0, len(expected))
	for j := p.nextJob(); j != nil; j = p.nextJob() {
		planned = append(planned, j)
	}
	assert.Equal(t, expected, planned)

	// Now we're adding some more blocks to produce more jobs,
	// using the same queue. We expect all the previously planned
	// jobs and new ones.
	expected = append(expected, []*jobPlan{
		{
			compactionKey: compactionKey{tenant: "A", shard: 1, level: 0},
			name:          "34d4246acbf55d05-TA-S1-L0",
			blocks:        []string{"8", "11", "13"},
		},
		{
			compactionKey: compactionKey{tenant: "B", shard: 2, level: 0},
			name:          "5567ff0cdb349aaf-TB-S2-L0",
			blocks:        []string{"10", "12", "14"},
		},
	}...)

	for _, e := range []compaction.BlockEntry{
		{Tenant: "B", Shard: 2, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0}, // TA-S1-L0 is ready
		{Tenant: "B", Shard: 2, Level: 0}, // TB-S2-L0
	} {
		e.Index = uint64(i)
		e.ID = strconv.Itoa(i)
		c.enqueue(e)
		i++
	}

	p = &plan{compactor: c, blocks: newBlockIter()}
	planned = planned[:0] // Old jobs should be re-planned.
	for j := p.nextJob(); j != nil; j = p.nextJob() {
		planned = append(planned, j)
	}
	assert.Equal(t, expected, planned)
}

// TestPlan_level_priority verifies that jobs from lower compaction
// levels are planned before higher-level ones, regardless of which
// level's job became ready first.
func TestPlan_level_priority(t *testing.T) {
	c := NewCompactor(testConfig, nil, nil, nil)

	// Lower level job should be planned first despite the arrival order.
	var i int
	for _, e := range []compaction.BlockEntry{
		{Tenant: "B", Shard: 2, Level: 1},
		{Tenant: "A", Shard: 1, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0},
		{Tenant: "B", Shard: 2, Level: 1}, // TB-S2-L1 is ready
		{Tenant: "A", Shard: 1, Level: 0}, // TA-S1-L0
	} {
		e.Index = uint64(i)
		e.ID = strconv.Itoa(i)
		c.enqueue(e)
		i++
	}

	// The L0 job (3 blocks) comes first, then the L1 job (2 blocks),
	// even though TB-S2-L1 reached its threshold earlier.
	expected := []*jobPlan{
		{
			compactionKey: compactionKey{tenant: "A", shard: 1, level: 0},
			name:          "3567f9a8f34203a9-TA-S1-L0",
			blocks:        []string{"1", "2", "4"},
		},
		{
			compactionKey: compactionKey{tenant: "B", shard: 2, level: 1},
			name:          "3254788b90b8fafc-TB-S2-L1",
			blocks:        []string{"0", "3"},
		},
	}

	p := &plan{compactor: c, blocks: newBlockIter()}
	planned := make([]*jobPlan, 0, len(expected))
	for j := p.nextJob(); j != nil; j = p.nextJob() {
		planned = append(planned, j)
	}

	assert.Equal(t, expected, planned)
}

// TestPlan_empty_queue verifies that nextJob returns nil for an empty
// queue and for a queue that has not yet reached the level's MaxBlocks
// threshold (2 at level 1 in testConfig).
func TestPlan_empty_queue(t *testing.T) {
	c := NewCompactor(testConfig, nil, nil, nil)

	p := &plan{compactor: c, blocks: newBlockIter()}
	assert.Nil(t, p.nextJob())

	c.enqueue(compaction.BlockEntry{
		Index:  0,
		ID:     "0",
		Tenant: "A",
		Shard:  1,
		Level:  1,
	})

	// L0 queue is empty.
	// L1 queue has one block: below MaxBlocks, so no job yet.
	p = &plan{compactor: c, blocks: newBlockIter()}
	assert.Nil(t, p.nextJob())

	c.enqueue(compaction.BlockEntry{
		Index:  1,
		ID:     "1",
		Tenant: "A",
		Shard:  1,
		Level:  1,
	})

	// L0 queue is empty.
	// L1 has blocks for a job. (NOTE(review): the original comment said
	// L2, but both entries above are enqueued at Level 1.)
	p = &plan{compactor: c, blocks: newBlockIter()}
	assert.NotNil(t, p.nextJob())
}

// TestPlan_deleted_blocks verifies that removing blocks from a queue
// invalidates a pending job for that key without corrupting planning
// for other keys, and that the affected key can still form a job once
// enough new blocks arrive.
func TestPlan_deleted_blocks(t *testing.T) {
	c := NewCompactor(testConfig, nil, nil, nil)

	var i int // The index is used outside the loop.
	for _, e := range []compaction.BlockEntry{
		{Tenant: "A", Shard: 1, Level: 0},
		{Tenant: "B", Shard: 2, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0},
		{Tenant: "B", Shard: 2, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0}, // TA-S1-L0 is ready
		{Tenant: "B", Shard: 2, Level: 0}, // TB-S2-L0
	} {
		e.Index = uint64(i)
		e.ID = strconv.Itoa(i)
		if !c.enqueue(e) {
			t.Errorf("failed to enqueue: %v", e)
		}
		i++
	}

	// Invalidate TA-S1-L0 plan by removing some blocks.
	remove(c.queue.levels[0], compactionKey{
		tenant: "A",
		shard:  1,
		level:  0,
	}, "0", "4")

	// Remaining queue state after removal:
	// "0" - - -
	// "1" {Tenant: "B", Shard: 2, Level: 0},
	// "2" {Tenant: "A", Shard: 1, Level: 0},
	// "3" {Tenant: "B", Shard: 2, Level: 0},
	// "4" - - - // TA-S1-L0 would be created here.
	// "5" {Tenant: "B", Shard: 2, Level: 0}, // TB-S2-L0 is ready
	expected := []*jobPlan{
		{
			compactionKey: compactionKey{tenant: "B", shard: 2, level: 0},
			name:          "5668d093d5b7cc2f-TB-S2-L0",
			blocks:        []string{"1", "3", "5"},
		},
	}

	p := &plan{compactor: c, blocks: newBlockIter()}
	planned := make([]*jobPlan, 0, len(expected))
	for j := p.nextJob(); j != nil; j = p.nextJob() {
		planned = append(planned, j)
	}
	assert.Equal(t, expected, planned)

	// Now we add some more blocks to make sure that the
	// invalidated queue can still be compacted.
	for _, e := range []compaction.BlockEntry{
		{Tenant: "A", Shard: 1, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0},
		{Tenant: "A", Shard: 1, Level: 0},
	} {
		e.Index = uint64(i)
		e.ID = strconv.Itoa(i)
		c.enqueue(e)
		i++
	}

	// The TA-S1-L0 job now precedes the TB-S2-L0 one: surviving block
	// "2" is compacted together with the newly added "6" and "7".
	expected = append([]*jobPlan{
		{
			compactionKey: compactionKey{tenant: "A", shard: 1, level: 0},
			name:          "69cebc117138be9-TA-S1-L0",
			blocks:        []string{"2", "6", "7"},
		},
	}, expected...)

	p = &plan{compactor: c, blocks: newBlockIter()}
	planned = planned[:0]
	for j := p.nextJob(); j != nil; j = p.nextJob() {
		planned = append(planned, j)
	}
	assert.Equal(t, expected, planned)
}

// TestPlan_deleted_batch verifies that a queue whose entire batch has
// been removed yields no jobs.
func TestPlan_deleted_batch(t *testing.T) {
	c := NewCompactor(testConfig, nil, nil, nil)

	// Zero-value entries all share the same (empty) compaction key.
	for i, e := range make([]compaction.BlockEntry, 3) {
		e.Index = uint64(i)
		e.ID = strconv.Itoa(i)
		c.enqueue(e)
	}

	remove(c.queue.levels[0], compactionKey{}, "0", "1", "2")

	p := &plan{compactor: c, blocks: newBlockIter()}
	assert.Nil(t, p.nextJob())
}

// TestPlan_compact_by_time verifies that blocks older than the level's
// MaxAge (relative to plan.now) are compacted even though the queue has
// not reached MaxBlocks, producing single-block jobs.
func TestPlan_compact_by_time(t *testing.T) {
	c := NewCompactor(Config{
		Levels: []LevelConfig{
			{MaxBlocks: 5, MaxAge: 5},
			{MaxBlocks: 5, MaxAge: 5},
		},
	}, nil, nil, nil)

	for _, e := range []compaction.BlockEntry{
		{Tenant: "A", Shard: 1, Level: 0, Index: 1, AppendedAt: 10, ID: "1"},
		{Tenant: "B", Shard: 0, Level: 0, Index: 2, AppendedAt: 20, ID: "2"},
		{Tenant: "A", Shard: 1, Level: 0, Index: 3, AppendedAt: 30, ID: "3"},
	} {
		c.enqueue(e)
	}

	// Third block remains in the queue as
	// we need another push to evict it.
	expected := []*jobPlan{
		{
			compactionKey: compactionKey{tenant: "A", shard: 1, level: 0},
			name:          "b7b41276360564d4-TA-S1-L0",
			blocks:        []string{"1"},
		},
		{
			compactionKey: compactionKey{tenant: "B", shard: 0, level: 0},
			name:          "6021b5621680598b-TB-S0-L0",
			blocks:        []string{"2"},
		},
	}

	// now=40: blocks appended at 10 and 20 exceed MaxAge=5.
	p := &plan{
		compactor: c,
		blocks:    newBlockIter(),
		now:       40,
	}

	planned := make([]*jobPlan, 0, len(expected))
	for j := p.nextJob(); j != nil; j = p.nextJob() {
		planned = append(planned, j)
	}

	assert.Equal(t, expected, planned)
}

// TestPlan_time_split verifies that blocks of the same key separated by
// a large time gap (6 hours here) are split into separate jobs rather
// than compacted together: two jobs covering all 15 blocks.
func TestPlan_time_split(t *testing.T) {
	s := DefaultConfig()
	// To skip tombstones for simplicity.
	s.CleanupBatchSize = 0
	c := NewCompactor(s, nil, nil, nil)
	now := test.Time("2024-09-23T00:00:00Z")

	// First batch: 10 blocks, 15 seconds apart.
	for i := 0; i < 10; i++ {
		now = now.Add(15 * time.Second)
		e := compaction.BlockEntry{
			Index:      uint64(i),
			AppendedAt: now.UnixNano(),
			Tenant:     "A",
			Shard:      1,
			Level:      0,
			ID:         test.ULID(now.Format(time.RFC3339)),
		}
		c.enqueue(e)
	}

	// Second batch: 5 more blocks after a 6-hour gap.
	// NOTE(review): Index restarts at 0 here, duplicating indices from
	// the first batch — presumably irrelevant to time-split planning,
	// but worth confirming it is intentional.
	now = now.Add(time.Hour * 6)
	for i := 0; i < 5; i++ {
		now = now.Add(15 * time.Second)
		e := compaction.BlockEntry{
			Index:      uint64(i),
			AppendedAt: now.UnixNano(),
			Tenant:     "A",
			Shard:      1,
			Level:      0,
			ID:         test.ULID(now.Format(time.RFC3339)),
		}
		c.enqueue(e)
	}

	p := &plan{
		compactor: c,
		blocks:    newBlockIter(),
		now:       now.UnixNano(),
	}

	var i int // Number of jobs planned.
	var n int // Total number of blocks across the planned jobs.
	for j := p.nextJob(); j != nil; j = p.nextJob() {
		i++
		n += len(j.blocks)
	}

	assert.Equal(t, 2, i)
	assert.Equal(t, 15, n)
}

// TestPlan_remove_staged_batch_corrupts_queue is a regression test:
// deleting all blocks of a staged (not yet flushed) batch must not
// corrupt the queue, and an in-flight plan iterator (p1) must not
// yield extra jobs afterwards.
func TestPlan_remove_staged_batch_corrupts_queue(t *testing.T) {
	c := NewCompactor(testConfig, nil, nil, nil)

	// Baseline key reaches MaxBlocks=3: exactly one job is plannable.
	for i := 0; i < 3; i++ {
		e := compaction.BlockEntry{
			Index:  uint64(i),
			ID:     fmt.Sprint(i),
			Tenant: "baseline",
			Shard:  1,
			Level:  0,
		}
		c.enqueue(e)
	}

	p1 := &plan{compactor: c, blocks: newBlockIter()}
	require.NotNil(t, p1.nextJob())
	require.Nil(t, p1.nextJob())

	// Add and remove blocks before they got to the compaction
	// queue, triggering removal of the staged batch.
	for i := 10; i < 12; i++ {
		e := compaction.BlockEntry{
			Index:  uint64(i + 10),
			ID:     fmt.Sprint(i),
			Tenant: "temp",
			Shard:  1,
			Level:  0,
		}
		c.enqueue(e)
	}

	// Delete the staged blocks directly, emptying the staged batch
	// for the "temp" key.
	level0 := c.queue.levels[0]
	tempKey := compactionKey{tenant: "temp", shard: 1, level: 0}
	if tempStaged, exists := level0.staged[tempKey]; exists {
		tempStaged.delete("10")
		tempStaged.delete("11")
	} else {
		t.Fatal("Compaction queue not found")
	}

	// The baseline job must still be plannable from a fresh iterator.
	p2 := &plan{compactor: c, blocks: newBlockIter()}
	if job := p2.nextJob(); job == nil {
		t.Fatal("🐛🐛🐛: Corrupted compaction queue")
	}

	require.Nil(t, p1.nextJob(), "A single job is expected.")
}