// Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"bytes"
	"fmt"

	"github.com/petermattis/pebble/internal/base"
	"github.com/petermattis/pebble/internal/rangedel"
)

// mergingIter provides a merged view of multiple iterators from different
// levels of the LSM.
//
// The core of a mergingIter is a heap of internalIterators (see
// mergingIterHeap). The heap can operate as either a min-heap, used during
// forward iteration (First, SeekGE, Next), or a max-heap, used during reverse
// iteration (Last, SeekLT, Prev). The heap is initialized in calls to First,
// Last, SeekGE, and SeekLT. A call to Next or Prev takes the current top
// element on the heap, advances its iterator, and then "fixes" the heap
// property. When one of the child iterators is exhausted during Next/Prev
// iteration, it is removed from the heap.
//
// Range Deletions
//
// A mergingIter can optionally be configured with a slice of range deletion
// iterators. The range deletion iterator slice must exactly parallel the
// point iterators, and each range deletion iterator must correspond to the
// same level in the LSM as its point iterator. Note that each memtable and
// each table in L0 is a different "level" from the mergingIter perspective,
// so level 0 below does not correspond to L0 in the LSM.
//
// A range deletion iterator iterates over fragmented range tombstones. Range
// tombstones are fragmented by splitting them at any overlapping points. This
// fragmentation guarantees that within an sstable tombstones will either be
// distinct or will have identical start and end user keys. While range
// tombstones are fragmented within an sstable, the end keys are not truncated
// to sstable boundaries. This is necessary because the tombstone end key is
// exclusive and does not have a sequence number. Consider an sstable
// containing the range tombstone [a,c)#9 and the key "b#8". The tombstone
// must delete "b#8", yet older versions of "b" might spill over to the next
// sstable, so the boundary key for this sstable must be "b#8". Making
// tombstone end keys optionally inclusive, or giving them a sequence number,
// would be possible solutions. The approach taken here instead performs an
// implicit truncation of the tombstone to the sstable boundaries.
//
// During initialization of a mergingIter, the range deletion iterators for
// batches, memtables, and L0 tables are populated up front. Note that batches
// and memtables index unfragmented tombstones; Batch.newRangeDelIter() and
// memTable.newRangeDelIter() fragment and cache the tombstones on demand. The
// L1-L6 range deletion iterators are populated by levelIter. When configured
// to load range deletion iterators, whenever a levelIter loads a table it
// loads both the point iterator and the range deletion iterator.
// levelIter.rangeDelIter is configured to point to the right entry in
// mergingIter.rangeDelIters. The effect of this setup is that
// mergingIter.rangeDelIters[i] always contains the fragmented range
// tombstones for the current table in level i that the levelIter has open.
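//
// As an informal sketch of that arrangement (the concrete iterator names
// below are illustrative assumptions, not identifiers defined in this file),
// the parallel slices might look like:
//
//   iters:         [batchIter, memIter, l0aIter, l0bIter, l1Iter, ..., l6Iter]
//   rangeDelIters: [batchRD,   memRD,   l0aRD,   l0bRD,   l1RD,   ..., l6RD  ]
//
// where index i in both slices refers to the same mergingIter "level": a
// batch, a memtable, a single L0 table, or an entire L1-L6 LSM level served
// by a levelIter.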
//
// Another crucial mechanism of levelIter is that it materializes fake point
// entries for the table boundaries if the boundary is a range deletion
// key. Consider a table that contains only a range tombstone [a-e)#10. The
// sstable boundaries for this table will be a#10,15 and
// e#72057594037927935,15. During forward iteration levelIter will return
// e#72057594037927935,15 as a key. During reverse iteration levelIter will
// return a#10,15 as a key. These sentinel keys act as bookends to point
// iteration and allow mergingIter to keep a table and its associated range
// tombstones loaded as long as there are keys at lower levels that are within
// the bounds of the table.
//
// The final piece of the range deletion puzzle is the LSM invariant that for
// a given key K, newer versions of K can only exist earlier in the same
// level, or at higher levels of the tree. For example, if K#4 exists in L3,
// K#5 can only exist earlier in L3 or in L0, L1, L2, or a memtable. Get very
// explicitly uses this invariant to find the value for a key by walking the
// LSM level by level. For range deletions, this invariant means that a range
// deletion at level N will necessarily shadow any keys within its bounds in
// level Y where Y > N. One wrinkle to this statement is that it only applies
// to keys that also lie within the sstable bounds, but we get that guarantee
// due to the way the range deletion iterator and point iterator are bound
// together by a levelIter.
//
// Tying the above all together, we get a picture where each level (index in
// mergingIter.{iters,rangeDelIters}) is composed of both point operations (pX)
// and range deletions (rX). The range deletions for level X shadow both the
// point operations and range deletions for level Y where Y > X, allowing
// mergingIter to skip processing entries in that shadow. For example, consider
// the scenario:
//
//   r0: a---e
//   r1:    d---h
//   r2:       g---k
//   r3:          j---n
//   r4:             m---q
//
// This is showing 5 levels of range deletions. Consider what happens upon
// SeekGE("b"). We first seek the point iterator for level 0 (the point values
// are not shown above) and we then seek the range deletion iterator. That
// returns the tombstone [a,e). This tombstone tells us that all keys in the
// range [a,e) in lower levels are deleted, so we can skip them. So we can
// adjust the seek key to "e", the tombstone end key. For level 1 we seek to
// "e" and find the range tombstone [d,h), and similar logic holds. By the
// time we get to level 4 we're seeking to "n".
//
// One consequence of not truncating tombstone end keys to sstable boundaries
// is that the seeking process described above cannot always seek to the
// tombstone end key in the older level. For example, imagine in the above
// example that r3 is a partitioned level (i.e., L1+ in our LSM), and the
// sstable containing [j,n) has "k" as its upper boundary. In this situation,
// compactions involving keys at or after "k" can output those keys to r4+,
// even if they're newer than our tombstone [j,n). So instead of seeking to
// "n" in r4 we can only seek to "k". To achieve this, the instance variable
// `largestUserKeys` maintains the upper bounds of the current sstables in the
// partitioned levels.
// In this example, `largestUserKeys[3]` holds "k", telling us to limit the
// seek triggered by a tombstone in r3 to "k".
//
// During actual iteration levels can contain both point operations and range
// deletions. Within a level, when a range deletion contains a point operation
// the sequence numbers must be checked to determine if the point operation is
// newer or older than the range deletion tombstone. The mergingIter maintains
// the invariant that the range deletion iterators for all levels newer than
// the current iteration key (L < m.heap.items[0].index) are positioned at the
// next (or previous during reverse iteration) range deletion tombstone. We
// know those levels don't contain a range deletion tombstone that covers the
// current key because if they did the current key would be deleted. The range
// deletion iterator for the current key's level is positioned at a range
// tombstone covering or past the current key. The position of all other
// range deletion iterators is unspecified. Whenever a key from those levels
// becomes the current key, their range deletion iterators need to be
// positioned. This lazy positioning avoids seeking the range deletion
// iterators for keys that are never considered. (A similar bit of lazy
// evaluation can be done for the point iterators, but is still TBD.)
//
// For a full example, consider the following setup:
//
//   p0:               o
//   r0:             m---q
//
//   p1:              n p
//   r1:       g---k
//
//   p2:  b d    i
//   r2: a---e           q----v
//
//   p3:     e
//   r3:
//
// If we start iterating from the beginning, the first key we encounter is "b"
// in p2. When the mergingIter is pointing at a valid entry, the range deletion
// iterators for all of the levels < m.heap.items[0].index are positioned at
// the next range tombstone past the current key. So r0 will point at [m,q) and
// r1 at [g,k). When the key "b" is encountered, we check to see if the current
// tombstone for r0 or r1 contains it, and whether the tombstone for r2, [a,e),
// contains and is newer than "b".
//
// Advancing the iterator finds the next key at "d". This is in the same level
// as the previous key "b" so we don't have to reposition any of the range
// deletion iterators, but merely check whether "d" is now contained by any of
// the range tombstones at higher levels or has stepped past the range
// tombstone in its own level. In this case, there is nothing to be done.
//
// Advancing the iterator again finds "e". Since "e" comes from p3, we have to
// position the r3 range deletion iterator, which is empty. "e" is past the r2
// tombstone of [a,e) so we need to advance the r2 range deletion iterator to
// [q,v).
//
// The next key is "i". Because this key is in p2, a level above "e", we don't
// have to reposition any range deletion iterators and instead see that "i" is
// covered by the range tombstone [g,k). The iterator is immediately advanced
// to "n" which is covered by the range tombstone [m,q), causing the iterator
// to advance to "o" which is visible.
//
// TODO(peter,rangedel): For testing, advance the iterator through various
// scenarios and have each step display the current state (i.e. the current
// heap and range-del iterator positioning).
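//
// As a hedged usage sketch (the comparer and the concrete child iterators
// shown here are assumptions for illustration; they are not constructed in
// this file), a caller holding one internalIterator per level might merge
// and walk them as follows:
//
//   it := newMergingIter(cmp, batchIter, memIter, l0aIter, l1Iter)
//   for key, value := it.SeekGE([]byte("b")); key != nil; key, value = it.Next() {
//       // key/value is the merged view across all levels; entries deleted by
//       // newer range tombstones, or not visible at the configured snapshot,
//       // have already been skipped by findNextEntry.
//       _ = value
//   }
//   if err := it.Close(); err != nil {
//       // handle the first error encountered by any child iterator
//   }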
type mergingIter struct {
	dir             int
	snapshot        uint64
	iters           []internalIterator
	rangeDelIters   []internalIterator
	largestUserKeys [][]byte
	heap            mergingIterHeap
	err             error
	prefix          []byte
}

// mergingIter implements the internalIterator interface.
var _ internalIterator = (*mergingIter)(nil)

// newMergingIter returns an iterator that merges its input. Walking the
// resultant iterator will return all key/value pairs of all input iterators
// in strictly increasing key order, as defined by cmp.
//
// The input's key ranges may overlap, but there are assumed to be no
// duplicate keys: if iters[i] contains a key k then iters[j] will not contain
// that key k.
//
// None of the iters may be nil.
func newMergingIter(cmp Compare, iters ...internalIterator) *mergingIter {
	m := &mergingIter{}
	m.init(cmp, iters...)
	return m
}

func (m *mergingIter) init(cmp Compare, iters ...internalIterator) {
	m.snapshot = InternalKeySeqNumMax
	m.iters = iters
	m.heap.cmp = cmp
	m.heap.items = make([]mergingIterItem, 0, len(iters))
	m.initMinHeap()
}

func (m *mergingIter) initHeap() {
	m.heap.items = m.heap.items[:0]
	for i, t := range m.iters {
		if t.Valid() {
			m.heap.items = append(m.heap.items, mergingIterItem{
				index: i,
				key:   *t.Key(),
				value: t.Value(),
			})
		}
	}
	m.heap.init()
}

func (m *mergingIter) initMinHeap() {
	m.dir = 1
	m.heap.reverse = false
	m.initHeap()
	m.initMinRangeDelIters(-1)
}

func (m *mergingIter) initMinRangeDelIters(oldTopLevel int) {
	if m.rangeDelIters == nil || m.heap.len() == 0 {
		return
	}

	// Position the range-del iterators at levels <= m.heap.items[0].index.
	item := &m.heap.items[0]
	for level := oldTopLevel + 1; level <= item.index; level++ {
		rangeDelIter := m.rangeDelIters[level]
		if rangeDelIter == nil {
			continue
		}
		_ = rangedel.SeekGE(m.heap.cmp, rangeDelIter, item.key.UserKey, m.snapshot)
	}
}

func (m *mergingIter) initMaxHeap() {
	m.dir = -1
	m.heap.reverse = true
	m.initHeap()
	m.initMaxRangeDelIters(-1)
}

func (m *mergingIter) initMaxRangeDelIters(oldTopLevel int) {
	if m.rangeDelIters == nil || m.heap.len() == 0 {
		return
	}
	// Position the range-del iterators at levels <= m.heap.items[0].index.
	item := &m.heap.items[0]
	for level := oldTopLevel + 1; level <= item.index; level++ {
		rangeDelIter := m.rangeDelIters[level]
		if rangeDelIter == nil {
			continue
		}
		_ = rangedel.SeekLE(m.heap.cmp, rangeDelIter, item.key.UserKey, m.snapshot)
	}
}

func (m *mergingIter) switchToMinHeap() {
	if m.heap.len() == 0 {
		m.First()
		return
	}

	// We're switching from using a max heap to a min heap. We need to advance
	// any iterator that is less than or equal to the current key. Consider the
	// scenario where we have 2 iterators being merged (user-key:seq-num):
	//
	//   i1: *a:2      b:2
	//   i2:      a:1      b:1
	//
	// The current key is a:2 and i2 is pointed at a:1. When we switch to
	// forward iteration, we want to return a key that is greater than a:2.
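	//
	// Concretely, the loop below steps every iterator other than the one
	// supplying the current key forward until it is exhausted or its key
	// sorts strictly after the current key in internal key order; the current
	// iterator itself only needs a single Next, since it is already
	// positioned at the current key.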

	key := m.heap.items[0].key
	cur := m.iters[m.heap.items[0].index]

	for _, i := range m.iters {
		if i == cur {
			continue
		}
		iterKey := i.Key()
		if iterKey == nil {
			iterKey, _ = i.Next()
		}
		for ; iterKey != nil; iterKey, _ = i.Next() {
			if base.InternalCompare(m.heap.cmp, key, *iterKey) < 0 {
				// key < iter-key
				break
			}
			// key >= iter-key
		}
	}

	// Special handling for the current iterator because we were using its key
	// above.
	cur.Next()
	m.initMinHeap()
}

func (m *mergingIter) switchToMaxHeap() {
	if m.heap.len() == 0 {
		m.Last()
		return
	}

	// We're switching from using a min heap to a max heap. We need to back up
	// any iterator that is greater than or equal to the current key. Consider
	// the scenario where we have 2 iterators being merged (user-key:seq-num):
	//
	//   i1: a:2      *b:2
	//   i2:      a:1       b:1
	//
	// The current key is b:2 and i2 is pointing at b:1. When we switch to
	// reverse iteration, we want to return a key that is less than b:2.
	key := m.heap.items[0].key
	cur := m.iters[m.heap.items[0].index]

	for _, i := range m.iters {
		if i == cur {
			continue
		}
		iterKey := i.Key()
		if iterKey == nil {
			iterKey, _ = i.Prev()
		}
		for ; iterKey != nil; iterKey, _ = i.Prev() {
			if base.InternalCompare(m.heap.cmp, key, *iterKey) > 0 {
				// key > iter-key
				break
			}
			// key <= iter-key
		}
	}

	// Special handling for the current iterator because we were using its key
	// above.
	cur.Prev()
	m.initMaxHeap()
}

func (m *mergingIter) nextEntry(item *mergingIterItem) {
	oldTopLevel := item.index
	iter := m.iters[item.index]
	if key, value := iter.Next(); key != nil {
		item.key, item.value = *key, value
		if m.heap.len() > 1 {
			m.heap.fix(0)
		}
	} else {
		m.err = iter.Error()
		if m.err == nil {
			m.heap.pop()
		}
	}
	m.initMinRangeDelIters(oldTopLevel)
}

func (m *mergingIter) isNextEntryDeleted(item *mergingIterItem) bool {
	// Look for a range deletion tombstone containing item.key at higher
	// levels (level < item.index). If we find such a range tombstone we know
	// it deletes the key in the current level. Also look for a range
	// deletion at the current level (level == item.index). If we find such a
	// range deletion we need to check whether it is newer than the current
	// entry.
	for level := 0; level <= item.index; level++ {
		rangeDelIter := m.rangeDelIters[level]
		if rangeDelIter == nil || !rangeDelIter.Valid() {
			continue
		}
		tombstone := rangedel.Tombstone{
			Start: *rangeDelIter.Key(),
			End:   rangeDelIter.Value(),
		}
		if m.heap.cmp(tombstone.End, item.key.UserKey) <= 0 {
			// The current key is at or past the tombstone end key.
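			// Reposition this level's range-del iterator at the first
			// tombstone visible at m.snapshot that contains or lies after
			// item.key, and use that tombstone for the checks below.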
			tombstone = rangedel.SeekGE(m.heap.cmp, rangeDelIter, item.key.UserKey, m.snapshot)
		}
		if tombstone.Empty() {
			continue
		}
		if tombstone.Contains(m.heap.cmp, item.key.UserKey) {
			if level < item.index {
				m.seekGE(tombstone.End, item.index)
				return true
			}
			if tombstone.Deletes(item.key.SeqNum()) {
				m.nextEntry(item)
				return true
			}
		}
	}
	return false
}

func (m *mergingIter) findNextEntry() (*InternalKey, []byte) {
	for m.heap.len() > 0 && m.err == nil {
		item := &m.heap.items[0]
		if m.rangeDelIters != nil && m.isNextEntryDeleted(item) {
			continue
		}
		if item.key.Visible(m.snapshot) {
			return &item.key, item.value
		}
		m.nextEntry(item)
	}
	return nil, nil
}

func (m *mergingIter) prevEntry(item *mergingIterItem) {
	oldTopLevel := item.index
	iter := m.iters[item.index]
	if key, value := iter.Prev(); key != nil {
		item.key, item.value = *key, value
		if m.heap.len() > 1 {
			m.heap.fix(0)
		}
	} else {
		m.err = iter.Error()
		if m.err == nil {
			m.heap.pop()
		}
	}
	m.initMaxRangeDelIters(oldTopLevel)
}

func (m *mergingIter) isPrevEntryDeleted(item *mergingIterItem) bool {
	// Look for a range deletion tombstone containing item.key at higher
	// levels (level < item.index). If we find such a range tombstone we know
	// it deletes the key in the current level. Also look for a range
	// deletion at the current level (level == item.index). If we find such a
	// range deletion we need to check whether it is newer than the current
	// entry.
	for level := 0; level <= item.index; level++ {
		rangeDelIter := m.rangeDelIters[level]
		if rangeDelIter == nil || !rangeDelIter.Valid() {
			continue
		}
		tombstone := rangedel.Tombstone{
			Start: *rangeDelIter.Key(),
			End:   rangeDelIter.Value(),
		}
		if m.heap.cmp(item.key.UserKey, tombstone.Start.UserKey) < 0 {
			// The current key is before the tombstone start key.
			tombstone = rangedel.SeekLE(m.heap.cmp, rangeDelIter, item.key.UserKey, m.snapshot)
		}
		if tombstone.Empty() {
			continue
		}
		if tombstone.Contains(m.heap.cmp, item.key.UserKey) {
			if level < item.index {
				m.seekLT(tombstone.Start.UserKey, item.index)
				return true
			}
			if tombstone.Deletes(item.key.SeqNum()) {
				m.prevEntry(item)
				return true
			}
		}
	}
	return false
}

func (m *mergingIter) findPrevEntry() (*InternalKey, []byte) {
	for m.heap.len() > 0 && m.err == nil {
		item := &m.heap.items[0]
		if m.rangeDelIters != nil && m.isPrevEntryDeleted(item) {
			continue
		}
		if item.key.Visible(m.snapshot) {
			return &item.key, item.value
		}
		m.prevEntry(item)
	}
	return nil, nil
}

func (m *mergingIter) seekGE(key []byte, level int) {
	// When seeking, we can use tombstones to adjust the key we seek to on each
	// level. Consider the series of range tombstones:
	//
	//   1: a---e
	//   2:    d---h
	//   3:       g---k
	//   4:          j---n
	//   5:             m---q
	//
	// If we SeekGE("b") we also find the tombstone that "b" resides within in
	// the first level, which is [a,e). Regardless of whether this tombstone
	// deletes "b" in that level, we know it deletes "b" in all lower levels,
	// so we adjust the search key in the next level to the tombstone end key
	// "e". We then SeekGE("e") in the second level and find the corresponding
	// tombstone [d,h).
	// This process continues and we end up seeking for "h" in the 3rd level,
	// "k" in the 4th level and "n" in the last level.
	//
	// TODO(peter,rangedel): In addition to the above we can delay seeking a
	// level (and any lower levels) when the current iterator position is
	// contained within a range tombstone at a higher level.

	for ; level < len(m.iters); level++ {
		iter := m.iters[level]
		if m.prefix != nil {
			iter.SeekPrefixGE(m.prefix, key)
		} else {
			iter.SeekGE(key)
		}

		if m.rangeDelIters != nil {
			if rangeDelIter := m.rangeDelIters[level]; rangeDelIter != nil {
				// The level has a range-del iterator. Find the tombstone containing
				// the search key.
				tombstone := rangedel.SeekGE(m.heap.cmp, rangeDelIter, key, m.snapshot)
				if !tombstone.Empty() && tombstone.Contains(m.heap.cmp, key) {
					if m.largestUserKeys[level] != nil &&
						m.heap.cmp(m.largestUserKeys[level], tombstone.End) < 0 {
						key = m.largestUserKeys[level]
					} else {
						key = tombstone.End
					}
				}
			}
		}
	}

	m.initMinHeap()
}

func (m *mergingIter) SeekGE(key []byte) (*InternalKey, []byte) {
	m.prefix = nil
	m.seekGE(key, 0 /* start level */)
	return m.findNextEntry()
}

func (m *mergingIter) SeekPrefixGE(prefix, key []byte) (*InternalKey, []byte) {
	m.prefix = prefix
	m.seekGE(key, 0 /* start level */)
	return m.findNextEntry()
}

func (m *mergingIter) seekLT(key []byte, level int) {
	// See the comment in seekGE regarding using tombstones to adjust the seek
	// target per level.
	m.prefix = nil
	for ; level < len(m.iters); level++ {
		m.iters[level].SeekLT(key)

		if m.rangeDelIters != nil {
			if rangeDelIter := m.rangeDelIters[level]; rangeDelIter != nil {
				// The level has a range-del iterator. Find the tombstone containing
				// the search key.
				tombstone := rangedel.SeekLE(m.heap.cmp, rangeDelIter, key, m.snapshot)
				if !tombstone.Empty() && tombstone.Contains(m.heap.cmp, key) {
					key = tombstone.Start.UserKey
				}
			}
		}
	}

	m.initMaxHeap()
}

func (m *mergingIter) SeekLT(key []byte) (*InternalKey, []byte) {
	m.prefix = nil
	m.seekLT(key, 0 /* start level */)
	return m.findPrevEntry()
}

func (m *mergingIter) First() (*InternalKey, []byte) {
	m.prefix = nil
	m.heap.items = m.heap.items[:0]
	for _, t := range m.iters {
		// TODO(peter): save key and value so we don't have to access t.Key() and
		// t.Value() in initHeap().
		t.First()
	}
	m.initMinHeap()
	return m.findNextEntry()
}

func (m *mergingIter) Last() (*InternalKey, []byte) {
	m.prefix = nil
	for _, t := range m.iters {
		// TODO(peter): save key and value so we don't have to access t.Key() and
		// t.Value() in initHeap().
		t.Last()
	}
	m.initMaxHeap()
	return m.findPrevEntry()
}

func (m *mergingIter) Next() (*InternalKey, []byte) {
	if m.err != nil {
		return nil, nil
	}

	if m.dir != 1 {
		m.switchToMinHeap()
		return m.findNextEntry()
	}

	if m.heap.len() == 0 {
		return nil, nil
	}

	m.nextEntry(&m.heap.items[0])
	return m.findNextEntry()
}

func (m *mergingIter) Prev() (*InternalKey, []byte) {
	if m.err != nil {
		return nil, nil
	}

	if m.dir != -1 {
		m.switchToMaxHeap()
		return m.findPrevEntry()
	}

	if m.heap.len() == 0 {
		return nil, nil
	}

	m.prevEntry(&m.heap.items[0])
	return m.findPrevEntry()
}

func (m *mergingIter) Key() *InternalKey {
	return &m.heap.items[0].key
}

func (m *mergingIter) Value() []byte {
	return m.heap.items[0].value
}

func (m *mergingIter) Valid() bool {
	return m.heap.len() > 0 && m.err == nil
}

func (m *mergingIter) Error() error {
	if m.heap.len() == 0 || m.err != nil {
		return m.err
	}
	return m.iters[m.heap.items[0].index].Error()
}

func (m *mergingIter) Close() error {
	for _, iter := range m.iters {
		if err := iter.Close(); err != nil && m.err == nil {
			m.err = err
		}
	}
	for _, iter := range m.rangeDelIters {
		if iter != nil {
			if err := iter.Close(); err != nil && m.err == nil {
				m.err = err
			}
		}
	}
	m.iters = nil
	m.rangeDelIters = nil
	m.heap.items = nil
	return m.err
}

func (m *mergingIter) SetBounds(lower, upper []byte) {
	for _, iter := range m.iters {
		iter.SetBounds(lower, upper)
	}
}

func (m *mergingIter) DebugString() string {
	var buf bytes.Buffer
	sep := ""
	for m.heap.len() > 0 {
		item := m.heap.pop()
		fmt.Fprintf(&buf, "%s%s:%d", sep, item.key.UserKey, item.key.SeqNum())
		sep = " "
	}
	if m.dir == 1 {
		m.initMinHeap()
	} else {
		m.initMaxHeap()
	}
	return buf.String()
}
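
// As an informal illustration of the merged ordering produced above (a
// sketch, not a test in this file): merging three point iterators containing
//
//   i0: a#3, c#3
//   i1: b#2, c#2
//   i2: a#1, d#1
//
// yields entries in internal key order a#3, a#1, b#2, c#3, c#2, d#1 (user
// keys ascending, sequence numbers descending within a user key), with any
// entry that is deleted by a newer range tombstone, or that is not visible at
// the configured snapshot, filtered out by findNextEntry/findPrevEntry.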