github.com/ttpreport/gvisor-ligolo@v0.0.0-20240123134145-a858404967ba/pkg/sentry/fsutil/file_range_set_impl.go

package fsutil

import (
	__generics_imported0 "github.com/ttpreport/gvisor-ligolo/pkg/sentry/memmap"
)

import (
	"bytes"
	"fmt"
)

// trackGaps is an optional parameter.
//
// If trackGaps is 1, the Set will track maximum gap size recursively,
// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
// case, Key must be an unsigned integer.
//
// trackGaps must be 0 or 1.
const FileRangetrackGaps = 0

var _ = uint8(FileRangetrackGaps << 7) // Will fail if not zero or one.

// dynamicGap is a type that disappears if trackGaps is 0.
type FileRangedynamicGap [FileRangetrackGaps]uint64

// Get returns the value of the gap.
//
// Precondition: trackGaps must be non-zero.
func (d *FileRangedynamicGap) Get() uint64 {
	return d[:][0]
}

// Set sets the value of the gap.
//
// Precondition: trackGaps must be non-zero.
func (d *FileRangedynamicGap) Set(v uint64) {
	d[:][0] = v
}

const (
	// minDegree is the minimum degree of an internal node in a Set B-tree.
	//
	//   - Any non-root node has at least minDegree-1 segments.
	//
	//   - Any non-root internal (non-leaf) node has at least minDegree children.
	//
	//   - The root node may have fewer than minDegree-1 segments, but it may
	//     only have 0 segments if the tree is empty.
	//
	// Our implementation requires minDegree >= 3. Higher values of minDegree
	// usually improve performance, but increase memory usage for small sets.
	FileRangeminDegree = 3

	FileRangemaxDegree = 2 * FileRangeminDegree
)

// A Set is a mapping of segments with non-overlapping Range keys. The zero
// value for a Set is an empty set. Set values are not safely movable nor
// copyable. Set is thread-compatible.
//
// +stateify savable
type FileRangeSet struct {
	root FileRangenode `state:".(*FileRangeSegmentDataSlices)"`
}

// IsEmpty returns true if the set contains no segments.
func (s *FileRangeSet) IsEmpty() bool {
	return s.root.nrSegments == 0
}

// IsEmptyRange returns true iff no segments in the set overlap the given
// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
// more efficient.
func (s *FileRangeSet) IsEmptyRange(r __generics_imported0.MappableRange) bool {
	switch {
	case r.Length() < 0:
		panic(fmt.Sprintf("invalid range %v", r))
	case r.Length() == 0:
		return true
	}
	_, gap := s.Find(r.Start)
	if !gap.Ok() {
		return false
	}
	return r.End <= gap.End()
}

// Span returns the total size of all segments in the set.
func (s *FileRangeSet) Span() uint64 {
	var sz uint64
	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		sz += seg.Range().Length()
	}
	return sz
}

// SpanRange returns the total size of the intersection of segments in the set
// with the given range.
func (s *FileRangeSet) SpanRange(r __generics_imported0.MappableRange) uint64 {
	switch {
	case r.Length() < 0:
		panic(fmt.Sprintf("invalid range %v", r))
	case r.Length() == 0:
		return 0
	}
	var sz uint64
	for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
		sz += seg.Range().Intersect(r).Length()
	}
	return sz
}
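// The sketch below is illustrative only and is not part of the generated set
// implementation: it shows how SpanRange might be used to measure how much of
// a mappable range is already covered by segments. The function name and the
// interpretation of the result are assumptions made for this example.
func exampleFileRangeSetCoverage(s *FileRangeSet, r __generics_imported0.MappableRange) (covered, missing uint64) {
	// SpanRange sums the lengths of the intersections of r with existing
	// segments; anything left over is not yet populated.
	covered = s.SpanRange(r)
	missing = r.Length() - covered
	return covered, missing
}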
// FirstSegment returns the first segment in the set. If the set is empty,
// FirstSegment returns a terminal iterator.
func (s *FileRangeSet) FirstSegment() FileRangeIterator {
	if s.root.nrSegments == 0 {
		return FileRangeIterator{}
	}
	return s.root.firstSegment()
}

// LastSegment returns the last segment in the set. If the set is empty,
// LastSegment returns a terminal iterator.
func (s *FileRangeSet) LastSegment() FileRangeIterator {
	if s.root.nrSegments == 0 {
		return FileRangeIterator{}
	}
	return s.root.lastSegment()
}

// FirstGap returns the first gap in the set.
func (s *FileRangeSet) FirstGap() FileRangeGapIterator {
	n := &s.root
	for n.hasChildren {
		n = n.children[0]
	}
	return FileRangeGapIterator{n, 0}
}

// LastGap returns the last gap in the set.
func (s *FileRangeSet) LastGap() FileRangeGapIterator {
	n := &s.root
	for n.hasChildren {
		n = n.children[n.nrSegments]
	}
	return FileRangeGapIterator{n, n.nrSegments}
}

// Find returns the segment or gap whose range contains the given key. If a
// segment is found, the returned Iterator is non-terminal and the
// returned GapIterator is terminal. Otherwise, the returned Iterator is
// terminal and the returned GapIterator is non-terminal.
func (s *FileRangeSet) Find(key uint64) (FileRangeIterator, FileRangeGapIterator) {
	n := &s.root
	for {

		lower := 0
		upper := n.nrSegments
		for lower < upper {
			i := lower + (upper-lower)/2
			if r := n.keys[i]; key < r.End {
				if key >= r.Start {
					return FileRangeIterator{n, i}, FileRangeGapIterator{}
				}
				upper = i
			} else {
				lower = i + 1
			}
		}
		i := lower
		if !n.hasChildren {
			return FileRangeIterator{}, FileRangeGapIterator{n, i}
		}
		n = n.children[i]
	}
}

// FindSegment returns the segment whose range contains the given key. If no
// such segment exists, FindSegment returns a terminal iterator.
func (s *FileRangeSet) FindSegment(key uint64) FileRangeIterator {
	seg, _ := s.Find(key)
	return seg
}

// LowerBoundSegment returns the segment with the lowest range that contains a
// key greater than or equal to min. If no such segment exists,
// LowerBoundSegment returns a terminal iterator.
func (s *FileRangeSet) LowerBoundSegment(min uint64) FileRangeIterator {
	seg, gap := s.Find(min)
	if seg.Ok() {
		return seg
	}
	return gap.NextSegment()
}

// UpperBoundSegment returns the segment with the highest range that contains a
// key less than or equal to max. If no such segment exists, UpperBoundSegment
// returns a terminal iterator.
func (s *FileRangeSet) UpperBoundSegment(max uint64) FileRangeIterator {
	seg, gap := s.Find(max)
	if seg.Ok() {
		return seg
	}
	return gap.PrevSegment()
}

// FindGap returns the gap containing the given key. If no such gap exists
// (i.e. the set contains a segment containing that key), FindGap returns a
// terminal iterator.
func (s *FileRangeSet) FindGap(key uint64) FileRangeGapIterator {
	_, gap := s.Find(key)
	return gap
}
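// Illustrative sketch, not part of the generated implementation: resolving a
// single offset through the set. In fsutil a segment's uint64 value is
// typically a base offset into a backing file, but that interpretation (and
// the function name) is an assumption made only for this example.
func exampleFileRangeSetLookup(s *FileRangeSet, offset uint64) (uint64, bool) {
	seg := s.FindSegment(offset)
	if !seg.Ok() {
		// No segment contains offset; the enclosing gap could be found with
		// s.FindGap(offset) instead.
		return 0, false
	}
	// Translate offset relative to the segment's start into the stored value.
	return seg.Value() + (offset - seg.Start()), true
}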
// LowerBoundGap returns the gap with the lowest range that is greater than or
// equal to min.
func (s *FileRangeSet) LowerBoundGap(min uint64) FileRangeGapIterator {
	seg, gap := s.Find(min)
	if gap.Ok() {
		return gap
	}
	return seg.NextGap()
}

// UpperBoundGap returns the gap with the highest range that is less than or
// equal to max.
func (s *FileRangeSet) UpperBoundGap(max uint64) FileRangeGapIterator {
	seg, gap := s.Find(max)
	if gap.Ok() {
		return gap
	}
	return seg.PrevGap()
}

// Add inserts the given segment into the set and returns true. If the new
// segment can be merged with adjacent segments, Add will do so. If the new
// segment would overlap an existing segment, Add returns false. If Add
// succeeds, all existing iterators are invalidated.
func (s *FileRangeSet) Add(r __generics_imported0.MappableRange, val uint64) bool {
	if r.Length() <= 0 {
		panic(fmt.Sprintf("invalid segment range %v", r))
	}
	gap := s.FindGap(r.Start)
	if !gap.Ok() {
		return false
	}
	if r.End > gap.End() {
		return false
	}
	s.Insert(gap, r, val)
	return true
}

// AddWithoutMerging inserts the given segment into the set and returns true.
// If it would overlap an existing segment, AddWithoutMerging does nothing and
// returns false. If AddWithoutMerging succeeds, all existing iterators are
// invalidated.
func (s *FileRangeSet) AddWithoutMerging(r __generics_imported0.MappableRange, val uint64) bool {
	if r.Length() <= 0 {
		panic(fmt.Sprintf("invalid segment range %v", r))
	}
	gap := s.FindGap(r.Start)
	if !gap.Ok() {
		return false
	}
	if r.End > gap.End() {
		return false
	}
	s.InsertWithoutMergingUnchecked(gap, r, val)
	return true
}
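// Illustrative sketch, not part of the generated implementation: populating a
// range with Add. The concrete offsets and the function name are assumptions
// made for this example.
func exampleFileRangeSetAdd(s *FileRangeSet) bool {
	r := __generics_imported0.MappableRange{Start: 0x0, End: 0x1000}
	// Add merges with adjacent segments when possible and returns false (with
	// the set unchanged) if r overlaps an existing segment.
	return s.Add(r, 0x4000)
}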
// Insert inserts the given segment into the given gap. If the new segment can
// be merged with adjacent segments, Insert will do so. Insert returns an
// iterator to the segment containing the inserted value (which may have been
// merged with other values). All existing iterators (including gap, but not
// including the returned iterator) are invalidated.
//
// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
//
// Insert is semantically equivalent to a InsertWithoutMerging followed by a
// Merge, but may be more efficient. Note that there is no unchecked variant of
// Insert since Insert must retrieve and inspect gap's predecessor and
// successor segments regardless.
func (s *FileRangeSet) Insert(gap FileRangeGapIterator, r __generics_imported0.MappableRange, val uint64) FileRangeIterator {
	if r.Length() <= 0 {
		panic(fmt.Sprintf("invalid segment range %v", r))
	}
	prev, next := gap.PrevSegment(), gap.NextSegment()
	if prev.Ok() && prev.End() > r.Start {
		panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
	}
	if next.Ok() && next.Start() < r.End {
		panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
	}
	if prev.Ok() && prev.End() == r.Start {
		if mval, ok := (FileRangeSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
			shrinkMaxGap := FileRangetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
			prev.SetEndUnchecked(r.End)
			prev.SetValue(mval)
			if shrinkMaxGap {
				gap.node.updateMaxGapLeaf()
			}
			if next.Ok() && next.Start() == r.End {
				val = mval
				if mval, ok := (FileRangeSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
					prev.SetEndUnchecked(next.End())
					prev.SetValue(mval)
					return s.Remove(next).PrevSegment()
				}
			}
			return prev
		}
	}
	if next.Ok() && next.Start() == r.End {
		if mval, ok := (FileRangeSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
			shrinkMaxGap := FileRangetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
			next.SetStartUnchecked(r.Start)
			next.SetValue(mval)
			if shrinkMaxGap {
				gap.node.updateMaxGapLeaf()
			}
			return next
		}
	}

	return s.InsertWithoutMergingUnchecked(gap, r, val)
}

// InsertWithoutMerging inserts the given segment into the given gap and
// returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
// If the gap cannot accommodate the segment, or if r is invalid,
// InsertWithoutMerging panics.
func (s *FileRangeSet) InsertWithoutMerging(gap FileRangeGapIterator, r __generics_imported0.MappableRange, val uint64) FileRangeIterator {
	if r.Length() <= 0 {
		panic(fmt.Sprintf("invalid segment range %v", r))
	}
	if gr := gap.Range(); !gr.IsSupersetOf(r) {
		panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
	}
	return s.InsertWithoutMergingUnchecked(gap, r, val)
}

// InsertWithoutMergingUnchecked inserts the given segment into the given gap
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
// Preconditions:
//   - r.Start >= gap.Start().
//   - r.End <= gap.End().
func (s *FileRangeSet) InsertWithoutMergingUnchecked(gap FileRangeGapIterator, r __generics_imported0.MappableRange, val uint64) FileRangeIterator {
	gap = gap.node.rebalanceBeforeInsert(gap)
	splitMaxGap := FileRangetrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
	copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
	copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
	gap.node.keys[gap.index] = r
	gap.node.values[gap.index] = val
	gap.node.nrSegments++
	if splitMaxGap {
		gap.node.updateMaxGapLeaf()
	}
	return FileRangeIterator{gap.node, gap.index}
}
// Remove removes the given segment and returns an iterator to the vacated gap.
// All existing iterators (including seg, but not including the returned
// iterator) are invalidated.
func (s *FileRangeSet) Remove(seg FileRangeIterator) FileRangeGapIterator {

	if seg.node.hasChildren {

		victim := seg.PrevSegment()

		seg.SetRangeUnchecked(victim.Range())
		seg.SetValue(victim.Value())

		nextAdjacentNode := seg.NextSegment().node
		if FileRangetrackGaps != 0 {
			nextAdjacentNode.updateMaxGapLeaf()
		}
		return s.Remove(victim).NextGap()
	}
	copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
	copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
	FileRangeSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
	seg.node.nrSegments--
	if FileRangetrackGaps != 0 {
		seg.node.updateMaxGapLeaf()
	}
	return seg.node.rebalanceAfterRemove(FileRangeGapIterator{seg.node, seg.index})
}

// RemoveAll removes all segments from the set. All existing iterators are
// invalidated.
func (s *FileRangeSet) RemoveAll() {
	s.root = FileRangenode{}
}

// RemoveRange removes all segments in the given range. An iterator to the
// newly formed gap is returned, and all existing iterators are invalidated.
func (s *FileRangeSet) RemoveRange(r __generics_imported0.MappableRange) FileRangeGapIterator {
	seg, gap := s.Find(r.Start)
	if seg.Ok() {
		seg = s.Isolate(seg, r)
		gap = s.Remove(seg)
	}
	for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
		seg = s.Isolate(seg, r)
		gap = s.Remove(seg)
	}
	return gap
}

// Merge attempts to merge two neighboring segments. If successful, Merge
// returns an iterator to the merged segment, and all existing iterators are
// invalidated. Otherwise, Merge returns a terminal iterator.
//
// If first is not the predecessor of second, Merge panics.
func (s *FileRangeSet) Merge(first, second FileRangeIterator) FileRangeIterator {
	if first.NextSegment() != second {
		panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
	}
	return s.MergeUnchecked(first, second)
}

// MergeUnchecked attempts to merge two neighboring segments. If successful,
// MergeUnchecked returns an iterator to the merged segment, and all existing
// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
// iterator.
//
// Precondition: first is the predecessor of second: first.NextSegment() ==
// second, first == second.PrevSegment().
func (s *FileRangeSet) MergeUnchecked(first, second FileRangeIterator) FileRangeIterator {
	if first.End() == second.Start() {
		if mval, ok := (FileRangeSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {

			first.SetEndUnchecked(second.End())
			first.SetValue(mval)

			return s.Remove(second).PrevSegment()
		}
	}
	return FileRangeIterator{}
}
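// Illustrative sketch, not part of the generated implementation: punching a
// hole in the set. The function name and range are assumptions made for this
// example.
func exampleFileRangeSetPunchHole(s *FileRangeSet) {
	hole := __generics_imported0.MappableRange{Start: 0x2000, End: 0x3000}
	// RemoveRange splits any segment straddling hole's boundaries, removes
	// everything inside, and returns an iterator to the newly formed gap; all
	// previously held iterators are invalidated.
	gap := s.RemoveRange(hole)
	_ = gap.Range()
}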
// MergeAll attempts to merge all adjacent segments in the set. All existing
// iterators are invalidated.
func (s *FileRangeSet) MergeAll() {
	seg := s.FirstSegment()
	if !seg.Ok() {
		return
	}
	next := seg.NextSegment()
	for next.Ok() {
		if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
			seg, next = mseg, mseg.NextSegment()
		} else {
			seg, next = next, next.NextSegment()
		}
	}
}

// MergeRange attempts to merge all adjacent segments that contain a key in the
// specific range. All existing iterators are invalidated.
func (s *FileRangeSet) MergeRange(r __generics_imported0.MappableRange) {
	seg := s.LowerBoundSegment(r.Start)
	if !seg.Ok() {
		return
	}
	next := seg.NextSegment()
	for next.Ok() && next.Range().Start < r.End {
		if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
			seg, next = mseg, mseg.NextSegment()
		} else {
			seg, next = next, next.NextSegment()
		}
	}
}

// MergeAdjacent attempts to merge the segment containing r.Start with its
// predecessor, and the segment containing r.End-1 with its successor.
func (s *FileRangeSet) MergeAdjacent(r __generics_imported0.MappableRange) {
	first := s.FindSegment(r.Start)
	if first.Ok() {
		if prev := first.PrevSegment(); prev.Ok() {
			s.Merge(prev, first)
		}
	}
	last := s.FindSegment(r.End - 1)
	if last.Ok() {
		if next := last.NextSegment(); next.Ok() {
			s.Merge(last, next)
		}
	}
}

// Split splits the given segment at the given key and returns iterators to the
// two resulting segments. All existing iterators (including seg, but not
// including the returned iterators) are invalidated.
//
// If the segment cannot be split at split (because split is at the start or
// end of the segment's range, so splitting would produce a segment with zero
// length, or because split falls outside the segment's range altogether),
// Split panics.
func (s *FileRangeSet) Split(seg FileRangeIterator, split uint64) (FileRangeIterator, FileRangeIterator) {
	if !seg.Range().CanSplitAt(split) {
		panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
	}
	return s.SplitUnchecked(seg, split)
}

// SplitUnchecked splits the given segment at the given key and returns
// iterators to the two resulting segments. All existing iterators (including
// seg, but not including the returned iterators) are invalidated.
//
// Preconditions: seg.Start() < key < seg.End().
func (s *FileRangeSet) SplitUnchecked(seg FileRangeIterator, split uint64) (FileRangeIterator, FileRangeIterator) {
	val1, val2 := (FileRangeSetFunctions{}).Split(seg.Range(), seg.Value(), split)
	end2 := seg.End()
	seg.SetEndUnchecked(split)
	seg.SetValue(val1)
	seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), __generics_imported0.MappableRange{split, end2}, val2)

	return seg2.PrevSegment(), seg2
}

// SplitAt splits the segment straddling split, if one exists. SplitAt returns
// true if a segment was split and false otherwise. If SplitAt splits a
// segment, all existing iterators are invalidated.
func (s *FileRangeSet) SplitAt(split uint64) bool {
	if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
		s.SplitUnchecked(seg, split)
		return true
	}
	return false
}
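// Illustrative sketch, not part of the generated implementation: establishing
// a segment boundary at a given offset before operating on one side of it.
// The function name is an assumption made for this example.
func exampleFileRangeSetSplitAt(s *FileRangeSet, offset uint64) {
	// SplitAt is a no-op (returning false) if no segment straddles offset or
	// if offset already coincides with a segment boundary.
	if s.SplitAt(offset) {
		// Iterators obtained before the split are now invalid and must be
		// re-acquired, e.g. via FindSegment.
		_ = s.FindSegment(offset)
	}
}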
// Isolate ensures that the given segment's range does not escape r by
// splitting at r.Start and r.End if necessary, and returns an updated iterator
// to the bounded segment. All existing iterators (including seg, but not
// including the returned iterators) are invalidated.
func (s *FileRangeSet) Isolate(seg FileRangeIterator, r __generics_imported0.MappableRange) FileRangeIterator {
	if seg.Range().CanSplitAt(r.Start) {
		_, seg = s.SplitUnchecked(seg, r.Start)
	}
	if seg.Range().CanSplitAt(r.End) {
		seg, _ = s.SplitUnchecked(seg, r.End)
	}
	return seg
}

// ApplyContiguous applies a function to a contiguous range of segments,
// splitting if necessary. The function is applied until the first gap is
// encountered, at which point the gap is returned. If the function is applied
// across the entire range, a terminal gap is returned. All existing iterators
// are invalidated.
//
// N.B. The Iterator must not be invalidated by the function.
func (s *FileRangeSet) ApplyContiguous(r __generics_imported0.MappableRange, fn func(seg FileRangeIterator)) FileRangeGapIterator {
	seg, gap := s.Find(r.Start)
	if !seg.Ok() {
		return gap
	}
	for {
		seg = s.Isolate(seg, r)
		fn(seg)
		if seg.End() >= r.End {
			return FileRangeGapIterator{}
		}
		gap = seg.NextGap()
		if !gap.IsEmpty() {
			return gap
		}
		seg = gap.NextSegment()
		if !seg.Ok() {

			return FileRangeGapIterator{}
		}
	}
}
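// Illustrative sketch, not part of the generated implementation: visiting
// every segment overlapping r and detecting whether r is fully covered. The
// function name is an assumption made for this example.
func exampleFileRangeSetApplyContiguous(s *FileRangeSet, r __generics_imported0.MappableRange) bool {
	// ApplyContiguous isolates each visited segment to r, so fn only ever
	// sees ranges contained in r; it stops at the first gap it encounters.
	gap := s.ApplyContiguous(r, func(seg FileRangeIterator) {
		// Per-segment work would go here; seg.Range() is a subset of r.
	})
	// A terminal gap means fn was applied across all of r; a non-terminal gap
	// contains the first offset in r that no segment covers.
	return !gap.Ok()
}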
// +stateify savable
type FileRangenode struct {
	// An internal binary tree node looks like:
	//
	//	   K
	//	  / \
	//	Cl   Cr
	//
	// where all keys in the subtree rooted by Cl (the left subtree) are less
	// than K (the key of the parent node), and all keys in the subtree rooted
	// by Cr (the right subtree) are greater than K.
	//
	// An internal B-tree node's indexes work out to look like:
	//
	//	  K0 K1 K2  ...   Kn-1
	//	 / \/ \/ \  ...  /  \
	//	C0 C1 C2 C3 ... Cn-1 Cn
	//
	// where n is nrSegments.
	nrSegments int

	// parent is a pointer to this node's parent. If this node is root, parent
	// is nil.
	parent *FileRangenode

	// parentIndex is the index of this node in parent.children.
	parentIndex int

	// Flag for internal nodes that is technically redundant with "children[0]
	// != nil", but is stored in the first cache line. "hasChildren" rather
	// than "isLeaf" because false must be the correct value for an empty root.
	hasChildren bool

	// The longest gap within this node. If the node is a leaf, it's simply the
	// maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
	// including the 0th and nrSegments-th gap possibly shared with its upper-level
	// nodes; if it's a non-leaf node, it's the max of all children's maxGap.
	maxGap FileRangedynamicGap

	// Nodes store keys and values in separate arrays to maximize locality in
	// the common case (scanning keys for lookup).
	keys     [FileRangemaxDegree - 1]__generics_imported0.MappableRange
	values   [FileRangemaxDegree - 1]uint64
	children [FileRangemaxDegree]*FileRangenode
}

// firstSegment returns the first segment in the subtree rooted by n.
//
// Preconditions: n.nrSegments != 0.
func (n *FileRangenode) firstSegment() FileRangeIterator {
	for n.hasChildren {
		n = n.children[0]
	}
	return FileRangeIterator{n, 0}
}

// lastSegment returns the last segment in the subtree rooted by n.
//
// Preconditions: n.nrSegments != 0.
func (n *FileRangenode) lastSegment() FileRangeIterator {
	for n.hasChildren {
		n = n.children[n.nrSegments]
	}
	return FileRangeIterator{n, n.nrSegments - 1}
}

func (n *FileRangenode) prevSibling() *FileRangenode {
	if n.parent == nil || n.parentIndex == 0 {
		return nil
	}
	return n.parent.children[n.parentIndex-1]
}

func (n *FileRangenode) nextSibling() *FileRangenode {
	if n.parent == nil || n.parentIndex == n.parent.nrSegments {
		return nil
	}
	return n.parent.children[n.parentIndex+1]
}

// rebalanceBeforeInsert splits n and its ancestors if they are full, as
// required for insertion, and returns an updated iterator to the position
// represented by gap.
func (n *FileRangenode) rebalanceBeforeInsert(gap FileRangeGapIterator) FileRangeGapIterator {
	if n.nrSegments < FileRangemaxDegree-1 {
		return gap
	}
	if n.parent != nil {
		gap = n.parent.rebalanceBeforeInsert(gap)
	}
	if n.parent == nil {

		left := &FileRangenode{
			nrSegments:  FileRangeminDegree - 1,
			parent:      n,
			parentIndex: 0,
			hasChildren: n.hasChildren,
		}
		right := &FileRangenode{
			nrSegments:  FileRangeminDegree - 1,
			parent:      n,
			parentIndex: 1,
			hasChildren: n.hasChildren,
		}
		copy(left.keys[:FileRangeminDegree-1], n.keys[:FileRangeminDegree-1])
		copy(left.values[:FileRangeminDegree-1], n.values[:FileRangeminDegree-1])
		copy(right.keys[:FileRangeminDegree-1], n.keys[FileRangeminDegree:])
		copy(right.values[:FileRangeminDegree-1], n.values[FileRangeminDegree:])
		n.keys[0], n.values[0] = n.keys[FileRangeminDegree-1], n.values[FileRangeminDegree-1]
		FileRangezeroValueSlice(n.values[1:])
		if n.hasChildren {
			copy(left.children[:FileRangeminDegree], n.children[:FileRangeminDegree])
			copy(right.children[:FileRangeminDegree], n.children[FileRangeminDegree:])
			FileRangezeroNodeSlice(n.children[2:])
			for i := 0; i < FileRangeminDegree; i++ {
				left.children[i].parent = left
				left.children[i].parentIndex = i
				right.children[i].parent = right
				right.children[i].parentIndex = i
			}
		}
		n.nrSegments = 1
		n.hasChildren = true
		n.children[0] = left
		n.children[1] = right

		if FileRangetrackGaps != 0 {
			left.updateMaxGapLocal()
			right.updateMaxGapLocal()
		}
		if gap.node != n {
			return gap
		}
		if gap.index < FileRangeminDegree {
			return FileRangeGapIterator{left, gap.index}
		}
		return FileRangeGapIterator{right, gap.index - FileRangeminDegree}
	}

	copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
	copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
	n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[FileRangeminDegree-1], n.values[FileRangeminDegree-1]
	copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
	for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
		n.parent.children[i].parentIndex = i
	}
	sibling := &FileRangenode{
		nrSegments:  FileRangeminDegree - 1,
		parent:      n.parent,
		parentIndex: n.parentIndex + 1,
		hasChildren: n.hasChildren,
	}
	n.parent.children[n.parentIndex+1] = sibling
	n.parent.nrSegments++
	copy(sibling.keys[:FileRangeminDegree-1], n.keys[FileRangeminDegree:])
	copy(sibling.values[:FileRangeminDegree-1], n.values[FileRangeminDegree:])
	FileRangezeroValueSlice(n.values[FileRangeminDegree-1:])
	if n.hasChildren {
		copy(sibling.children[:FileRangeminDegree], n.children[FileRangeminDegree:])
		FileRangezeroNodeSlice(n.children[FileRangeminDegree:])
		for i := 0; i < FileRangeminDegree; i++ {
			sibling.children[i].parent = sibling
			sibling.children[i].parentIndex = i
		}
	}
	n.nrSegments = FileRangeminDegree - 1

	if FileRangetrackGaps != 0 {
		n.updateMaxGapLocal()
		sibling.updateMaxGapLocal()
	}

	if gap.node != n {
		return gap
	}
	if gap.index < FileRangeminDegree {
		return gap
	}
	return FileRangeGapIterator{sibling, gap.index - FileRangeminDegree}
}

// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
// (contain fewer segments than required by B-tree invariants), as required for
// removal, and returns an updated iterator to the position represented by gap.
//
// Precondition: n is the only node in the tree that may currently violate a
// B-tree invariant.
func (n *FileRangenode) rebalanceAfterRemove(gap FileRangeGapIterator) FileRangeGapIterator {
	for {
		if n.nrSegments >= FileRangeminDegree-1 {
			return gap
		}
		if n.parent == nil {

			return gap
		}

		if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= FileRangeminDegree {
			copy(n.keys[1:], n.keys[:n.nrSegments])
			copy(n.values[1:], n.values[:n.nrSegments])
			n.keys[0] = n.parent.keys[n.parentIndex-1]
			n.values[0] = n.parent.values[n.parentIndex-1]
			n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
			n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
			FileRangeSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
			if n.hasChildren {
				copy(n.children[1:], n.children[:n.nrSegments+1])
				n.children[0] = sibling.children[sibling.nrSegments]
				sibling.children[sibling.nrSegments] = nil
				n.children[0].parent = n
				n.children[0].parentIndex = 0
				for i := 1; i < n.nrSegments+2; i++ {
					n.children[i].parentIndex = i
				}
			}
			n.nrSegments++
			sibling.nrSegments--

			if FileRangetrackGaps != 0 {
				n.updateMaxGapLocal()
				sibling.updateMaxGapLocal()
			}
			if gap.node == sibling && gap.index == sibling.nrSegments {
				return FileRangeGapIterator{n, 0}
			}
			if gap.node == n {
				return FileRangeGapIterator{n, gap.index + 1}
			}
			return gap
		}
		if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= FileRangeminDegree {
			n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
			n.values[n.nrSegments] = n.parent.values[n.parentIndex]
			n.parent.keys[n.parentIndex] = sibling.keys[0]
			n.parent.values[n.parentIndex] = sibling.values[0]
			copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
			copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
			FileRangeSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
			if n.hasChildren {
				n.children[n.nrSegments+1] = sibling.children[0]
				copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
				sibling.children[sibling.nrSegments] = nil
				n.children[n.nrSegments+1].parent = n
				n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
				for i := 0; i < sibling.nrSegments; i++ {
					sibling.children[i].parentIndex = i
				}
			}
			n.nrSegments++
			sibling.nrSegments--

			if FileRangetrackGaps != 0 {
				n.updateMaxGapLocal()
				sibling.updateMaxGapLocal()
			}
			if gap.node == sibling {
				if gap.index == 0 {
					return FileRangeGapIterator{n, n.nrSegments}
				}
				return FileRangeGapIterator{sibling, gap.index - 1}
			}
			return gap
		}

		p := n.parent
		if p.nrSegments == 1 {

			left, right := p.children[0], p.children[1]
			p.nrSegments = left.nrSegments + right.nrSegments + 1
			p.hasChildren = left.hasChildren
			p.keys[left.nrSegments] = p.keys[0]
			p.values[left.nrSegments] = p.values[0]
			copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
			copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
			copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
			copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
			if left.hasChildren {
				copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
				copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
				for i := 0; i < p.nrSegments+1; i++ {
					p.children[i].parent = p
					p.children[i].parentIndex = i
				}
			} else {
				p.children[0] = nil
				p.children[1] = nil
			}

			if gap.node == left {
				return FileRangeGapIterator{p, gap.index}
			}
			if gap.node == right {
				return FileRangeGapIterator{p, gap.index + left.nrSegments + 1}
			}
			return gap
		}
		// Merge n and either sibling, along with the segment separating the
		// two, into whichever of the two nodes comes first. This is the
		// reverse of the non-root splitting case in
		// node.rebalanceBeforeInsert.
		var left, right *FileRangenode
		if n.parentIndex > 0 {
			left = n.prevSibling()
			right = n
		} else {
			left = n
			right = n.nextSibling()
		}

		if gap.node == right {
			gap = FileRangeGapIterator{left, gap.index + left.nrSegments + 1}
		}
		left.keys[left.nrSegments] = p.keys[left.parentIndex]
		left.values[left.nrSegments] = p.values[left.parentIndex]
		copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
		copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
		if left.hasChildren {
			copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
			for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
				left.children[i].parent = left
				left.children[i].parentIndex = i
			}
		}
		left.nrSegments += right.nrSegments + 1
		copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
		copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
		FileRangeSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
		copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
		for i := 0; i < p.nrSegments; i++ {
			p.children[i].parentIndex = i
		}
		p.children[p.nrSegments] = nil
		p.nrSegments--

		if FileRangetrackGaps != 0 {
			left.updateMaxGapLocal()
		}

		n = p
	}
}

// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
// further update is necessary.
//
// Preconditions: n must be a leaf node, trackGaps must be 1.
func (n *FileRangenode) updateMaxGapLeaf() {
	if n.hasChildren {
		panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
	}
	max := n.calculateMaxGapLeaf()
	if max == n.maxGap.Get() {

		return
	}
	oldMax := n.maxGap.Get()
	n.maxGap.Set(max)
	if max > oldMax {

		for p := n.parent; p != nil; p = p.parent {
			if p.maxGap.Get() >= max {

				break
			}

			p.maxGap.Set(max)
		}
		return
	}

	for p := n.parent; p != nil; p = p.parent {
		if p.maxGap.Get() > oldMax {

			break
		}

		parentNewMax := p.calculateMaxGapInternal()
		if p.maxGap.Get() == parentNewMax {

			break
		}

		p.maxGap.Set(parentNewMax)
	}
}

// updateMaxGapLocal updates maxGap of the calling node solely with no
// propagation to ancestor nodes.
//
// Precondition: trackGaps must be 1.
func (n *FileRangenode) updateMaxGapLocal() {
	if !n.hasChildren {

		n.maxGap.Set(n.calculateMaxGapLeaf())
	} else {

		n.maxGap.Set(n.calculateMaxGapInternal())
	}
}

// calculateMaxGapLeaf iterates the gaps within a leaf node and calculates the
// max.
//
// Preconditions: n must be a leaf node.
func (n *FileRangenode) calculateMaxGapLeaf() uint64 {
	max := FileRangeGapIterator{n, 0}.Range().Length()
	for i := 1; i <= n.nrSegments; i++ {
		if current := (FileRangeGapIterator{n, i}).Range().Length(); current > max {
			max = current
		}
	}
	return max
}

// calculateMaxGapInternal iterates children's maxGap within an internal node n
// and calculates the max.
//
// Preconditions: n must be a non-leaf node.
func (n *FileRangenode) calculateMaxGapInternal() uint64 {
	max := n.children[0].maxGap.Get()
	for i := 1; i <= n.nrSegments; i++ {
		if current := n.children[i].maxGap.Get(); current > max {
			max = current
		}
	}
	return max
}

// searchFirstLargeEnoughGap returns the first gap having at least minSize length
// in the subtree rooted by n. If not found, it returns a terminal gap iterator.
func (n *FileRangenode) searchFirstLargeEnoughGap(minSize uint64) FileRangeGapIterator {
	if n.maxGap.Get() < minSize {
		return FileRangeGapIterator{}
	}
	if n.hasChildren {
		for i := 0; i <= n.nrSegments; i++ {
			if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
				return largeEnoughGap
			}
		}
	} else {
		for i := 0; i <= n.nrSegments; i++ {
			currentGap := FileRangeGapIterator{n, i}
			if currentGap.Range().Length() >= minSize {
				return currentGap
			}
		}
	}
	panic(fmt.Sprintf("invalid maxGap in %v", n))
}

// searchLastLargeEnoughGap returns the last gap having at least minSize length
// in the subtree rooted by n. If not found, it returns a terminal gap iterator.
func (n *FileRangenode) searchLastLargeEnoughGap(minSize uint64) FileRangeGapIterator {
	if n.maxGap.Get() < minSize {
		return FileRangeGapIterator{}
	}
	if n.hasChildren {
		for i := n.nrSegments; i >= 0; i-- {
			if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
				return largeEnoughGap
			}
		}
	} else {
		for i := n.nrSegments; i >= 0; i-- {
			currentGap := FileRangeGapIterator{n, i}
			if currentGap.Range().Length() >= minSize {
				return currentGap
			}
		}
	}
	panic(fmt.Sprintf("invalid maxGap in %v", n))
}

// An Iterator is conceptually one of:
//
//   - A pointer to a segment in a set; or
//
//   - A terminal iterator, which is a sentinel indicating that the end of
//     iteration has been reached.
//
// Iterators are copyable values and are meaningfully equality-comparable. The
// zero value of Iterator is a terminal iterator.
//
// Unless otherwise specified, any mutation of a set invalidates all existing
// iterators into the set.
type FileRangeIterator struct {
	// node is the node containing the iterated segment. If the iterator is
	// terminal, node is nil.
	node *FileRangenode

	// index is the index of the segment in node.keys/values.
	index int
}

// Ok returns true if the iterator is not terminal. All other methods are only
// valid for non-terminal iterators.
func (seg FileRangeIterator) Ok() bool {
	return seg.node != nil
}

// Range returns the iterated segment's range key.
func (seg FileRangeIterator) Range() __generics_imported0.MappableRange {
	return seg.node.keys[seg.index]
}

// Start is equivalent to Range().Start, but should be preferred if only the
// start of the range is needed.
func (seg FileRangeIterator) Start() uint64 {
	return seg.node.keys[seg.index].Start
}

// End is equivalent to Range().End, but should be preferred if only the end of
// the range is needed.
func (seg FileRangeIterator) End() uint64 {
	return seg.node.keys[seg.index].End
}

// SetRangeUnchecked mutates the iterated segment's range key. This operation
// does not invalidate any iterators.
//
// Preconditions:
//   - r.Length() > 0.
//   - The new range must not overlap an existing one:
//       - If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
//       - If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg FileRangeIterator) SetRangeUnchecked(r __generics_imported0.MappableRange) {
	seg.node.keys[seg.index] = r
}

// SetRange mutates the iterated segment's range key. If the new range would
// cause the iterated segment to overlap another segment, or if the new range
// is invalid, SetRange panics. This operation does not invalidate any
// iterators.
func (seg FileRangeIterator) SetRange(r __generics_imported0.MappableRange) {
	if r.Length() <= 0 {
		panic(fmt.Sprintf("invalid segment range %v", r))
	}
	if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
		panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
	}
	if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
		panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
	}
	seg.SetRangeUnchecked(r)
}

// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
// Preconditions: The new start must be valid:
//   - start < seg.End()
//   - If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg FileRangeIterator) SetStartUnchecked(start uint64) {
	seg.node.keys[seg.index].Start = start
}

// SetStart mutates the iterated segment's start. If the new start value would
// cause the iterated segment to overlap another segment, or would result in an
// invalid range, SetStart panics. This operation does not invalidate any
// iterators.
func (seg FileRangeIterator) SetStart(start uint64) {
	if start >= seg.End() {
		panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
	}
	if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
		panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
	}
	seg.SetStartUnchecked(start)
}

// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
// Preconditions: The new end must be valid:
//   - end > seg.Start().
//   - If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg FileRangeIterator) SetEndUnchecked(end uint64) {
	seg.node.keys[seg.index].End = end
}

// SetEnd mutates the iterated segment's end. If the new end value would cause
// the iterated segment to overlap another segment, or would result in an
// invalid range, SetEnd panics. This operation does not invalidate any
// iterators.
func (seg FileRangeIterator) SetEnd(end uint64) {
	if end <= seg.Start() {
		panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
	}
	if next := seg.NextSegment(); next.Ok() && end > next.Start() {
		panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
	}
	seg.SetEndUnchecked(end)
}

// Value returns a copy of the iterated segment's value.
func (seg FileRangeIterator) Value() uint64 {
	return seg.node.values[seg.index]
}

// ValuePtr returns a pointer to the iterated segment's value. The pointer is
// invalidated if the iterator is invalidated. This operation does not
// invalidate any iterators.
func (seg FileRangeIterator) ValuePtr() *uint64 {
	return &seg.node.values[seg.index]
}

// SetValue mutates the iterated segment's value. This operation does not
// invalidate any iterators.
func (seg FileRangeIterator) SetValue(val uint64) {
	seg.node.values[seg.index] = val
}

// PrevSegment returns the iterated segment's predecessor. If there is no
// preceding segment, PrevSegment returns a terminal iterator.
func (seg FileRangeIterator) PrevSegment() FileRangeIterator {
	if seg.node.hasChildren {
		return seg.node.children[seg.index].lastSegment()
	}
	if seg.index > 0 {
		return FileRangeIterator{seg.node, seg.index - 1}
	}
	if seg.node.parent == nil {
		return FileRangeIterator{}
	}
	return FileRangesegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
}

// NextSegment returns the iterated segment's successor. If there is no
// succeeding segment, NextSegment returns a terminal iterator.
func (seg FileRangeIterator) NextSegment() FileRangeIterator {
	if seg.node.hasChildren {
		return seg.node.children[seg.index+1].firstSegment()
	}
	if seg.index < seg.node.nrSegments-1 {
		return FileRangeIterator{seg.node, seg.index + 1}
	}
	if seg.node.parent == nil {
		return FileRangeIterator{}
	}
	return FileRangesegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
}

// PrevGap returns the gap immediately before the iterated segment.
func (seg FileRangeIterator) PrevGap() FileRangeGapIterator {
	if seg.node.hasChildren {

		return seg.node.children[seg.index].lastSegment().NextGap()
	}
	return FileRangeGapIterator{seg.node, seg.index}
}

// NextGap returns the gap immediately after the iterated segment.
func (seg FileRangeIterator) NextGap() FileRangeGapIterator {
	if seg.node.hasChildren {
		return seg.node.children[seg.index+1].firstSegment().PrevGap()
	}
	return FileRangeGapIterator{seg.node, seg.index + 1}
}

// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
// or the gap before the iterated segment otherwise. If seg.Start() ==
// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
// non-terminal.
func (seg FileRangeIterator) PrevNonEmpty() (FileRangeIterator, FileRangeGapIterator) {
	gap := seg.PrevGap()
	if gap.Range().Length() != 0 {
		return FileRangeIterator{}, gap
	}
	return gap.PrevSegment(), FileRangeGapIterator{}
}

// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
// the gap after the iterated segment otherwise. If seg.End() ==
// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
// non-terminal.
func (seg FileRangeIterator) NextNonEmpty() (FileRangeIterator, FileRangeGapIterator) {
	gap := seg.NextGap()
	if gap.Range().Length() != 0 {
		return FileRangeIterator{}, gap
	}
	return gap.NextSegment(), FileRangeGapIterator{}
}

// A GapIterator is conceptually one of:
//
//   - A pointer to a position between two segments, before the first segment,
//     or after the last segment in a set, called a *gap*; or
//
//   - A terminal iterator, which is a sentinel indicating that the end of
//     iteration has been reached.
//
// Note that the gap between two adjacent segments exists (iterators to it are
// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
// for such gaps. An empty set contains a single gap, spanning the entire range
// of the set's keys.
//
// GapIterators are copyable values and are meaningfully equality-comparable.
// The zero value of GapIterator is a terminal iterator.
//
// Unless otherwise specified, any mutation of a set invalidates all existing
// iterators into the set.
type FileRangeGapIterator struct {
	// The representation of a GapIterator is identical to that of an Iterator,
	// except that index corresponds to positions between segments in the same
	// way as for node.children (see comment for node.nrSegments).
	node  *FileRangenode
	index int
}

// Ok returns true if the iterator is not terminal. All other methods are only
// valid for non-terminal iterators.
func (gap FileRangeGapIterator) Ok() bool {
	return gap.node != nil
}

// Range returns the range spanned by the iterated gap.
func (gap FileRangeGapIterator) Range() __generics_imported0.MappableRange {
	return __generics_imported0.MappableRange{gap.Start(), gap.End()}
}

// Start is equivalent to Range().Start, but should be preferred if only the
// start of the range is needed.
func (gap FileRangeGapIterator) Start() uint64 {
	if ps := gap.PrevSegment(); ps.Ok() {
		return ps.End()
	}
	return FileRangeSetFunctions{}.MinKey()
}

// End is equivalent to Range().End, but should be preferred if only the end of
// the range is needed.
func (gap FileRangeGapIterator) End() uint64 {
	if ns := gap.NextSegment(); ns.Ok() {
		return ns.Start()
	}
	return FileRangeSetFunctions{}.MaxKey()
}

// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
// between two adjacent segments.)
func (gap FileRangeGapIterator) IsEmpty() bool {
	return gap.Range().Length() == 0
}

// PrevSegment returns the segment immediately before the iterated gap. If no
// such segment exists, PrevSegment returns a terminal iterator.
func (gap FileRangeGapIterator) PrevSegment() FileRangeIterator {
	return FileRangesegmentBeforePosition(gap.node, gap.index)
}

// NextSegment returns the segment immediately after the iterated gap. If no
// such segment exists, NextSegment returns a terminal iterator.
func (gap FileRangeGapIterator) NextSegment() FileRangeIterator {
	return FileRangesegmentAfterPosition(gap.node, gap.index)
}

// PrevGap returns the iterated gap's predecessor. If no such gap exists,
// PrevGap returns a terminal iterator.
func (gap FileRangeGapIterator) PrevGap() FileRangeGapIterator {
	seg := gap.PrevSegment()
	if !seg.Ok() {
		return FileRangeGapIterator{}
	}
	return seg.PrevGap()
}

// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
// returns a terminal iterator.
func (gap FileRangeGapIterator) NextGap() FileRangeGapIterator {
	seg := gap.NextSegment()
	if !seg.Ok() {
		return FileRangeGapIterator{}
	}
	return seg.NextGap()
}

// NextLargeEnoughGap returns the iterated gap's first next gap with length at
// least minSize. If not found, it returns a terminal gap iterator (does NOT
// include this gap itself).
//
// Precondition: trackGaps must be 1.
func (gap FileRangeGapIterator) NextLargeEnoughGap(minSize uint64) FileRangeGapIterator {
	if FileRangetrackGaps != 1 {
		panic("set is not tracking gaps")
	}
	if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {

		gap.node = gap.NextSegment().node
		gap.index = 0
		return gap.nextLargeEnoughGapHelper(minSize)
	}
	return gap.nextLargeEnoughGapHelper(minSize)
}

// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
// to do the real recursions.
//
// Preconditions: gap is NOT the trailing gap of a non-leaf node.
func (gap FileRangeGapIterator) nextLargeEnoughGapHelper(minSize uint64) FileRangeGapIterator {

	for gap.node != nil &&
		(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
		gap.node, gap.index = gap.node.parent, gap.node.parentIndex
	}

	if gap.node == nil {
		return FileRangeGapIterator{}
	}

	gap.index++
	for gap.index <= gap.node.nrSegments {
		if gap.node.hasChildren {
			if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
				return largeEnoughGap
			}
		} else {
			if gap.Range().Length() >= minSize {
				return gap
			}
		}
		gap.index++
	}
	gap.node, gap.index = gap.node.parent, gap.node.parentIndex
	if gap.node != nil && gap.index == gap.node.nrSegments {

		gap.node, gap.index = gap.node.parent, gap.node.parentIndex
	}
	return gap.nextLargeEnoughGapHelper(minSize)
}

// PrevLargeEnoughGap returns the iterated gap's first prev gap with length at
// least minSize. If not found, it returns a terminal gap iterator (does NOT
// include this gap itself).
//
// Precondition: trackGaps must be 1.
func (gap FileRangeGapIterator) PrevLargeEnoughGap(minSize uint64) FileRangeGapIterator {
	if FileRangetrackGaps != 1 {
		panic("set is not tracking gaps")
	}
	if gap.node != nil && gap.node.hasChildren && gap.index == 0 {

		gap.node = gap.PrevSegment().node
		gap.index = gap.node.nrSegments
		return gap.prevLargeEnoughGapHelper(minSize)
	}
	return gap.prevLargeEnoughGapHelper(minSize)
}

// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
// to do the real recursions.
//
// Preconditions: gap is NOT the first gap of a non-leaf node.
func (gap FileRangeGapIterator) prevLargeEnoughGapHelper(minSize uint64) FileRangeGapIterator {

	for gap.node != nil &&
		(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
		gap.node, gap.index = gap.node.parent, gap.node.parentIndex
	}

	if gap.node == nil {
		return FileRangeGapIterator{}
	}

	gap.index--
	for gap.index >= 0 {
		if gap.node.hasChildren {
			if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
				return largeEnoughGap
			}
		} else {
			if gap.Range().Length() >= minSize {
				return gap
			}
		}
		gap.index--
	}
	gap.node, gap.index = gap.node.parent, gap.node.parentIndex
	if gap.node != nil && gap.index == 0 {

		gap.node, gap.index = gap.node.parent, gap.node.parentIndex
	}
	return gap.prevLargeEnoughGapHelper(minSize)
}

// segmentBeforePosition returns the predecessor segment of the position given
// by n.children[i], which may or may not contain a child. If no such segment
// exists, segmentBeforePosition returns a terminal iterator.
func FileRangesegmentBeforePosition(n *FileRangenode, i int) FileRangeIterator {
	for i == 0 {
		if n.parent == nil {
			return FileRangeIterator{}
		}
		n, i = n.parent, n.parentIndex
	}
	return FileRangeIterator{n, i - 1}
}

// segmentAfterPosition returns the successor segment of the position given by
// n.children[i], which may or may not contain a child. If no such segment
// exists, segmentAfterPosition returns a terminal iterator.
func FileRangesegmentAfterPosition(n *FileRangenode, i int) FileRangeIterator {
	for i == n.nrSegments {
		if n.parent == nil {
			return FileRangeIterator{}
		}
		n, i = n.parent, n.parentIndex
	}
	return FileRangeIterator{n, i}
}

func FileRangezeroValueSlice(slice []uint64) {

	for i := range slice {
		FileRangeSetFunctions{}.ClearValue(&slice[i])
	}
}

func FileRangezeroNodeSlice(slice []*FileRangenode) {
	for i := range slice {
		slice[i] = nil
	}
}

// String stringifies a Set for debugging.
func (s *FileRangeSet) String() string {
	return s.root.String()
}

// String stringifies a node (and all of its children) for debugging.
func (n *FileRangenode) String() string {
	var buf bytes.Buffer
	n.writeDebugString(&buf, "")
	return buf.String()
}

func (n *FileRangenode) writeDebugString(buf *bytes.Buffer, prefix string) {
	if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
		buf.WriteString(prefix)
		buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
	}
	for i := 0; i < n.nrSegments; i++ {
		if child := n.children[i]; child != nil {
			cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
			if child.parent != n || child.parentIndex != i {
				buf.WriteString(cprefix)
				buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
			}
			child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
		}
		buf.WriteString(prefix)
		if n.hasChildren {
			if FileRangetrackGaps != 0 {
				buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
			} else {
				buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
			}
		} else {
			buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
		}
	}
	if child := n.children[n.nrSegments]; child != nil {
		child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
	}
}

// SegmentDataSlices represents segments from a set as slices of start, end, and
// values. SegmentDataSlices is primarily used as an intermediate representation
// for save/restore and the layout here is optimized for that.
//
// +stateify savable
type FileRangeSegmentDataSlices struct {
	Start  []uint64
	End    []uint64
	Values []uint64
}

// ExportSortedSlices returns a copy of all segments in the given set, in
// ascending key order.
func (s *FileRangeSet) ExportSortedSlices() *FileRangeSegmentDataSlices {
	var sds FileRangeSegmentDataSlices
	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		sds.Start = append(sds.Start, seg.Start())
		sds.End = append(sds.End, seg.End())
		sds.Values = append(sds.Values, seg.Value())
	}
	sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
	sds.End = sds.End[:len(sds.End):len(sds.End)]
	sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
	return &sds
}

// ImportSortedSlices initializes the given set from the given slice.
//
// Preconditions:
//   - s must be empty.
//   - sds must represent a valid set (the segments in sds must have valid
//     lengths that do not overlap).
//   - The segments in sds must be sorted in ascending key order.
func (s *FileRangeSet) ImportSortedSlices(sds *FileRangeSegmentDataSlices) error {
	if !s.IsEmpty() {
		return fmt.Errorf("cannot import into non-empty set %v", s)
	}
	gap := s.FirstGap()
	for i := range sds.Start {
		r := __generics_imported0.MappableRange{sds.Start[i], sds.End[i]}
		if !gap.Range().IsSupersetOf(r) {
			return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
		}
		gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
	}
	return nil
}

// segmentTestCheck returns an error if s is incorrectly sorted, does not
// contain exactly expectedSegments segments, or contains a segment which
// fails the passed check.
//
// This should be used only for testing, and has been added to this package for
// templating convenience.
func (s *FileRangeSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.MappableRange, uint64) error) error {
	havePrev := false
	prev := uint64(0)
	nrSegments := 0
	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		next := seg.Start()
		if havePrev && prev >= next {
			return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
		}
		if segFunc != nil {
			if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
				return err
			}
		}
		prev = next
		havePrev = true
		nrSegments++
	}
	if nrSegments != expectedSegments {
		return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
	}
	return nil
}

// countSegments counts the number of segments in the set.
//
// Similar to Check, this should only be used for testing.
func (s *FileRangeSet) countSegments() (segments int) {
	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		segments++
	}
	return segments
}

func (s *FileRangeSet) saveRoot() *FileRangeSegmentDataSlices {
	return s.ExportSortedSlices()
}

func (s *FileRangeSet) loadRoot(sds *FileRangeSegmentDataSlices) {
	if err := s.ImportSortedSlices(sds); err != nil {
		panic(err)
	}
}
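// Illustrative sketch, not part of the generated implementation: the
// export/import pair above is how the set's contents survive save/restore.
// The function name is an assumption made for this example.
func exampleFileRangeSetRoundTrip(src *FileRangeSet) (*FileRangeSet, error) {
	// ExportSortedSlices flattens the tree into parallel Start/End/Values
	// slices in ascending key order.
	sds := src.ExportSortedSlices()
	// ImportSortedSlices rebuilds an equivalent set; it fails if the target
	// set is not empty or if sds is unsorted or overlapping.
	var dst FileRangeSet
	if err := dst.ImportSortedSlices(sds); err != nil {
		return nil, err
	}
	return &dst, nil
}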