github.com/cockroachdb/pebble@v0.0.0-20231214172447-ab4952c5f87b/options.go (about) 1 // Copyright 2011 The LevelDB-Go and Pebble Authors. All rights reserved. Use 2 // of this source code is governed by a BSD-style license that can be found in 3 // the LICENSE file. 4 5 package pebble 6 7 import ( 8 "bytes" 9 "fmt" 10 "io" 11 "runtime" 12 "strconv" 13 "strings" 14 "time" 15 "unicode" 16 17 "github.com/cockroachdb/errors" 18 "github.com/cockroachdb/pebble/internal/base" 19 "github.com/cockroachdb/pebble/internal/cache" 20 "github.com/cockroachdb/pebble/internal/humanize" 21 "github.com/cockroachdb/pebble/internal/keyspan" 22 "github.com/cockroachdb/pebble/internal/manifest" 23 "github.com/cockroachdb/pebble/objstorage/remote" 24 "github.com/cockroachdb/pebble/rangekey" 25 "github.com/cockroachdb/pebble/sstable" 26 "github.com/cockroachdb/pebble/vfs" 27 ) 28 29 const ( 30 cacheDefaultSize = 8 << 20 // 8 MB 31 defaultLevelMultiplier = 10 32 ) 33 34 // Compression exports the base.Compression type. 35 type Compression = sstable.Compression 36 37 // Exported Compression constants. 38 const ( 39 DefaultCompression = sstable.DefaultCompression 40 NoCompression = sstable.NoCompression 41 SnappyCompression = sstable.SnappyCompression 42 ZstdCompression = sstable.ZstdCompression 43 ) 44 45 // FilterType exports the base.FilterType type. 46 type FilterType = base.FilterType 47 48 // Exported TableFilter constants. 49 const ( 50 TableFilter = base.TableFilter 51 ) 52 53 // FilterWriter exports the base.FilterWriter type. 54 type FilterWriter = base.FilterWriter 55 56 // FilterPolicy exports the base.FilterPolicy type. 57 type FilterPolicy = base.FilterPolicy 58 59 // TablePropertyCollector exports the sstable.TablePropertyCollector type. 60 type TablePropertyCollector = sstable.TablePropertyCollector 61 62 // BlockPropertyCollector exports the sstable.BlockPropertyCollector type. 63 type BlockPropertyCollector = sstable.BlockPropertyCollector 64 65 // BlockPropertyFilter exports the sstable.BlockPropertyFilter type. 66 type BlockPropertyFilter = base.BlockPropertyFilter 67 68 // ShortAttributeExtractor exports the base.ShortAttributeExtractor type. 69 type ShortAttributeExtractor = base.ShortAttributeExtractor 70 71 // UserKeyPrefixBound exports the sstable.UserKeyPrefixBound type. 72 type UserKeyPrefixBound = sstable.UserKeyPrefixBound 73 74 // IterKeyType configures which types of keys an iterator should surface. 75 type IterKeyType int8 76 77 const ( 78 // IterKeyTypePointsOnly configures an iterator to iterate over point keys 79 // only. 80 IterKeyTypePointsOnly IterKeyType = iota 81 // IterKeyTypeRangesOnly configures an iterator to iterate over range keys 82 // only. 83 IterKeyTypeRangesOnly 84 // IterKeyTypePointsAndRanges configures an iterator iterate over both point 85 // keys and range keys simultaneously. 86 IterKeyTypePointsAndRanges 87 ) 88 89 // String implements fmt.Stringer. 90 func (t IterKeyType) String() string { 91 switch t { 92 case IterKeyTypePointsOnly: 93 return "points-only" 94 case IterKeyTypeRangesOnly: 95 return "ranges-only" 96 case IterKeyTypePointsAndRanges: 97 return "points-and-ranges" 98 default: 99 panic(fmt.Sprintf("unknown key type %d", t)) 100 } 101 } 102 103 // IterOptions hold the optional per-query parameters for NewIter. 104 // 105 // Like Options, a nil *IterOptions is valid and means to use the default 106 // values. 107 type IterOptions struct { 108 // LowerBound specifies the smallest key (inclusive) that the iterator will 109 // return during iteration. If the iterator is seeked or iterated past this 110 // boundary the iterator will return Valid()==false. Setting LowerBound 111 // effectively truncates the key space visible to the iterator. 112 LowerBound []byte 113 // UpperBound specifies the largest key (exclusive) that the iterator will 114 // return during iteration. If the iterator is seeked or iterated past this 115 // boundary the iterator will return Valid()==false. Setting UpperBound 116 // effectively truncates the key space visible to the iterator. 117 UpperBound []byte 118 // TableFilter can be used to filter the tables that are scanned during 119 // iteration based on the user properties. Return true to scan the table and 120 // false to skip scanning. This function must be thread-safe since the same 121 // function can be used by multiple iterators, if the iterator is cloned. 122 TableFilter func(userProps map[string]string) bool 123 // SkipPoint may be used to skip over point keys that don't match an 124 // arbitrary predicate during iteration. If set, the Iterator invokes 125 // SkipPoint for keys encountered. If SkipPoint returns true, the iterator 126 // will skip the key without yielding it to the iterator operation in 127 // progress. 128 // 129 // SkipPoint must be a pure function and always return the same result when 130 // provided the same arguments. The iterator may call SkipPoint multiple 131 // times for the same user key. 132 SkipPoint func(userKey []byte) bool 133 // PointKeyFilters can be used to avoid scanning tables and blocks in tables 134 // when iterating over point keys. This slice represents an intersection 135 // across all filters, i.e., all filters must indicate that the block is 136 // relevant. 137 // 138 // Performance note: When len(PointKeyFilters) > 0, the caller should ensure 139 // that cap(PointKeyFilters) is at least len(PointKeyFilters)+1. This helps 140 // avoid allocations in Pebble internal code that mutates the slice. 141 PointKeyFilters []BlockPropertyFilter 142 // RangeKeyFilters can be usefd to avoid scanning tables and blocks in tables 143 // when iterating over range keys. The same requirements that apply to 144 // PointKeyFilters apply here too. 145 RangeKeyFilters []BlockPropertyFilter 146 // KeyTypes configures which types of keys to iterate over: point keys, 147 // range keys, or both. 148 KeyTypes IterKeyType 149 // RangeKeyMasking can be used to enable automatic masking of point keys by 150 // range keys. Range key masking is only supported during combined range key 151 // and point key iteration mode (IterKeyTypePointsAndRanges). 152 RangeKeyMasking RangeKeyMasking 153 154 // OnlyReadGuaranteedDurable is an advanced option that is only supported by 155 // the Reader implemented by DB. When set to true, only the guaranteed to be 156 // durable state is visible in the iterator. 157 // - This definition is made under the assumption that the FS implementation 158 // is providing a durability guarantee when data is synced. 159 // - The visible state represents a consistent point in the history of the 160 // DB. 161 // - The implementation is free to choose a conservative definition of what 162 // is guaranteed durable. For simplicity, the current implementation 163 // ignores memtables. A more sophisticated implementation could track the 164 // highest seqnum that is synced to the WAL and published and use that as 165 // the visible seqnum for an iterator. Note that the latter approach is 166 // not strictly better than the former since we can have DBs that are (a) 167 // synced more rarely than memtable flushes, (b) have no WAL. (a) is 168 // likely to be true in a future CockroachDB context where the DB 169 // containing the state machine may be rarely synced. 170 // NB: this current implementation relies on the fact that memtables are 171 // flushed in seqnum order, and any ingested sstables that happen to have a 172 // lower seqnum than a non-flushed memtable don't have any overlapping keys. 173 // This is the fundamental level invariant used in other code too, like when 174 // merging iterators. 175 // 176 // Semantically, using this option provides the caller a "snapshot" as of 177 // the time the most recent memtable was flushed. An alternate interface 178 // would be to add a NewSnapshot variant. Creating a snapshot is heavier 179 // weight than creating an iterator, so we have opted to support this 180 // iterator option. 181 OnlyReadGuaranteedDurable bool 182 // UseL6Filters allows the caller to opt into reading filter blocks for L6 183 // sstables. Helpful if a lot of SeekPrefixGEs are expected in quick 184 // succession, that are also likely to not yield a single key. Filter blocks in 185 // L6 can be relatively large, often larger than data blocks, so the benefit of 186 // loading them in the cache is minimized if the probability of the key 187 // existing is not low or if we just expect a one-time Seek (where loading the 188 // data block directly is better). 189 UseL6Filters bool 190 // CategoryAndQoS is used for categorized iterator stats. This should not be 191 // changed by calling SetOptions. 192 sstable.CategoryAndQoS 193 194 // Internal options. 195 196 logger Logger 197 // Level corresponding to this file. Only passed in if constructed by a 198 // levelIter. 199 level manifest.Level 200 // disableLazyCombinedIteration is an internal testing option. 201 disableLazyCombinedIteration bool 202 // snapshotForHideObsoletePoints is specified for/by levelIter when opening 203 // files and is used to decide whether to hide obsolete points. A value of 0 204 // implies obsolete points should not be hidden. 205 snapshotForHideObsoletePoints uint64 206 207 // NB: If adding new Options, you must account for them in iterator 208 // construction and Iterator.SetOptions. 209 } 210 211 // GetLowerBound returns the LowerBound or nil if the receiver is nil. 212 func (o *IterOptions) GetLowerBound() []byte { 213 if o == nil { 214 return nil 215 } 216 return o.LowerBound 217 } 218 219 // GetUpperBound returns the UpperBound or nil if the receiver is nil. 220 func (o *IterOptions) GetUpperBound() []byte { 221 if o == nil { 222 return nil 223 } 224 return o.UpperBound 225 } 226 227 func (o *IterOptions) pointKeys() bool { 228 if o == nil { 229 return true 230 } 231 return o.KeyTypes == IterKeyTypePointsOnly || o.KeyTypes == IterKeyTypePointsAndRanges 232 } 233 234 func (o *IterOptions) rangeKeys() bool { 235 if o == nil { 236 return false 237 } 238 return o.KeyTypes == IterKeyTypeRangesOnly || o.KeyTypes == IterKeyTypePointsAndRanges 239 } 240 241 func (o *IterOptions) getLogger() Logger { 242 if o == nil || o.logger == nil { 243 return DefaultLogger 244 } 245 return o.logger 246 } 247 248 // SpanIterOptions creates a SpanIterOptions from this IterOptions. 249 func (o *IterOptions) SpanIterOptions() keyspan.SpanIterOptions { 250 if o == nil { 251 return keyspan.SpanIterOptions{} 252 } 253 return keyspan.SpanIterOptions{ 254 RangeKeyFilters: o.RangeKeyFilters, 255 } 256 } 257 258 // scanInternalOptions is similar to IterOptions, meant for use with 259 // scanInternalIterator. 260 type scanInternalOptions struct { 261 sstable.CategoryAndQoS 262 IterOptions 263 264 visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error 265 visitRangeDel func(start, end []byte, seqNum uint64) error 266 visitRangeKey func(start, end []byte, keys []rangekey.Key) error 267 visitSharedFile func(sst *SharedSSTMeta) error 268 269 // skipSharedLevels skips levels that are shareable (level >= 270 // sharedLevelStart). 271 skipSharedLevels bool 272 273 // includeObsoleteKeys specifies whether keys shadowed by newer internal keys 274 // are exposed. If false, only one internal key per user key is exposed. 275 includeObsoleteKeys bool 276 277 // rateLimitFunc is used to limit the amount of bytes read per second. 278 rateLimitFunc func(key *InternalKey, value LazyValue) error 279 } 280 281 // RangeKeyMasking configures automatic hiding of point keys by range keys. A 282 // non-nil Suffix enables range-key masking. When enabled, range keys with 283 // suffixes ≥ Suffix behave as masks. All point keys that are contained within a 284 // masking range key's bounds and have suffixes greater than the range key's 285 // suffix are automatically skipped. 286 // 287 // Specifically, when configured with a RangeKeyMasking.Suffix _s_, and there 288 // exists a range key with suffix _r_ covering a point key with suffix _p_, and 289 // 290 // _s_ ≤ _r_ < _p_ 291 // 292 // then the point key is elided. 293 // 294 // Range-key masking may only be used when iterating over both point keys and 295 // range keys with IterKeyTypePointsAndRanges. 296 type RangeKeyMasking struct { 297 // Suffix configures which range keys may mask point keys. Only range keys 298 // that are defined at suffixes greater than or equal to Suffix will mask 299 // point keys. 300 Suffix []byte 301 // Filter is an optional field that may be used to improve performance of 302 // range-key masking through a block-property filter defined over key 303 // suffixes. If non-nil, Filter is called by Pebble to construct a 304 // block-property filter mask at iterator creation. The filter is used to 305 // skip whole point-key blocks containing point keys with suffixes greater 306 // than a covering range-key's suffix. 307 // 308 // To use this functionality, the caller must create and configure (through 309 // Options.BlockPropertyCollectors) a block-property collector that records 310 // the maxmimum suffix contained within a block. The caller then must write 311 // and provide a BlockPropertyFilterMask implementation on that same 312 // property. See the BlockPropertyFilterMask type for more information. 313 Filter func() BlockPropertyFilterMask 314 } 315 316 // BlockPropertyFilterMask extends the BlockPropertyFilter interface for use 317 // with range-key masking. Unlike an ordinary block property filter, a 318 // BlockPropertyFilterMask's filtering criteria is allowed to change when Pebble 319 // invokes its SetSuffix method. 320 // 321 // When a Pebble iterator steps into a range key's bounds and the range key has 322 // a suffix greater than or equal to RangeKeyMasking.Suffix, the range key acts 323 // as a mask. The masking range key hides all point keys that fall within the 324 // range key's bounds and have suffixes > the range key's suffix. Without a 325 // filter mask configured, Pebble performs this hiding by stepping through point 326 // keys and comparing suffixes. If large numbers of point keys are masked, this 327 // requires Pebble to load, iterate through and discard a large number of 328 // sstable blocks containing masked point keys. 329 // 330 // If a block-property collector and a filter mask are configured, Pebble may 331 // skip loading some point-key blocks altogether. If a block's keys are known to 332 // all fall within the bounds of the masking range key and the block was 333 // annotated by a block-property collector with the maximal suffix, Pebble can 334 // ask the filter mask to compare the property to the current masking range 335 // key's suffix. If the mask reports no intersection, the block may be skipped. 336 // 337 // If unsuffixed and suffixed keys are written to the database, care must be 338 // taken to avoid unintentionally masking un-suffixed keys located in the same 339 // block as suffixed keys. One solution is to interpret unsuffixed keys as 340 // containing the maximal suffix value, ensuring that blocks containing 341 // unsuffixed keys are always loaded. 342 type BlockPropertyFilterMask interface { 343 BlockPropertyFilter 344 345 // SetSuffix configures the mask with the suffix of a range key. The filter 346 // should return false from Intersects whenever it's provided with a 347 // property encoding a block's minimum suffix that's greater (according to 348 // Compare) than the provided suffix. 349 SetSuffix(suffix []byte) error 350 } 351 352 // WriteOptions hold the optional per-query parameters for Set and Delete 353 // operations. 354 // 355 // Like Options, a nil *WriteOptions is valid and means to use the default 356 // values. 357 type WriteOptions struct { 358 // Sync is whether to sync writes through the OS buffer cache and down onto 359 // the actual disk, if applicable. Setting Sync is required for durability of 360 // individual write operations but can result in slower writes. 361 // 362 // If false, and the process or machine crashes, then a recent write may be 363 // lost. This is due to the recently written data being buffered inside the 364 // process running Pebble. This differs from the semantics of a write system 365 // call in which the data is buffered in the OS buffer cache and would thus 366 // survive a process crash. 367 // 368 // The default value is true. 369 Sync bool 370 } 371 372 // Sync specifies the default write options for writes which synchronize to 373 // disk. 374 var Sync = &WriteOptions{Sync: true} 375 376 // NoSync specifies the default write options for writes which do not 377 // synchronize to disk. 378 var NoSync = &WriteOptions{Sync: false} 379 380 // GetSync returns the Sync value or true if the receiver is nil. 381 func (o *WriteOptions) GetSync() bool { 382 return o == nil || o.Sync 383 } 384 385 // LevelOptions holds the optional per-level parameters. 386 type LevelOptions struct { 387 // BlockRestartInterval is the number of keys between restart points 388 // for delta encoding of keys. 389 // 390 // The default value is 16. 391 BlockRestartInterval int 392 393 // BlockSize is the target uncompressed size in bytes of each table block. 394 // 395 // The default value is 4096. 396 BlockSize int 397 398 // BlockSizeThreshold finishes a block if the block size is larger than the 399 // specified percentage of the target block size and adding the next entry 400 // would cause the block to be larger than the target block size. 401 // 402 // The default value is 90 403 BlockSizeThreshold int 404 405 // Compression defines the per-block compression to use. 406 // 407 // The default value (DefaultCompression) uses snappy compression. 408 Compression Compression 409 410 // FilterPolicy defines a filter algorithm (such as a Bloom filter) that can 411 // reduce disk reads for Get calls. 412 // 413 // One such implementation is bloom.FilterPolicy(10) from the pebble/bloom 414 // package. 415 // 416 // The default value means to use no filter. 417 FilterPolicy FilterPolicy 418 419 // FilterType defines whether an existing filter policy is applied at a 420 // block-level or table-level. Block-level filters use less memory to create, 421 // but are slower to access as a check for the key in the index must first be 422 // performed to locate the filter block. A table-level filter will require 423 // memory proportional to the number of keys in an sstable to create, but 424 // avoids the index lookup when determining if a key is present. Table-level 425 // filters should be preferred except under constrained memory situations. 426 FilterType FilterType 427 428 // IndexBlockSize is the target uncompressed size in bytes of each index 429 // block. When the index block size is larger than this target, two-level 430 // indexes are automatically enabled. Setting this option to a large value 431 // (such as math.MaxInt32) disables the automatic creation of two-level 432 // indexes. 433 // 434 // The default value is the value of BlockSize. 435 IndexBlockSize int 436 437 // The target file size for the level. 438 TargetFileSize int64 439 } 440 441 // EnsureDefaults ensures that the default values for all of the options have 442 // been initialized. It is valid to call EnsureDefaults on a nil receiver. A 443 // non-nil result will always be returned. 444 func (o *LevelOptions) EnsureDefaults() *LevelOptions { 445 if o == nil { 446 o = &LevelOptions{} 447 } 448 if o.BlockRestartInterval <= 0 { 449 o.BlockRestartInterval = base.DefaultBlockRestartInterval 450 } 451 if o.BlockSize <= 0 { 452 o.BlockSize = base.DefaultBlockSize 453 } else if o.BlockSize > sstable.MaximumBlockSize { 454 panic(errors.Errorf("BlockSize %d exceeds MaximumBlockSize", o.BlockSize)) 455 } 456 if o.BlockSizeThreshold <= 0 { 457 o.BlockSizeThreshold = base.DefaultBlockSizeThreshold 458 } 459 if o.Compression <= DefaultCompression || o.Compression >= sstable.NCompression { 460 o.Compression = SnappyCompression 461 } 462 if o.IndexBlockSize <= 0 { 463 o.IndexBlockSize = o.BlockSize 464 } 465 if o.TargetFileSize <= 0 { 466 o.TargetFileSize = 2 << 20 // 2 MB 467 } 468 return o 469 } 470 471 // Options holds the optional parameters for configuring pebble. These options 472 // apply to the DB at large; per-query options are defined by the IterOptions 473 // and WriteOptions types. 474 type Options struct { 475 // Sync sstables periodically in order to smooth out writes to disk. This 476 // option does not provide any persistency guarantee, but is used to avoid 477 // latency spikes if the OS automatically decides to write out a large chunk 478 // of dirty filesystem buffers. This option only controls SSTable syncs; WAL 479 // syncs are controlled by WALBytesPerSync. 480 // 481 // The default value is 512KB. 482 BytesPerSync int 483 484 // Cache is used to cache uncompressed blocks from sstables. 485 // 486 // The default cache size is 8 MB. 487 Cache *cache.Cache 488 489 // Cleaner cleans obsolete files. 490 // 491 // The default cleaner uses the DeleteCleaner. 492 Cleaner Cleaner 493 494 // Comparer defines a total ordering over the space of []byte keys: a 'less 495 // than' relationship. The same comparison algorithm must be used for reads 496 // and writes over the lifetime of the DB. 497 // 498 // The default value uses the same ordering as bytes.Compare. 499 Comparer *Comparer 500 501 // DebugCheck is invoked, if non-nil, whenever a new version is being 502 // installed. Typically, this is set to pebble.DebugCheckLevels in tests 503 // or tools only, to check invariants over all the data in the database. 504 DebugCheck func(*DB) error 505 506 // Disable the write-ahead log (WAL). Disabling the write-ahead log prohibits 507 // crash recovery, but can improve performance if crash recovery is not 508 // needed (e.g. when only temporary state is being stored in the database). 509 // 510 // TODO(peter): untested 511 DisableWAL bool 512 513 // ErrorIfExists causes an error on Open if the database already exists. 514 // The error can be checked with errors.Is(err, ErrDBAlreadyExists). 515 // 516 // The default value is false. 517 ErrorIfExists bool 518 519 // ErrorIfNotExists causes an error on Open if the database does not already 520 // exist. The error can be checked with errors.Is(err, ErrDBDoesNotExist). 521 // 522 // The default value is false which will cause a database to be created if it 523 // does not already exist. 524 ErrorIfNotExists bool 525 526 // ErrorIfNotPristine causes an error on Open if the database already exists 527 // and any operations have been performed on the database. The error can be 528 // checked with errors.Is(err, ErrDBNotPristine). 529 // 530 // Note that a database that contained keys that were all subsequently deleted 531 // may or may not trigger the error. Currently, we check if there are any live 532 // SSTs or log records to replay. 533 ErrorIfNotPristine bool 534 535 // EventListener provides hooks to listening to significant DB events such as 536 // flushes, compactions, and table deletion. 537 EventListener *EventListener 538 539 // Experimental contains experimental options which are off by default. 540 // These options are temporary and will eventually either be deleted, moved 541 // out of the experimental group, or made the non-adjustable default. These 542 // options may change at any time, so do not rely on them. 543 Experimental struct { 544 // The threshold of L0 read-amplification at which compaction concurrency 545 // is enabled (if CompactionDebtConcurrency was not already exceeded). 546 // Every multiple of this value enables another concurrent 547 // compaction up to MaxConcurrentCompactions. 548 L0CompactionConcurrency int 549 550 // CompactionDebtConcurrency controls the threshold of compaction debt 551 // at which additional compaction concurrency slots are added. For every 552 // multiple of this value in compaction debt bytes, an additional 553 // concurrent compaction is added. This works "on top" of 554 // L0CompactionConcurrency, so the higher of the count of compaction 555 // concurrency slots as determined by the two options is chosen. 556 CompactionDebtConcurrency uint64 557 558 // IngestSplit, if it returns true, allows for ingest-time splitting of 559 // existing sstables into two virtual sstables to allow ingestion sstables to 560 // slot into a lower level than they otherwise would have. 561 IngestSplit func() bool 562 563 // ReadCompactionRate controls the frequency of read triggered 564 // compactions by adjusting `AllowedSeeks` in manifest.FileMetadata: 565 // 566 // AllowedSeeks = FileSize / ReadCompactionRate 567 // 568 // From LevelDB: 569 // ``` 570 // We arrange to automatically compact this file after 571 // a certain number of seeks. Let's assume: 572 // (1) One seek costs 10ms 573 // (2) Writing or reading 1MB costs 10ms (100MB/s) 574 // (3) A compaction of 1MB does 25MB of IO: 575 // 1MB read from this level 576 // 10-12MB read from next level (boundaries may be misaligned) 577 // 10-12MB written to next level 578 // This implies that 25 seeks cost the same as the compaction 579 // of 1MB of data. I.e., one seek costs approximately the 580 // same as the compaction of 40KB of data. We are a little 581 // conservative and allow approximately one seek for every 16KB 582 // of data before triggering a compaction. 583 // ``` 584 ReadCompactionRate int64 585 586 // ReadSamplingMultiplier is a multiplier for the readSamplingPeriod in 587 // iterator.maybeSampleRead() to control the frequency of read sampling 588 // to trigger a read triggered compaction. A value of -1 prevents sampling 589 // and disables read triggered compactions. The default is 1 << 4. which 590 // gets multiplied with a constant of 1 << 16 to yield 1 << 20 (1MB). 591 ReadSamplingMultiplier int64 592 593 // TableCacheShards is the number of shards per table cache. 594 // Reducing the value can reduce the number of idle goroutines per DB 595 // instance which can be useful in scenarios with a lot of DB instances 596 // and a large number of CPUs, but doing so can lead to higher contention 597 // in the table cache and reduced performance. 598 // 599 // The default value is the number of logical CPUs, which can be 600 // limited by runtime.GOMAXPROCS. 601 TableCacheShards int 602 603 // KeyValidationFunc is a function to validate a user key in an SSTable. 604 // 605 // Currently, this function is used to validate the smallest and largest 606 // keys in an SSTable undergoing compaction. In this case, returning an 607 // error from the validation function will result in a panic at runtime, 608 // given that there is rarely any way of recovering from malformed keys 609 // present in compacted files. By default, validation is not performed. 610 // 611 // Additional use-cases may be added in the future. 612 // 613 // NOTE: callers should take care to not mutate the key being validated. 614 KeyValidationFunc func(userKey []byte) error 615 616 // ValidateOnIngest schedules validation of sstables after they have 617 // been ingested. 618 // 619 // By default, this value is false. 620 ValidateOnIngest bool 621 622 // LevelMultiplier configures the size multiplier used to determine the 623 // desired size of each level of the LSM. Defaults to 10. 624 LevelMultiplier int 625 626 // MultiLevelCompactionHeuristic determines whether to add an additional 627 // level to a conventional two level compaction. If nil, a multilevel 628 // compaction will never get triggered. 629 MultiLevelCompactionHeuristic MultiLevelHeuristic 630 631 // MaxWriterConcurrency is used to indicate the maximum number of 632 // compression workers the compression queue is allowed to use. If 633 // MaxWriterConcurrency > 0, then the Writer will use parallelism, to 634 // compress and write blocks to disk. Otherwise, the writer will 635 // compress and write blocks to disk synchronously. 636 MaxWriterConcurrency int 637 638 // ForceWriterParallelism is used to force parallelism in the sstable 639 // Writer for the metamorphic tests. Even with the MaxWriterConcurrency 640 // option set, we only enable parallelism in the sstable Writer if there 641 // is enough CPU available, and this option bypasses that. 642 ForceWriterParallelism bool 643 644 // CPUWorkPermissionGranter should be set if Pebble should be given the 645 // ability to optionally schedule additional CPU. See the documentation 646 // for CPUWorkPermissionGranter for more details. 647 CPUWorkPermissionGranter CPUWorkPermissionGranter 648 649 // EnableValueBlocks is used to decide whether to enable writing 650 // TableFormatPebblev3 sstables. This setting is only respected by a 651 // specific subset of format major versions: FormatSSTableValueBlocks, 652 // FormatFlushableIngest and FormatPrePebblev1MarkedCompacted. In lower 653 // format major versions, value blocks are never enabled. In higher 654 // format major versions, value blocks are always enabled. 655 EnableValueBlocks func() bool 656 657 // ShortAttributeExtractor is used iff EnableValueBlocks() returns true 658 // (else ignored). If non-nil, a ShortAttribute can be extracted from the 659 // value and stored with the key, when the value is stored elsewhere. 660 ShortAttributeExtractor ShortAttributeExtractor 661 662 // RequiredInPlaceValueBound specifies an optional span of user key 663 // prefixes that are not-MVCC, but have a suffix. For these the values 664 // must be stored with the key, since the concept of "older versions" is 665 // not defined. It is also useful for statically known exclusions to value 666 // separation. In CockroachDB, this will be used for the lock table key 667 // space that has non-empty suffixes, but those locks don't represent 668 // actual MVCC versions (the suffix ordering is arbitrary). We will also 669 // need to add support for dynamically configured exclusions (we want the 670 // default to be to allow Pebble to decide whether to separate the value 671 // or not, hence this is structured as exclusions), for example, for users 672 // of CockroachDB to dynamically exclude certain tables. 673 // 674 // Any change in exclusion behavior takes effect only on future written 675 // sstables, and does not start rewriting existing sstables. 676 // 677 // Even ignoring changes in this setting, exclusions are interpreted as a 678 // guidance by Pebble, and not necessarily honored. Specifically, user 679 // keys with multiple Pebble-versions *may* have the older versions stored 680 // in value blocks. 681 RequiredInPlaceValueBound UserKeyPrefixBound 682 683 // DisableIngestAsFlushable disables lazy ingestion of sstables through 684 // a WAL write and memtable rotation. Only effectual if the the format 685 // major version is at least `FormatFlushableIngest`. 686 DisableIngestAsFlushable func() bool 687 688 // RemoteStorage enables use of remote storage (e.g. S3) for storing 689 // sstables. Setting this option enables use of CreateOnShared option and 690 // allows ingestion of external files. 691 RemoteStorage remote.StorageFactory 692 693 // If CreateOnShared is non-zero, new sstables are created on remote storage 694 // (using CreateOnSharedLocator and with the appropriate 695 // CreateOnSharedStrategy). These sstables can be shared between different 696 // Pebble instances; the lifecycle of such objects is managed by the 697 // remote.Storage constructed by options.RemoteStorage. 698 // 699 // Can only be used when RemoteStorage is set (and recognizes 700 // CreateOnSharedLocator). 701 CreateOnShared remote.CreateOnSharedStrategy 702 CreateOnSharedLocator remote.Locator 703 704 // CacheSizeBytesBytes is the size of the on-disk block cache for objects 705 // on shared storage in bytes. If it is 0, no cache is used. 706 SecondaryCacheSizeBytes int64 707 708 // IneffectualPointDeleteCallback is called in compactions/flushes if any 709 // single delete is being elided without deleting a point set/merge. 710 IneffectualSingleDeleteCallback func(userKey []byte) 711 712 // SingleDeleteInvariantViolationCallback is called in compactions/flushes if any 713 // single delete has consumed a Set/Merge, and there is another immediately older 714 // Set/SetWithDelete/Merge. The user of Pebble has violated the invariant under 715 // which SingleDelete can be used correctly. 716 // 717 // Consider the sequence SingleDelete#3, Set#2, Set#1. There are three 718 // ways some of these keys can first meet in a compaction. 719 // 720 // - All 3 keys in the same compaction: this callback will detect the 721 // violation. 722 // 723 // - SingleDelete#3, Set#2 meet in a compaction first: Both keys will 724 // disappear. The violation will not be detected, and the DB will have 725 // Set#1 which is likely incorrect (from the user's perspective). 726 // 727 // - Set#2, Set#1 meet in a compaction first: The output will be Set#2, 728 // which will later be consumed by SingleDelete#3. The violation will 729 // not be detected and the DB will be correct. 730 SingleDeleteInvariantViolationCallback func(userKey []byte) 731 } 732 733 // Filters is a map from filter policy name to filter policy. It is used for 734 // debugging tools which may be used on multiple databases configured with 735 // different filter policies. It is not necessary to populate this filters 736 // map during normal usage of a DB. 737 Filters map[string]FilterPolicy 738 739 // FlushDelayDeleteRange configures how long the database should wait before 740 // forcing a flush of a memtable that contains a range deletion. Disk space 741 // cannot be reclaimed until the range deletion is flushed. No automatic 742 // flush occurs if zero. 743 FlushDelayDeleteRange time.Duration 744 745 // FlushDelayRangeKey configures how long the database should wait before 746 // forcing a flush of a memtable that contains a range key. Range keys in 747 // the memtable prevent lazy combined iteration, so it's desirable to flush 748 // range keys promptly. No automatic flush occurs if zero. 749 FlushDelayRangeKey time.Duration 750 751 // FlushSplitBytes denotes the target number of bytes per sublevel in 752 // each flush split interval (i.e. range between two flush split keys) 753 // in L0 sstables. When set to zero, only a single sstable is generated 754 // by each flush. When set to a non-zero value, flushes are split at 755 // points to meet L0's TargetFileSize, any grandparent-related overlap 756 // options, and at boundary keys of L0 flush split intervals (which are 757 // targeted to contain around FlushSplitBytes bytes in each sublevel 758 // between pairs of boundary keys). Splitting sstables during flush 759 // allows increased compaction flexibility and concurrency when those 760 // tables are compacted to lower levels. 761 FlushSplitBytes int64 762 763 // FormatMajorVersion sets the format of on-disk files. It is 764 // recommended to set the format major version to an explicit 765 // version, as the default may change over time. 766 // 767 // At Open if the existing database is formatted using a later 768 // format major version that is known to this version of Pebble, 769 // Pebble will continue to use the later format major version. If 770 // the existing database's version is unknown, the caller may use 771 // FormatMostCompatible and will be able to open the database 772 // regardless of its actual version. 773 // 774 // If the existing database is formatted using a format major 775 // version earlier than the one specified, Open will automatically 776 // ratchet the database to the specified format major version. 777 FormatMajorVersion FormatMajorVersion 778 779 // FS provides the interface for persistent file storage. 780 // 781 // The default value uses the underlying operating system's file system. 782 FS vfs.FS 783 784 // Lock, if set, must be a database lock acquired through LockDirectory for 785 // the same directory passed to Open. If provided, Open will skip locking 786 // the directory. Closing the database will not release the lock, and it's 787 // the responsibility of the caller to release the lock after closing the 788 // database. 789 // 790 // Open will enforce that the Lock passed locks the same directory passed to 791 // Open. Concurrent calls to Open using the same Lock are detected and 792 // prohibited. 793 Lock *Lock 794 795 // The count of L0 files necessary to trigger an L0 compaction. 796 L0CompactionFileThreshold int 797 798 // The amount of L0 read-amplification necessary to trigger an L0 compaction. 799 L0CompactionThreshold int 800 801 // Hard limit on L0 read-amplification, computed as the number of L0 802 // sublevels. Writes are stopped when this threshold is reached. 803 L0StopWritesThreshold int 804 805 // The maximum number of bytes for LBase. The base level is the level which 806 // L0 is compacted into. The base level is determined dynamically based on 807 // the existing data in the LSM. The maximum number of bytes for other levels 808 // is computed dynamically based on the base level's maximum size. When the 809 // maximum number of bytes for a level is exceeded, compaction is requested. 810 LBaseMaxBytes int64 811 812 // Per-level options. Options for at least one level must be specified. The 813 // options for the last level are used for all subsequent levels. 814 Levels []LevelOptions 815 816 // LoggerAndTracer will be used, if non-nil, else Logger will be used and 817 // tracing will be a noop. 818 819 // Logger used to write log messages. 820 // 821 // The default logger uses the Go standard library log package. 822 Logger Logger 823 // LoggerAndTracer is used for writing log messages and traces. 824 LoggerAndTracer LoggerAndTracer 825 826 // MaxManifestFileSize is the maximum size the MANIFEST file is allowed to 827 // become. When the MANIFEST exceeds this size it is rolled over and a new 828 // MANIFEST is created. 829 MaxManifestFileSize int64 830 831 // MaxOpenFiles is a soft limit on the number of open files that can be 832 // used by the DB. 833 // 834 // The default value is 1000. 835 MaxOpenFiles int 836 837 // The size of a MemTable in steady state. The actual MemTable size starts at 838 // min(256KB, MemTableSize) and doubles for each subsequent MemTable up to 839 // MemTableSize. This reduces the memory pressure caused by MemTables for 840 // short lived (test) DB instances. Note that more than one MemTable can be 841 // in existence since flushing a MemTable involves creating a new one and 842 // writing the contents of the old one in the 843 // background. MemTableStopWritesThreshold places a hard limit on the size of 844 // the queued MemTables. 845 // 846 // The default value is 4MB. 847 MemTableSize uint64 848 849 // Hard limit on the number of queued of MemTables. Writes are stopped when 850 // the sum of the queued memtable sizes exceeds: 851 // MemTableStopWritesThreshold * MemTableSize. 852 // 853 // This value should be at least 2 or writes will stop whenever a MemTable is 854 // being flushed. 855 // 856 // The default value is 2. 857 MemTableStopWritesThreshold int 858 859 // Merger defines the associative merge operation to use for merging values 860 // written with {Batch,DB}.Merge. 861 // 862 // The default merger concatenates values. 863 Merger *Merger 864 865 // MaxConcurrentCompactions specifies the maximum number of concurrent 866 // compactions. The default is 1. Concurrent compactions are performed 867 // - when L0 read-amplification passes the L0CompactionConcurrency threshold 868 // - for automatic background compactions 869 // - when a manual compaction for a level is split and parallelized 870 // MaxConcurrentCompactions must be greater than 0. 871 MaxConcurrentCompactions func() int 872 873 // DisableAutomaticCompactions dictates whether automatic compactions are 874 // scheduled or not. The default is false (enabled). This option is only used 875 // externally when running a manual compaction, and internally for tests. 876 DisableAutomaticCompactions bool 877 878 // NoSyncOnClose decides whether the Pebble instance will enforce a 879 // close-time synchronization (e.g., fdatasync() or sync_file_range()) 880 // on files it writes to. Setting this to true removes the guarantee for a 881 // sync on close. Some implementations can still issue a non-blocking sync. 882 NoSyncOnClose bool 883 884 // NumPrevManifest is the number of non-current or older manifests which 885 // we want to keep around for debugging purposes. By default, we're going 886 // to keep one older manifest. 887 NumPrevManifest int 888 889 // ReadOnly indicates that the DB should be opened in read-only mode. Writes 890 // to the DB will return an error, background compactions are disabled, and 891 // the flush that normally occurs after replaying the WAL at startup is 892 // disabled. 893 ReadOnly bool 894 895 // TableCache is an initialized TableCache which should be set as an 896 // option if the DB needs to be initialized with a pre-existing table cache. 897 // If TableCache is nil, then a table cache which is unique to the DB instance 898 // is created. TableCache can be shared between db instances by setting it here. 899 // The TableCache set here must use the same underlying cache as Options.Cache 900 // and pebble will panic otherwise. 901 TableCache *TableCache 902 903 // TablePropertyCollectors is a list of TablePropertyCollector creation 904 // functions. A new TablePropertyCollector is created for each sstable built 905 // and lives for the lifetime of the table. 906 TablePropertyCollectors []func() TablePropertyCollector 907 908 // BlockPropertyCollectors is a list of BlockPropertyCollector creation 909 // functions. A new BlockPropertyCollector is created for each sstable 910 // built and lives for the lifetime of writing that table. 911 BlockPropertyCollectors []func() BlockPropertyCollector 912 913 // WALBytesPerSync sets the number of bytes to write to a WAL before calling 914 // Sync on it in the background. Just like with BytesPerSync above, this 915 // helps smooth out disk write latencies, and avoids cases where the OS 916 // writes a lot of buffered data to disk at once. However, this is less 917 // necessary with WALs, as many write operations already pass in 918 // Sync = true. 919 // 920 // The default value is 0, i.e. no background syncing. This matches the 921 // default behaviour in RocksDB. 922 WALBytesPerSync int 923 924 // WALDir specifies the directory to store write-ahead logs (WALs) in. If 925 // empty (the default), WALs will be stored in the same directory as sstables 926 // (i.e. the directory passed to pebble.Open). 927 WALDir string 928 929 // WALMinSyncInterval is the minimum duration between syncs of the WAL. If 930 // WAL syncs are requested faster than this interval, they will be 931 // artificially delayed. Introducing a small artificial delay (500us) between 932 // WAL syncs can allow more operations to arrive and reduce IO operations 933 // while having a minimal impact on throughput. This option is supplied as a 934 // closure in order to allow the value to be changed dynamically. The default 935 // value is 0. 936 // 937 // TODO(peter): rather than a closure, should there be another mechanism for 938 // changing options dynamically? 939 WALMinSyncInterval func() time.Duration 940 941 // TargetByteDeletionRate is the rate (in bytes per second) at which sstable file 942 // deletions are limited to (under normal circumstances). 943 // 944 // Deletion pacing is used to slow down deletions when compactions finish up 945 // or readers close and newly-obsolete files need cleaning up. Deleting lots 946 // of files at once can cause disk latency to go up on some SSDs, which this 947 // functionality guards against. 948 // 949 // This value is only a best-effort target; the effective rate can be 950 // higher if deletions are falling behind or disk space is running low. 951 // 952 // Setting this to 0 disables deletion pacing, which is also the default. 953 TargetByteDeletionRate int 954 955 // private options are only used by internal tests or are used internally 956 // for facilitating upgrade paths of unconfigurable functionality. 957 private struct { 958 // strictWALTail configures whether or not a database's WALs created 959 // prior to the most recent one should be interpreted strictly, 960 // requiring a clean EOF. RocksDB 6.2.1 and the version of Pebble 961 // included in CockroachDB 20.1 do not guarantee that closed WALs end 962 // cleanly. If this option is set within an OPTIONS file, Pebble 963 // interprets previous WALs strictly, requiring a clean EOF. 964 // Otherwise, it interprets them permissively in the same manner as 965 // RocksDB 6.2.1. 966 strictWALTail bool 967 968 // disableDeleteOnlyCompactions prevents the scheduling of delete-only 969 // compactions that drop sstables wholy covered by range tombstones or 970 // range key tombstones. 971 disableDeleteOnlyCompactions bool 972 973 // disableElisionOnlyCompactions prevents the scheduling of elision-only 974 // compactions that rewrite sstables in place in order to elide obsolete 975 // keys. 976 disableElisionOnlyCompactions bool 977 978 // disableLazyCombinedIteration is a private option used by the 979 // metamorphic tests to test equivalence between lazy-combined iteration 980 // and constructing the range-key iterator upfront. It's a private 981 // option to avoid littering the public interface with options that we 982 // do not want to allow users to actually configure. 983 disableLazyCombinedIteration bool 984 985 // A private option to disable stats collection. 986 disableTableStats bool 987 988 // testingAlwaysWaitForCleanup is set by some tests to force waiting for 989 // obsolete file deletion (to make events deterministic). 990 testingAlwaysWaitForCleanup bool 991 992 // fsCloser holds a closer that should be invoked after a DB using these 993 // Options is closed. This is used to automatically stop the 994 // long-running goroutine associated with the disk-health-checking FS. 995 // See the initialization of FS in EnsureDefaults. Note that care has 996 // been taken to ensure that it is still safe to continue using the FS 997 // after this closer has been invoked. However, if write operations 998 // against the FS are made after the DB is closed, the FS may leak a 999 // goroutine indefinitely. 1000 fsCloser io.Closer 1001 1002 // efosAlwaysCreatesIterators is set by some tests to force 1003 // EventuallyFileOnlySnapshots to always create iterators, even after a 1004 // conflicting excise. 1005 efosAlwaysCreatesIterators bool 1006 } 1007 } 1008 1009 // DebugCheckLevels calls CheckLevels on the provided database. 1010 // It may be set in the DebugCheck field of Options to check 1011 // level invariants whenever a new version is installed. 1012 func DebugCheckLevels(db *DB) error { 1013 return db.CheckLevels(nil) 1014 } 1015 1016 // EnsureDefaults ensures that the default values for all options are set if a 1017 // valid value was not already specified. Returns the new options. 1018 func (o *Options) EnsureDefaults() *Options { 1019 if o == nil { 1020 o = &Options{} 1021 } 1022 if o.BytesPerSync <= 0 { 1023 o.BytesPerSync = 512 << 10 // 512 KB 1024 } 1025 if o.Cleaner == nil { 1026 o.Cleaner = DeleteCleaner{} 1027 } 1028 if o.Comparer == nil { 1029 o.Comparer = DefaultComparer 1030 } 1031 if o.Experimental.DisableIngestAsFlushable == nil { 1032 o.Experimental.DisableIngestAsFlushable = func() bool { return false } 1033 } 1034 if o.Experimental.L0CompactionConcurrency <= 0 { 1035 o.Experimental.L0CompactionConcurrency = 10 1036 } 1037 if o.Experimental.CompactionDebtConcurrency <= 0 { 1038 o.Experimental.CompactionDebtConcurrency = 1 << 30 // 1 GB 1039 } 1040 if o.Experimental.KeyValidationFunc == nil { 1041 o.Experimental.KeyValidationFunc = func([]byte) error { return nil } 1042 } 1043 if o.L0CompactionThreshold <= 0 { 1044 o.L0CompactionThreshold = 4 1045 } 1046 if o.L0CompactionFileThreshold <= 0 { 1047 // Some justification for the default of 500: 1048 // Why not smaller?: 1049 // - The default target file size for L0 is 2MB, so 500 files is <= 1GB 1050 // of data. At observed compaction speeds of > 20MB/s, L0 can be 1051 // cleared of all files in < 1min, so this backlog is not huge. 1052 // - 500 files is low overhead for instantiating L0 sublevels from 1053 // scratch. 1054 // - Lower values were observed to cause excessive and inefficient 1055 // compactions out of L0 in a TPCC import benchmark. 1056 // Why not larger?: 1057 // - More than 1min to compact everything out of L0. 1058 // - CockroachDB's admission control system uses a threshold of 1000 1059 // files to start throttling writes to Pebble. Using 500 here gives 1060 // us headroom between when Pebble should start compacting L0 and 1061 // when the admission control threshold is reached. 1062 // 1063 // We can revisit this default in the future based on better 1064 // experimental understanding. 1065 // 1066 // TODO(jackson): Experiment with slightly lower thresholds [or higher 1067 // admission control thresholds] to see whether a higher L0 score at the 1068 // threshold (currently 2.0) is necessary for some workloads to avoid 1069 // starving L0 in favor of lower-level compactions. 1070 o.L0CompactionFileThreshold = 500 1071 } 1072 if o.L0StopWritesThreshold <= 0 { 1073 o.L0StopWritesThreshold = 12 1074 } 1075 if o.LBaseMaxBytes <= 0 { 1076 o.LBaseMaxBytes = 64 << 20 // 64 MB 1077 } 1078 if o.Levels == nil { 1079 o.Levels = make([]LevelOptions, 1) 1080 for i := range o.Levels { 1081 if i > 0 { 1082 l := &o.Levels[i] 1083 if l.TargetFileSize <= 0 { 1084 l.TargetFileSize = o.Levels[i-1].TargetFileSize * 2 1085 } 1086 } 1087 o.Levels[i].EnsureDefaults() 1088 } 1089 } else { 1090 for i := range o.Levels { 1091 o.Levels[i].EnsureDefaults() 1092 } 1093 } 1094 if o.Logger == nil { 1095 o.Logger = DefaultLogger 1096 } 1097 if o.EventListener == nil { 1098 o.EventListener = &EventListener{} 1099 } 1100 o.EventListener.EnsureDefaults(o.Logger) 1101 if o.MaxManifestFileSize == 0 { 1102 o.MaxManifestFileSize = 128 << 20 // 128 MB 1103 } 1104 if o.MaxOpenFiles == 0 { 1105 o.MaxOpenFiles = 1000 1106 } 1107 if o.MemTableSize <= 0 { 1108 o.MemTableSize = 4 << 20 // 4 MB 1109 } 1110 if o.MemTableStopWritesThreshold <= 0 { 1111 o.MemTableStopWritesThreshold = 2 1112 } 1113 if o.Merger == nil { 1114 o.Merger = DefaultMerger 1115 } 1116 o.private.strictWALTail = true 1117 if o.MaxConcurrentCompactions == nil { 1118 o.MaxConcurrentCompactions = func() int { return 1 } 1119 } 1120 if o.NumPrevManifest <= 0 { 1121 o.NumPrevManifest = 1 1122 } 1123 1124 if o.FormatMajorVersion == FormatDefault { 1125 o.FormatMajorVersion = FormatMostCompatible 1126 } 1127 1128 if o.FS == nil { 1129 o.WithFSDefaults() 1130 } 1131 if o.FlushSplitBytes <= 0 { 1132 o.FlushSplitBytes = 2 * o.Levels[0].TargetFileSize 1133 } 1134 if o.Experimental.LevelMultiplier <= 0 { 1135 o.Experimental.LevelMultiplier = defaultLevelMultiplier 1136 } 1137 if o.Experimental.ReadCompactionRate == 0 { 1138 o.Experimental.ReadCompactionRate = 16000 1139 } 1140 if o.Experimental.ReadSamplingMultiplier == 0 { 1141 o.Experimental.ReadSamplingMultiplier = 1 << 4 1142 } 1143 if o.Experimental.TableCacheShards <= 0 { 1144 o.Experimental.TableCacheShards = runtime.GOMAXPROCS(0) 1145 } 1146 if o.Experimental.CPUWorkPermissionGranter == nil { 1147 o.Experimental.CPUWorkPermissionGranter = defaultCPUWorkGranter{} 1148 } 1149 if o.Experimental.MultiLevelCompactionHeuristic == nil { 1150 o.Experimental.MultiLevelCompactionHeuristic = WriteAmpHeuristic{} 1151 } 1152 1153 o.initMaps() 1154 return o 1155 } 1156 1157 // WithFSDefaults configures the Options to wrap the configured filesystem with 1158 // the default virtual file system middleware, like disk-health checking. 1159 func (o *Options) WithFSDefaults() *Options { 1160 if o.FS == nil { 1161 o.FS = vfs.Default 1162 } 1163 o.FS, o.private.fsCloser = vfs.WithDiskHealthChecks(o.FS, 5*time.Second, 1164 func(info vfs.DiskSlowInfo) { 1165 o.EventListener.DiskSlow(info) 1166 }) 1167 return o 1168 } 1169 1170 // AddEventListener adds the provided event listener to the Options, in addition 1171 // to any existing event listener. 1172 func (o *Options) AddEventListener(l EventListener) { 1173 if o.EventListener != nil { 1174 l = TeeEventListener(l, *o.EventListener) 1175 } 1176 o.EventListener = &l 1177 } 1178 1179 // TestingAlwaysCreateEFOSIterators is used to toggle a private option for 1180 // having EventuallyFileOnlySnapshots always create iterators. Meant to only 1181 // be used in tests. 1182 func (o *Options) TestingAlwaysCreateEFOSIterators(value bool) { 1183 o.private.efosAlwaysCreatesIterators = value 1184 } 1185 1186 func (o *Options) equal() Equal { 1187 if o.Comparer.Equal == nil { 1188 return bytes.Equal 1189 } 1190 return o.Comparer.Equal 1191 } 1192 1193 // initMaps initializes the Comparers, Filters, and Mergers maps. 1194 func (o *Options) initMaps() { 1195 for i := range o.Levels { 1196 l := &o.Levels[i] 1197 if l.FilterPolicy != nil { 1198 if o.Filters == nil { 1199 o.Filters = make(map[string]FilterPolicy) 1200 } 1201 name := l.FilterPolicy.Name() 1202 if _, ok := o.Filters[name]; !ok { 1203 o.Filters[name] = l.FilterPolicy 1204 } 1205 } 1206 } 1207 } 1208 1209 // Level returns the LevelOptions for the specified level. 1210 func (o *Options) Level(level int) LevelOptions { 1211 if level < len(o.Levels) { 1212 return o.Levels[level] 1213 } 1214 n := len(o.Levels) - 1 1215 l := o.Levels[n] 1216 for i := n; i < level; i++ { 1217 l.TargetFileSize *= 2 1218 } 1219 return l 1220 } 1221 1222 // Clone creates a shallow-copy of the supplied options. 1223 func (o *Options) Clone() *Options { 1224 n := &Options{} 1225 if o != nil { 1226 *n = *o 1227 } 1228 return n 1229 } 1230 1231 func filterPolicyName(p FilterPolicy) string { 1232 if p == nil { 1233 return "none" 1234 } 1235 return p.Name() 1236 } 1237 1238 func (o *Options) String() string { 1239 var buf bytes.Buffer 1240 1241 cacheSize := int64(cacheDefaultSize) 1242 if o.Cache != nil { 1243 cacheSize = o.Cache.MaxSize() 1244 } 1245 1246 fmt.Fprintf(&buf, "[Version]\n") 1247 fmt.Fprintf(&buf, " pebble_version=0.1\n") 1248 fmt.Fprintf(&buf, "\n") 1249 fmt.Fprintf(&buf, "[Options]\n") 1250 fmt.Fprintf(&buf, " bytes_per_sync=%d\n", o.BytesPerSync) 1251 fmt.Fprintf(&buf, " cache_size=%d\n", cacheSize) 1252 fmt.Fprintf(&buf, " cleaner=%s\n", o.Cleaner) 1253 fmt.Fprintf(&buf, " compaction_debt_concurrency=%d\n", o.Experimental.CompactionDebtConcurrency) 1254 fmt.Fprintf(&buf, " comparer=%s\n", o.Comparer.Name) 1255 fmt.Fprintf(&buf, " disable_wal=%t\n", o.DisableWAL) 1256 if o.Experimental.DisableIngestAsFlushable != nil && o.Experimental.DisableIngestAsFlushable() { 1257 fmt.Fprintf(&buf, " disable_ingest_as_flushable=%t\n", true) 1258 } 1259 fmt.Fprintf(&buf, " flush_delay_delete_range=%s\n", o.FlushDelayDeleteRange) 1260 fmt.Fprintf(&buf, " flush_delay_range_key=%s\n", o.FlushDelayRangeKey) 1261 fmt.Fprintf(&buf, " flush_split_bytes=%d\n", o.FlushSplitBytes) 1262 fmt.Fprintf(&buf, " format_major_version=%d\n", o.FormatMajorVersion) 1263 fmt.Fprintf(&buf, " l0_compaction_concurrency=%d\n", o.Experimental.L0CompactionConcurrency) 1264 fmt.Fprintf(&buf, " l0_compaction_file_threshold=%d\n", o.L0CompactionFileThreshold) 1265 fmt.Fprintf(&buf, " l0_compaction_threshold=%d\n", o.L0CompactionThreshold) 1266 fmt.Fprintf(&buf, " l0_stop_writes_threshold=%d\n", o.L0StopWritesThreshold) 1267 fmt.Fprintf(&buf, " lbase_max_bytes=%d\n", o.LBaseMaxBytes) 1268 if o.Experimental.LevelMultiplier != defaultLevelMultiplier { 1269 fmt.Fprintf(&buf, " level_multiplier=%d\n", o.Experimental.LevelMultiplier) 1270 } 1271 fmt.Fprintf(&buf, " max_concurrent_compactions=%d\n", o.MaxConcurrentCompactions()) 1272 fmt.Fprintf(&buf, " max_manifest_file_size=%d\n", o.MaxManifestFileSize) 1273 fmt.Fprintf(&buf, " max_open_files=%d\n", o.MaxOpenFiles) 1274 fmt.Fprintf(&buf, " mem_table_size=%d\n", o.MemTableSize) 1275 fmt.Fprintf(&buf, " mem_table_stop_writes_threshold=%d\n", o.MemTableStopWritesThreshold) 1276 fmt.Fprintf(&buf, " min_deletion_rate=%d\n", o.TargetByteDeletionRate) 1277 fmt.Fprintf(&buf, " merger=%s\n", o.Merger.Name) 1278 if o.Experimental.MultiLevelCompactionHeuristic != nil { 1279 fmt.Fprintf(&buf, " multilevel_compaction_heuristic=%s\n", o.Experimental.MultiLevelCompactionHeuristic.String()) 1280 } 1281 fmt.Fprintf(&buf, " read_compaction_rate=%d\n", o.Experimental.ReadCompactionRate) 1282 fmt.Fprintf(&buf, " read_sampling_multiplier=%d\n", o.Experimental.ReadSamplingMultiplier) 1283 fmt.Fprintf(&buf, " strict_wal_tail=%t\n", o.private.strictWALTail) 1284 fmt.Fprintf(&buf, " table_cache_shards=%d\n", o.Experimental.TableCacheShards) 1285 fmt.Fprintf(&buf, " table_property_collectors=[") 1286 for i := range o.TablePropertyCollectors { 1287 if i > 0 { 1288 fmt.Fprintf(&buf, ",") 1289 } 1290 // NB: This creates a new TablePropertyCollector, but Options.String() is 1291 // called rarely so the overhead of doing so is not consequential. 1292 fmt.Fprintf(&buf, "%s", o.TablePropertyCollectors[i]().Name()) 1293 } 1294 fmt.Fprintf(&buf, "]\n") 1295 fmt.Fprintf(&buf, " validate_on_ingest=%t\n", o.Experimental.ValidateOnIngest) 1296 fmt.Fprintf(&buf, " wal_dir=%s\n", o.WALDir) 1297 fmt.Fprintf(&buf, " wal_bytes_per_sync=%d\n", o.WALBytesPerSync) 1298 fmt.Fprintf(&buf, " max_writer_concurrency=%d\n", o.Experimental.MaxWriterConcurrency) 1299 fmt.Fprintf(&buf, " force_writer_parallelism=%t\n", o.Experimental.ForceWriterParallelism) 1300 fmt.Fprintf(&buf, " secondary_cache_size_bytes=%d\n", o.Experimental.SecondaryCacheSizeBytes) 1301 fmt.Fprintf(&buf, " create_on_shared=%d\n", o.Experimental.CreateOnShared) 1302 1303 // Private options. 1304 // 1305 // These options are only encoded if true, because we do not want them to 1306 // appear in production serialized Options files, since they're testing-only 1307 // options. They're only serialized when true, which still ensures that the 1308 // metamorphic tests may propagate them to subprocesses. 1309 if o.private.disableDeleteOnlyCompactions { 1310 fmt.Fprintln(&buf, " disable_delete_only_compactions=true") 1311 } 1312 if o.private.disableElisionOnlyCompactions { 1313 fmt.Fprintln(&buf, " disable_elision_only_compactions=true") 1314 } 1315 if o.private.disableLazyCombinedIteration { 1316 fmt.Fprintln(&buf, " disable_lazy_combined_iteration=true") 1317 } 1318 1319 for i := range o.Levels { 1320 l := &o.Levels[i] 1321 fmt.Fprintf(&buf, "\n") 1322 fmt.Fprintf(&buf, "[Level \"%d\"]\n", i) 1323 fmt.Fprintf(&buf, " block_restart_interval=%d\n", l.BlockRestartInterval) 1324 fmt.Fprintf(&buf, " block_size=%d\n", l.BlockSize) 1325 fmt.Fprintf(&buf, " block_size_threshold=%d\n", l.BlockSizeThreshold) 1326 fmt.Fprintf(&buf, " compression=%s\n", l.Compression) 1327 fmt.Fprintf(&buf, " filter_policy=%s\n", filterPolicyName(l.FilterPolicy)) 1328 fmt.Fprintf(&buf, " filter_type=%s\n", l.FilterType) 1329 fmt.Fprintf(&buf, " index_block_size=%d\n", l.IndexBlockSize) 1330 fmt.Fprintf(&buf, " target_file_size=%d\n", l.TargetFileSize) 1331 } 1332 1333 return buf.String() 1334 } 1335 1336 func parseOptions(s string, fn func(section, key, value string) error) error { 1337 var section string 1338 for _, line := range strings.Split(s, "\n") { 1339 line = strings.TrimSpace(line) 1340 if len(line) == 0 { 1341 // Skip blank lines. 1342 continue 1343 } 1344 if line[0] == ';' || line[0] == '#' { 1345 // Skip comments. 1346 continue 1347 } 1348 n := len(line) 1349 if line[0] == '[' && line[n-1] == ']' { 1350 // Parse section. 1351 section = line[1 : n-1] 1352 continue 1353 } 1354 1355 pos := strings.Index(line, "=") 1356 if pos < 0 { 1357 const maxLen = 50 1358 if len(line) > maxLen { 1359 line = line[:maxLen-3] + "..." 1360 } 1361 return base.CorruptionErrorf("invalid key=value syntax: %q", errors.Safe(line)) 1362 } 1363 1364 key := strings.TrimSpace(line[:pos]) 1365 value := strings.TrimSpace(line[pos+1:]) 1366 1367 // RocksDB uses a similar (INI-style) syntax for the OPTIONS file, but 1368 // different section names and keys. The "CFOptions ..." paths are the 1369 // RocksDB versions which we map to the Pebble paths. 1370 mappedSection := section 1371 if section == `CFOptions "default"` { 1372 mappedSection = "Options" 1373 switch key { 1374 case "comparator": 1375 key = "comparer" 1376 case "merge_operator": 1377 key = "merger" 1378 } 1379 } 1380 1381 if err := fn(mappedSection, key, value); err != nil { 1382 return err 1383 } 1384 } 1385 return nil 1386 } 1387 1388 // ParseHooks contains callbacks to create options fields which can have 1389 // user-defined implementations. 1390 type ParseHooks struct { 1391 NewCache func(size int64) *Cache 1392 NewCleaner func(name string) (Cleaner, error) 1393 NewComparer func(name string) (*Comparer, error) 1394 NewFilterPolicy func(name string) (FilterPolicy, error) 1395 NewMerger func(name string) (*Merger, error) 1396 SkipUnknown func(name, value string) bool 1397 } 1398 1399 // Parse parses the options from the specified string. Note that certain 1400 // options cannot be parsed into populated fields. For example, comparer and 1401 // merger. 1402 func (o *Options) Parse(s string, hooks *ParseHooks) error { 1403 return parseOptions(s, func(section, key, value string) error { 1404 // WARNING: DO NOT remove entries from the switches below because doing so 1405 // causes a key previously written to the OPTIONS file to be considered unknown, 1406 // a backwards incompatible change. Instead, leave in support for parsing the 1407 // key but simply don't parse the value. 1408 1409 switch { 1410 case section == "Version": 1411 switch key { 1412 case "pebble_version": 1413 default: 1414 if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) { 1415 return nil 1416 } 1417 return errors.Errorf("pebble: unknown option: %s.%s", 1418 errors.Safe(section), errors.Safe(key)) 1419 } 1420 return nil 1421 1422 case section == "Options": 1423 var err error 1424 switch key { 1425 case "bytes_per_sync": 1426 o.BytesPerSync, err = strconv.Atoi(value) 1427 case "cache_size": 1428 var n int64 1429 n, err = strconv.ParseInt(value, 10, 64) 1430 if err == nil && hooks != nil && hooks.NewCache != nil { 1431 if o.Cache != nil { 1432 o.Cache.Unref() 1433 } 1434 o.Cache = hooks.NewCache(n) 1435 } 1436 // We avoid calling cache.New in parsing because it makes it 1437 // too easy to leak a cache. 1438 case "cleaner": 1439 switch value { 1440 case "archive": 1441 o.Cleaner = ArchiveCleaner{} 1442 case "delete": 1443 o.Cleaner = DeleteCleaner{} 1444 default: 1445 if hooks != nil && hooks.NewCleaner != nil { 1446 o.Cleaner, err = hooks.NewCleaner(value) 1447 } 1448 } 1449 case "comparer": 1450 switch value { 1451 case "leveldb.BytewiseComparator": 1452 o.Comparer = DefaultComparer 1453 default: 1454 if hooks != nil && hooks.NewComparer != nil { 1455 o.Comparer, err = hooks.NewComparer(value) 1456 } 1457 } 1458 case "compaction_debt_concurrency": 1459 o.Experimental.CompactionDebtConcurrency, err = strconv.ParseUint(value, 10, 64) 1460 case "delete_range_flush_delay": 1461 // NB: This is a deprecated serialization of the 1462 // `flush_delay_delete_range`. 1463 o.FlushDelayDeleteRange, err = time.ParseDuration(value) 1464 case "disable_delete_only_compactions": 1465 o.private.disableDeleteOnlyCompactions, err = strconv.ParseBool(value) 1466 case "disable_elision_only_compactions": 1467 o.private.disableElisionOnlyCompactions, err = strconv.ParseBool(value) 1468 case "disable_ingest_as_flushable": 1469 var v bool 1470 v, err = strconv.ParseBool(value) 1471 if err == nil { 1472 o.Experimental.DisableIngestAsFlushable = func() bool { return v } 1473 } 1474 case "disable_lazy_combined_iteration": 1475 o.private.disableLazyCombinedIteration, err = strconv.ParseBool(value) 1476 case "disable_wal": 1477 o.DisableWAL, err = strconv.ParseBool(value) 1478 case "flush_delay_delete_range": 1479 o.FlushDelayDeleteRange, err = time.ParseDuration(value) 1480 case "flush_delay_range_key": 1481 o.FlushDelayRangeKey, err = time.ParseDuration(value) 1482 case "flush_split_bytes": 1483 o.FlushSplitBytes, err = strconv.ParseInt(value, 10, 64) 1484 case "format_major_version": 1485 // NB: The version written here may be stale. Open does 1486 // not use the format major version encoded in the 1487 // OPTIONS file other than to validate that the encoded 1488 // version is valid right here. 1489 var v uint64 1490 v, err = strconv.ParseUint(value, 10, 64) 1491 if vers := FormatMajorVersion(v); vers > internalFormatNewest || vers == FormatDefault { 1492 err = errors.Newf("unknown format major version %d", o.FormatMajorVersion) 1493 } 1494 if err == nil { 1495 o.FormatMajorVersion = FormatMajorVersion(v) 1496 } 1497 case "l0_compaction_concurrency": 1498 o.Experimental.L0CompactionConcurrency, err = strconv.Atoi(value) 1499 case "l0_compaction_file_threshold": 1500 o.L0CompactionFileThreshold, err = strconv.Atoi(value) 1501 case "l0_compaction_threshold": 1502 o.L0CompactionThreshold, err = strconv.Atoi(value) 1503 case "l0_stop_writes_threshold": 1504 o.L0StopWritesThreshold, err = strconv.Atoi(value) 1505 case "l0_sublevel_compactions": 1506 // Do nothing; option existed in older versions of pebble. 1507 case "lbase_max_bytes": 1508 o.LBaseMaxBytes, err = strconv.ParseInt(value, 10, 64) 1509 case "level_multiplier": 1510 o.Experimental.LevelMultiplier, err = strconv.Atoi(value) 1511 case "max_concurrent_compactions": 1512 var concurrentCompactions int 1513 concurrentCompactions, err = strconv.Atoi(value) 1514 if concurrentCompactions <= 0 { 1515 err = errors.New("max_concurrent_compactions cannot be <= 0") 1516 } else { 1517 o.MaxConcurrentCompactions = func() int { return concurrentCompactions } 1518 } 1519 case "max_manifest_file_size": 1520 o.MaxManifestFileSize, err = strconv.ParseInt(value, 10, 64) 1521 case "max_open_files": 1522 o.MaxOpenFiles, err = strconv.Atoi(value) 1523 case "mem_table_size": 1524 o.MemTableSize, err = strconv.ParseUint(value, 10, 64) 1525 case "mem_table_stop_writes_threshold": 1526 o.MemTableStopWritesThreshold, err = strconv.Atoi(value) 1527 case "min_compaction_rate": 1528 // Do nothing; option existed in older versions of pebble, and 1529 // may be meaningful again eventually. 1530 case "min_deletion_rate": 1531 o.TargetByteDeletionRate, err = strconv.Atoi(value) 1532 case "min_flush_rate": 1533 // Do nothing; option existed in older versions of pebble, and 1534 // may be meaningful again eventually. 1535 case "multilevel_compaction_heuristic": 1536 switch { 1537 case value == "none": 1538 o.Experimental.MultiLevelCompactionHeuristic = NoMultiLevel{} 1539 case strings.HasPrefix(value, "wamp"): 1540 fields := strings.FieldsFunc(strings.TrimPrefix(value, "wamp"), func(r rune) bool { 1541 return unicode.IsSpace(r) || r == ',' || r == '(' || r == ')' 1542 }) 1543 if len(fields) != 2 { 1544 err = errors.Newf("require 2 arguments") 1545 } 1546 var h WriteAmpHeuristic 1547 if err == nil { 1548 h.AddPropensity, err = strconv.ParseFloat(fields[0], 64) 1549 } 1550 if err == nil { 1551 h.AllowL0, err = strconv.ParseBool(fields[1]) 1552 } 1553 if err == nil { 1554 o.Experimental.MultiLevelCompactionHeuristic = h 1555 } else { 1556 err = errors.Wrapf(err, "unexpected wamp heuristic arguments: %s", value) 1557 } 1558 default: 1559 err = errors.Newf("unrecognized multilevel compaction heuristic: %s", value) 1560 } 1561 case "point_tombstone_weight": 1562 // Do nothing; deprecated. 1563 case "strict_wal_tail": 1564 o.private.strictWALTail, err = strconv.ParseBool(value) 1565 case "merger": 1566 switch value { 1567 case "nullptr": 1568 o.Merger = nil 1569 case "pebble.concatenate": 1570 o.Merger = DefaultMerger 1571 default: 1572 if hooks != nil && hooks.NewMerger != nil { 1573 o.Merger, err = hooks.NewMerger(value) 1574 } 1575 } 1576 case "read_compaction_rate": 1577 o.Experimental.ReadCompactionRate, err = strconv.ParseInt(value, 10, 64) 1578 case "read_sampling_multiplier": 1579 o.Experimental.ReadSamplingMultiplier, err = strconv.ParseInt(value, 10, 64) 1580 case "table_cache_shards": 1581 o.Experimental.TableCacheShards, err = strconv.Atoi(value) 1582 case "table_format": 1583 switch value { 1584 case "leveldb": 1585 case "rocksdbv2": 1586 default: 1587 return errors.Errorf("pebble: unknown table format: %q", errors.Safe(value)) 1588 } 1589 case "table_property_collectors": 1590 // TODO(peter): set o.TablePropertyCollectors 1591 case "validate_on_ingest": 1592 o.Experimental.ValidateOnIngest, err = strconv.ParseBool(value) 1593 case "wal_dir": 1594 o.WALDir = value 1595 case "wal_bytes_per_sync": 1596 o.WALBytesPerSync, err = strconv.Atoi(value) 1597 case "max_writer_concurrency": 1598 o.Experimental.MaxWriterConcurrency, err = strconv.Atoi(value) 1599 case "force_writer_parallelism": 1600 o.Experimental.ForceWriterParallelism, err = strconv.ParseBool(value) 1601 case "secondary_cache_size_bytes": 1602 o.Experimental.SecondaryCacheSizeBytes, err = strconv.ParseInt(value, 10, 64) 1603 case "create_on_shared": 1604 var createOnSharedInt int64 1605 createOnSharedInt, err = strconv.ParseInt(value, 10, 64) 1606 o.Experimental.CreateOnShared = remote.CreateOnSharedStrategy(createOnSharedInt) 1607 default: 1608 if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) { 1609 return nil 1610 } 1611 return errors.Errorf("pebble: unknown option: %s.%s", 1612 errors.Safe(section), errors.Safe(key)) 1613 } 1614 return err 1615 1616 case strings.HasPrefix(section, "Level "): 1617 var index int 1618 if n, err := fmt.Sscanf(section, `Level "%d"`, &index); err != nil { 1619 return err 1620 } else if n != 1 { 1621 if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section, value) { 1622 return nil 1623 } 1624 return errors.Errorf("pebble: unknown section: %q", errors.Safe(section)) 1625 } 1626 1627 if len(o.Levels) <= index { 1628 newLevels := make([]LevelOptions, index+1) 1629 copy(newLevels, o.Levels) 1630 o.Levels = newLevels 1631 } 1632 l := &o.Levels[index] 1633 1634 var err error 1635 switch key { 1636 case "block_restart_interval": 1637 l.BlockRestartInterval, err = strconv.Atoi(value) 1638 case "block_size": 1639 l.BlockSize, err = strconv.Atoi(value) 1640 case "block_size_threshold": 1641 l.BlockSizeThreshold, err = strconv.Atoi(value) 1642 case "compression": 1643 switch value { 1644 case "Default": 1645 l.Compression = DefaultCompression 1646 case "NoCompression": 1647 l.Compression = NoCompression 1648 case "Snappy": 1649 l.Compression = SnappyCompression 1650 case "ZSTD": 1651 l.Compression = ZstdCompression 1652 default: 1653 return errors.Errorf("pebble: unknown compression: %q", errors.Safe(value)) 1654 } 1655 case "filter_policy": 1656 if hooks != nil && hooks.NewFilterPolicy != nil { 1657 l.FilterPolicy, err = hooks.NewFilterPolicy(value) 1658 } 1659 case "filter_type": 1660 switch value { 1661 case "table": 1662 l.FilterType = TableFilter 1663 default: 1664 return errors.Errorf("pebble: unknown filter type: %q", errors.Safe(value)) 1665 } 1666 case "index_block_size": 1667 l.IndexBlockSize, err = strconv.Atoi(value) 1668 case "target_file_size": 1669 l.TargetFileSize, err = strconv.ParseInt(value, 10, 64) 1670 default: 1671 if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) { 1672 return nil 1673 } 1674 return errors.Errorf("pebble: unknown option: %s.%s", errors.Safe(section), errors.Safe(key)) 1675 } 1676 return err 1677 } 1678 if hooks != nil && hooks.SkipUnknown != nil && hooks.SkipUnknown(section+"."+key, value) { 1679 return nil 1680 } 1681 return errors.Errorf("pebble: unknown section: %q", errors.Safe(section)) 1682 }) 1683 } 1684 1685 func (o *Options) checkOptions(s string) (strictWALTail bool, err error) { 1686 // TODO(jackson): Refactor to avoid awkwardness of the strictWALTail return value. 1687 return strictWALTail, parseOptions(s, func(section, key, value string) error { 1688 switch section + "." + key { 1689 case "Options.comparer": 1690 if value != o.Comparer.Name { 1691 return errors.Errorf("pebble: comparer name from file %q != comparer name from options %q", 1692 errors.Safe(value), errors.Safe(o.Comparer.Name)) 1693 } 1694 case "Options.merger": 1695 // RocksDB allows the merge operator to be unspecified, in which case it 1696 // shows up as "nullptr". 1697 if value != "nullptr" && value != o.Merger.Name { 1698 return errors.Errorf("pebble: merger name from file %q != merger name from options %q", 1699 errors.Safe(value), errors.Safe(o.Merger.Name)) 1700 } 1701 case "Options.strict_wal_tail": 1702 strictWALTail, err = strconv.ParseBool(value) 1703 if err != nil { 1704 return errors.Errorf("pebble: error parsing strict_wal_tail value %q: %w", value, err) 1705 } 1706 } 1707 return nil 1708 }) 1709 } 1710 1711 // Check verifies the options are compatible with the previous options 1712 // serialized by Options.String(). For example, the Comparer and Merger must be 1713 // the same, or data will not be able to be properly read from the DB. 1714 func (o *Options) Check(s string) error { 1715 _, err := o.checkOptions(s) 1716 return err 1717 } 1718 1719 // Validate verifies that the options are mutually consistent. For example, 1720 // L0StopWritesThreshold must be >= L0CompactionThreshold, otherwise a write 1721 // stall would persist indefinitely. 1722 func (o *Options) Validate() error { 1723 // Note that we can presume Options.EnsureDefaults has been called, so there 1724 // is no need to check for zero values. 1725 1726 var buf strings.Builder 1727 if o.Experimental.L0CompactionConcurrency < 1 { 1728 fmt.Fprintf(&buf, "L0CompactionConcurrency (%d) must be >= 1\n", 1729 o.Experimental.L0CompactionConcurrency) 1730 } 1731 if o.L0StopWritesThreshold < o.L0CompactionThreshold { 1732 fmt.Fprintf(&buf, "L0StopWritesThreshold (%d) must be >= L0CompactionThreshold (%d)\n", 1733 o.L0StopWritesThreshold, o.L0CompactionThreshold) 1734 } 1735 if uint64(o.MemTableSize) >= maxMemTableSize { 1736 fmt.Fprintf(&buf, "MemTableSize (%s) must be < %s\n", 1737 humanize.Bytes.Uint64(uint64(o.MemTableSize)), humanize.Bytes.Uint64(maxMemTableSize)) 1738 } 1739 if o.MemTableStopWritesThreshold < 2 { 1740 fmt.Fprintf(&buf, "MemTableStopWritesThreshold (%d) must be >= 2\n", 1741 o.MemTableStopWritesThreshold) 1742 } 1743 if o.FormatMajorVersion > internalFormatNewest { 1744 fmt.Fprintf(&buf, "FormatMajorVersion (%d) must be <= %d\n", 1745 o.FormatMajorVersion, internalFormatNewest) 1746 } 1747 if o.TableCache != nil && o.Cache != o.TableCache.cache { 1748 fmt.Fprintf(&buf, "underlying cache in the TableCache and the Cache dont match\n") 1749 } 1750 if buf.Len() == 0 { 1751 return nil 1752 } 1753 return errors.New(buf.String()) 1754 } 1755 1756 // MakeReaderOptions constructs sstable.ReaderOptions from the corresponding 1757 // options in the receiver. 1758 func (o *Options) MakeReaderOptions() sstable.ReaderOptions { 1759 var readerOpts sstable.ReaderOptions 1760 if o != nil { 1761 readerOpts.Cache = o.Cache 1762 readerOpts.Comparer = o.Comparer 1763 readerOpts.Filters = o.Filters 1764 if o.Merger != nil { 1765 readerOpts.Merge = o.Merger.Merge 1766 readerOpts.MergerName = o.Merger.Name 1767 } 1768 readerOpts.LoggerAndTracer = o.LoggerAndTracer 1769 } 1770 return readerOpts 1771 } 1772 1773 // MakeWriterOptions constructs sstable.WriterOptions for the specified level 1774 // from the corresponding options in the receiver. 1775 func (o *Options) MakeWriterOptions(level int, format sstable.TableFormat) sstable.WriterOptions { 1776 var writerOpts sstable.WriterOptions 1777 writerOpts.TableFormat = format 1778 if o != nil { 1779 writerOpts.Cache = o.Cache 1780 writerOpts.Comparer = o.Comparer 1781 if o.Merger != nil { 1782 writerOpts.MergerName = o.Merger.Name 1783 } 1784 writerOpts.TablePropertyCollectors = o.TablePropertyCollectors 1785 writerOpts.BlockPropertyCollectors = o.BlockPropertyCollectors 1786 } 1787 if format >= sstable.TableFormatPebblev3 { 1788 writerOpts.ShortAttributeExtractor = o.Experimental.ShortAttributeExtractor 1789 writerOpts.RequiredInPlaceValueBound = o.Experimental.RequiredInPlaceValueBound 1790 if format >= sstable.TableFormatPebblev4 && level == numLevels-1 { 1791 writerOpts.WritingToLowestLevel = true 1792 } 1793 } 1794 levelOpts := o.Level(level) 1795 writerOpts.BlockRestartInterval = levelOpts.BlockRestartInterval 1796 writerOpts.BlockSize = levelOpts.BlockSize 1797 writerOpts.BlockSizeThreshold = levelOpts.BlockSizeThreshold 1798 writerOpts.Compression = levelOpts.Compression 1799 writerOpts.FilterPolicy = levelOpts.FilterPolicy 1800 writerOpts.FilterType = levelOpts.FilterType 1801 writerOpts.IndexBlockSize = levelOpts.IndexBlockSize 1802 return writerOpts 1803 }