github.com/insionng/yougam@v0.0.0-20170714101924-2bc18d833463/libraries/syndtr/goleveldb/leveldb/opt/options.go (about)

     1  // Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
     2  // All rights reserved.
     3  //
     4  // Use of this source code is governed by a BSD-style license that can be
     5  // found in the LICENSE file.
     6  
     7  // Package opt provides sets of options used by LevelDB.
     8  package opt
     9  
    10  import (
    11  	"math"
    12  
    13  	"github.com/insionng/yougam/libraries/syndtr/goleveldb/leveldb/cache"
    14  	"github.com/insionng/yougam/libraries/syndtr/goleveldb/leveldb/comparer"
    15  	"github.com/insionng/yougam/libraries/syndtr/goleveldb/leveldb/filter"
    16  )
    17  
    18  const (
    19  	KiB = 1024
    20  	MiB = KiB * 1024
    21  	GiB = MiB * 1024
    22  )
    23  
// Default values used by the Options Get* accessors whenever the
// corresponding Options field is unset (zero). See the matching field
// documentation on Options for the meaning of each value.
var (
	DefaultBlockCacher                   = LRUCacher
	DefaultBlockCacheCapacity            = 8 * MiB
	DefaultBlockRestartInterval          = 16
	DefaultBlockSize                     = 4 * KiB
	DefaultCompactionExpandLimitFactor   = 25
	DefaultCompactionGPOverlapsFactor    = 10
	DefaultCompactionL0Trigger           = 4
	DefaultCompactionSourceLimitFactor   = 1
	DefaultCompactionTableSize           = 2 * MiB
	DefaultCompactionTableSizeMultiplier = 1.0
	DefaultCompactionTotalSize           = 10 * MiB
	DefaultCompactionTotalSizeMultiplier = 10.0
	DefaultCompressionType               = SnappyCompression
	DefaultIteratorSamplingRate          = 1 * MiB
	DefaultOpenFilesCacher               = LRUCacher
	DefaultOpenFilesCacheCapacity        = 500
	DefaultWriteBuffer                   = 4 * MiB
	DefaultWriteL0PauseTrigger           = 12
	DefaultWriteL0SlowdownTrigger        = 8
)
    45  
// Cacher is a caching algorithm: a factory that constructs
// cache.Cacher instances of a given capacity.
type Cacher interface {
	// New returns a new cache.Cacher with the given capacity.
	New(capacity int) cache.Cacher
}
    50  
    51  type CacherFunc struct {
    52  	NewFunc func(capacity int) cache.Cacher
    53  }
    54  
    55  func (f *CacherFunc) New(capacity int) cache.Cacher {
    56  	if f.NewFunc != nil {
    57  		return f.NewFunc(capacity)
    58  	}
    59  	return nil
    60  }
    61  
// noCacher is a cache.Cacher constructor that always returns nil (no caching).
func noCacher(int) cache.Cacher { return nil }
    63  
var (
	// LRUCacher is the LRU-cache algorithm.
	LRUCacher = &CacherFunc{cache.NewLRU}

	// NoCacher disables the caching algorithm; its New method returns nil.
	NoCacher = &CacherFunc{}
)
    71  
    72  // Compression is the 'sorted table' block compression algorithm to use.
    73  type Compression uint
    74  
    75  func (c Compression) String() string {
    76  	switch c {
    77  	case DefaultCompression:
    78  		return "default"
    79  	case NoCompression:
    80  		return "none"
    81  	case SnappyCompression:
    82  		return "snappy"
    83  	}
    84  	return "invalid"
    85  }
    86  
    87  const (
    88  	DefaultCompression Compression = iota
    89  	NoCompression
    90  	SnappyCompression
    91  	nCompression
    92  )
    93  
// Strict is the DB 'strict level'. It is a bit set: each flag makes a
// particular class of corruption fatal instead of being tolerated.
type Strict uint

const (
	// If present then a corrupted or invalid chunk or block in manifest
	// journal will cause an error instead of being dropped.
	// This will prevent a database with a corrupted manifest from being opened.
	StrictManifest Strict = 1 << iota

	// If present then journal chunk checksum will be verified.
	StrictJournalChecksum

	// If present then a corrupted or invalid chunk or block in journal
	// will cause an error instead of being dropped.
	// This will prevent a database with a corrupted journal from being opened.
	StrictJournal

	// If present then 'sorted table' block checksum will be verified.
	// This has effect on both 'read operation' and compaction.
	StrictBlockChecksum

	// If present then a corrupted 'sorted table' will fail compaction.
	// The database will enter read-only mode.
	StrictCompaction

	// If present then a corrupted 'sorted table' will halt 'read operation'.
	StrictReader

	// If present then leveldb.Recover will drop corrupted 'sorted table'.
	StrictRecovery

	// This is only applicable to ReadOptions; if present then this
	// ReadOptions 'strict level' will override the global one.
	StrictOverride

	// StrictAll enables all strict flags.
	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery

	// DefaultStrict is the default strict flags. Specifying any strict flags
	// will override the default strict flags as a whole (i.e. not OR'ed).
	DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader

	// NoStrict disables all strict flags. Overrides default strict flags.
	NoStrict = ^StrictAll
)
   139  
// Options holds the optional parameters for the DB at large.
// The zero value is valid; each Get* accessor substitutes the
// corresponding Default* value for an unset (zero) field.
type Options struct {
	// AltFilters defines one or more 'alternative filters'.
	// 'alternative filters' will be used during reads if a filter block
	// does not match with the 'effective filter'.
	//
	// The default value is nil.
	AltFilters []filter.Filter

	// BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
	// Specify NoCacher to disable caching algorithm.
	//
	// The default value is LRUCacher.
	BlockCacher Cacher

	// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
	// Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher.
	//
	// The default value is 8MiB.
	BlockCacheCapacity int

	// BlockRestartInterval is the number of keys between restart points for
	// delta encoding of keys.
	//
	// The default value is 16.
	BlockRestartInterval int

	// BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
	// block.
	//
	// The default value is 4KiB.
	BlockSize int

	// CompactionExpandLimitFactor limits compaction size after expanded.
	// This will be multiplied by table size limit at compaction target level.
	//
	// The default value is 25.
	CompactionExpandLimitFactor int

	// CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a
	// single 'sorted table' generates.
	// This will be multiplied by table size limit at grandparent level.
	//
	// The default value is 10.
	CompactionGPOverlapsFactor int

	// CompactionL0Trigger defines number of 'sorted table' at level-0 that will
	// trigger compaction.
	//
	// The default value is 4.
	CompactionL0Trigger int

	// CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
	// level-0.
	// This will be multiplied by table size limit at compaction target level.
	//
	// The default value is 1.
	CompactionSourceLimitFactor int

	// CompactionTableSize limits size of 'sorted table' that compaction generates.
	// The limits for each level will be calculated as:
	//   CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
	// The multiplier for each level can also be fine-tuned using
	// CompactionTableSizeMultiplierPerLevel.
	//
	// The default value is 2MiB.
	CompactionTableSize int

	// CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
	//
	// The default value is 1.
	CompactionTableSizeMultiplier float64

	// CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
	// CompactionTableSize.
	// Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTableSizeMultiplierPerLevel []float64

	// CompactionTotalSize limits total size of 'sorted table' for each level.
	// The limits for each level will be calculated as:
	//   CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
	// The multiplier for each level can also be fine-tuned using
	// CompactionTotalSizeMultiplierPerLevel.
	//
	// The default value is 10MiB.
	CompactionTotalSize int

	// CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
	//
	// The default value is 10.
	CompactionTotalSizeMultiplier float64

	// CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
	// CompactionTotalSize.
	// Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTotalSizeMultiplierPerLevel []float64

	// Comparer defines a total ordering over the space of []byte keys: a 'less
	// than' relationship. The same comparison algorithm must be used for reads
	// and writes over the lifetime of the DB.
	//
	// The default value uses the same ordering as bytes.Compare.
	Comparer comparer.Comparer

	// Compression defines the 'sorted table' block compression to use.
	//
	// The default value (DefaultCompression) uses snappy compression.
	Compression Compression

	// DisableBufferPool allows disabling use of util.BufferPool functionality.
	//
	// The default value is false.
	DisableBufferPool bool

	// DisableBlockCache allows disabling use of cache.Cache functionality on
	// 'sorted table' block.
	//
	// The default value is false.
	DisableBlockCache bool

	// DisableCompactionBackoff allows disabling compaction retry backoff.
	//
	// The default value is false.
	DisableCompactionBackoff bool

	// DisableLargeBatchTransaction allows disabling switch-to-transaction mode
	// on large batch write. When not disabled, batch writes larger than
	// WriteBuffer will use transaction.
	//
	// The default is false.
	DisableLargeBatchTransaction bool

	// ErrorIfExist defines whether an error should be returned if the DB
	// already exists.
	//
	// The default value is false.
	ErrorIfExist bool

	// ErrorIfMissing defines whether an error should be returned if the DB is
	// missing. If false then the database will be created if missing, otherwise
	// an error will be returned.
	//
	// The default value is false.
	ErrorIfMissing bool

	// Filter defines an 'effective filter' to use. An 'effective filter'
	// if defined will be used to generate per-table filter block.
	// The filter name will be stored on disk.
	// During reads LevelDB will try to find matching filter from
	// 'effective filter' and 'alternative filters'.
	//
	// Filter can be changed after a DB has been created. It is recommended
	// to put old filter to the 'alternative filters' to mitigate lack of
	// filter during transition period.
	//
	// A filter is used to reduce disk reads when looking for a specific key.
	//
	// The default value is nil.
	Filter filter.Filter

	// IteratorSamplingRate defines approximate gap (in bytes) between read
	// sampling of an iterator. The samples will be used to determine when
	// compaction should be triggered.
	//
	// The default is 1MiB.
	IteratorSamplingRate int

	// NoSync allows completely disabling fsync.
	//
	// The default is false.
	NoSync bool

	// OpenFilesCacher provides cache algorithm for open files caching.
	// Specify NoCacher to disable caching algorithm.
	//
	// The default value is LRUCacher.
	OpenFilesCacher Cacher

	// OpenFilesCacheCapacity defines the capacity of the open files caching.
	// Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher.
	//
	// The default value is 500.
	OpenFilesCacheCapacity int

	// ReadOnly, if true, opens the DB in read-only mode.
	//
	// The default value is false.
	ReadOnly bool

	// Strict defines the DB strict level.
	Strict Strict

	// WriteBuffer defines maximum size of a 'memdb' before flushed to
	// 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk
	// unsorted journal.
	//
	// LevelDB may hold up to two 'memdb's at the same time.
	//
	// The default value is 4MiB.
	WriteBuffer int

	// WriteL0PauseTrigger defines number of 'sorted table' at level-0 that will
	// pause write.
	//
	// The default value is 12.
	WriteL0PauseTrigger int

	// WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that
	// will trigger write slowdown.
	//
	// The default value is 8.
	WriteL0SlowdownTrigger int
}
   356  
   357  func (o *Options) GetAltFilters() []filter.Filter {
   358  	if o == nil {
   359  		return nil
   360  	}
   361  	return o.AltFilters
   362  }
   363  
   364  func (o *Options) GetBlockCacher() Cacher {
   365  	if o == nil || o.BlockCacher == nil {
   366  		return DefaultBlockCacher
   367  	} else if o.BlockCacher == NoCacher {
   368  		return nil
   369  	}
   370  	return o.BlockCacher
   371  }
   372  
   373  func (o *Options) GetBlockCacheCapacity() int {
   374  	if o == nil || o.BlockCacheCapacity == 0 {
   375  		return DefaultBlockCacheCapacity
   376  	} else if o.BlockCacheCapacity < 0 {
   377  		return 0
   378  	}
   379  	return o.BlockCacheCapacity
   380  }
   381  
   382  func (o *Options) GetBlockRestartInterval() int {
   383  	if o == nil || o.BlockRestartInterval <= 0 {
   384  		return DefaultBlockRestartInterval
   385  	}
   386  	return o.BlockRestartInterval
   387  }
   388  
   389  func (o *Options) GetBlockSize() int {
   390  	if o == nil || o.BlockSize <= 0 {
   391  		return DefaultBlockSize
   392  	}
   393  	return o.BlockSize
   394  }
   395  
   396  func (o *Options) GetCompactionExpandLimit(level int) int {
   397  	factor := DefaultCompactionExpandLimitFactor
   398  	if o != nil && o.CompactionExpandLimitFactor > 0 {
   399  		factor = o.CompactionExpandLimitFactor
   400  	}
   401  	return o.GetCompactionTableSize(level+1) * factor
   402  }
   403  
   404  func (o *Options) GetCompactionGPOverlaps(level int) int {
   405  	factor := DefaultCompactionGPOverlapsFactor
   406  	if o != nil && o.CompactionGPOverlapsFactor > 0 {
   407  		factor = o.CompactionGPOverlapsFactor
   408  	}
   409  	return o.GetCompactionTableSize(level+2) * factor
   410  }
   411  
   412  func (o *Options) GetCompactionL0Trigger() int {
   413  	if o == nil || o.CompactionL0Trigger == 0 {
   414  		return DefaultCompactionL0Trigger
   415  	}
   416  	return o.CompactionL0Trigger
   417  }
   418  
   419  func (o *Options) GetCompactionSourceLimit(level int) int {
   420  	factor := DefaultCompactionSourceLimitFactor
   421  	if o != nil && o.CompactionSourceLimitFactor > 0 {
   422  		factor = o.CompactionSourceLimitFactor
   423  	}
   424  	return o.GetCompactionTableSize(level+1) * factor
   425  }
   426  
   427  func (o *Options) GetCompactionTableSize(level int) int {
   428  	var (
   429  		base = DefaultCompactionTableSize
   430  		mult float64
   431  	)
   432  	if o != nil {
   433  		if o.CompactionTableSize > 0 {
   434  			base = o.CompactionTableSize
   435  		}
   436  		if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
   437  			mult = o.CompactionTableSizeMultiplierPerLevel[level]
   438  		} else if o.CompactionTableSizeMultiplier > 0 {
   439  			mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
   440  		}
   441  	}
   442  	if mult == 0 {
   443  		mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
   444  	}
   445  	return int(float64(base) * mult)
   446  }
   447  
   448  func (o *Options) GetCompactionTotalSize(level int) int64 {
   449  	var (
   450  		base = DefaultCompactionTotalSize
   451  		mult float64
   452  	)
   453  	if o != nil {
   454  		if o.CompactionTotalSize > 0 {
   455  			base = o.CompactionTotalSize
   456  		}
   457  		if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
   458  			mult = o.CompactionTotalSizeMultiplierPerLevel[level]
   459  		} else if o.CompactionTotalSizeMultiplier > 0 {
   460  			mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
   461  		}
   462  	}
   463  	if mult == 0 {
   464  		mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
   465  	}
   466  	return int64(float64(base) * mult)
   467  }
   468  
   469  func (o *Options) GetComparer() comparer.Comparer {
   470  	if o == nil || o.Comparer == nil {
   471  		return comparer.DefaultComparer
   472  	}
   473  	return o.Comparer
   474  }
   475  
   476  func (o *Options) GetCompression() Compression {
   477  	if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
   478  		return DefaultCompressionType
   479  	}
   480  	return o.Compression
   481  }
   482  
   483  func (o *Options) GetDisableBufferPool() bool {
   484  	if o == nil {
   485  		return false
   486  	}
   487  	return o.DisableBufferPool
   488  }
   489  
   490  func (o *Options) GetDisableBlockCache() bool {
   491  	if o == nil {
   492  		return false
   493  	}
   494  	return o.DisableBlockCache
   495  }
   496  
   497  func (o *Options) GetDisableCompactionBackoff() bool {
   498  	if o == nil {
   499  		return false
   500  	}
   501  	return o.DisableCompactionBackoff
   502  }
   503  
   504  func (o *Options) GetDisableLargeBatchTransaction() bool {
   505  	if o == nil {
   506  		return false
   507  	}
   508  	return o.DisableLargeBatchTransaction
   509  }
   510  
   511  func (o *Options) GetErrorIfExist() bool {
   512  	if o == nil {
   513  		return false
   514  	}
   515  	return o.ErrorIfExist
   516  }
   517  
   518  func (o *Options) GetErrorIfMissing() bool {
   519  	if o == nil {
   520  		return false
   521  	}
   522  	return o.ErrorIfMissing
   523  }
   524  
   525  func (o *Options) GetFilter() filter.Filter {
   526  	if o == nil {
   527  		return nil
   528  	}
   529  	return o.Filter
   530  }
   531  
   532  func (o *Options) GetIteratorSamplingRate() int {
   533  	if o == nil || o.IteratorSamplingRate <= 0 {
   534  		return DefaultIteratorSamplingRate
   535  	}
   536  	return o.IteratorSamplingRate
   537  }
   538  
   539  func (o *Options) GetNoSync() bool {
   540  	if o == nil {
   541  		return false
   542  	}
   543  	return o.NoSync
   544  }
   545  
   546  func (o *Options) GetOpenFilesCacher() Cacher {
   547  	if o == nil || o.OpenFilesCacher == nil {
   548  		return DefaultOpenFilesCacher
   549  	}
   550  	if o.OpenFilesCacher == NoCacher {
   551  		return nil
   552  	}
   553  	return o.OpenFilesCacher
   554  }
   555  
   556  func (o *Options) GetOpenFilesCacheCapacity() int {
   557  	if o == nil || o.OpenFilesCacheCapacity == 0 {
   558  		return DefaultOpenFilesCacheCapacity
   559  	} else if o.OpenFilesCacheCapacity < 0 {
   560  		return 0
   561  	}
   562  	return o.OpenFilesCacheCapacity
   563  }
   564  
   565  func (o *Options) GetReadOnly() bool {
   566  	if o == nil {
   567  		return false
   568  	}
   569  	return o.ReadOnly
   570  }
   571  
   572  func (o *Options) GetStrict(strict Strict) bool {
   573  	if o == nil || o.Strict == 0 {
   574  		return DefaultStrict&strict != 0
   575  	}
   576  	return o.Strict&strict != 0
   577  }
   578  
   579  func (o *Options) GetWriteBuffer() int {
   580  	if o == nil || o.WriteBuffer <= 0 {
   581  		return DefaultWriteBuffer
   582  	}
   583  	return o.WriteBuffer
   584  }
   585  
   586  func (o *Options) GetWriteL0PauseTrigger() int {
   587  	if o == nil || o.WriteL0PauseTrigger == 0 {
   588  		return DefaultWriteL0PauseTrigger
   589  	}
   590  	return o.WriteL0PauseTrigger
   591  }
   592  
   593  func (o *Options) GetWriteL0SlowdownTrigger() int {
   594  	if o == nil || o.WriteL0SlowdownTrigger == 0 {
   595  		return DefaultWriteL0SlowdownTrigger
   596  	}
   597  	return o.WriteL0SlowdownTrigger
   598  }
   599  
// ReadOptions holds the optional parameters for 'read operation'. The
// 'read operation' includes Get, Find and NewIterator.
type ReadOptions struct {
	// DontFillCache defines whether block reads for this 'read operation'
	// should be cached. If false then the block will be cached. This does
	// not affect already-cached blocks.
	//
	// The default value is false.
	DontFillCache bool

	// Strict will be OR'ed with global DB 'strict level' unless StrictOverride
	// is present. Currently only StrictReader has an effect here.
	Strict Strict
}
   614  
   615  func (ro *ReadOptions) GetDontFillCache() bool {
   616  	if ro == nil {
   617  		return false
   618  	}
   619  	return ro.DontFillCache
   620  }
   621  
   622  func (ro *ReadOptions) GetStrict(strict Strict) bool {
   623  	if ro == nil {
   624  		return false
   625  	}
   626  	return ro.Strict&strict != 0
   627  }
   628  
// WriteOptions holds the optional parameters for 'write operation'. The
// 'write operation' includes Write, Put and Delete.
type WriteOptions struct {
	// Sync is whether to sync underlying writes from the OS buffer cache
	// through to actual disk, if applicable. Setting Sync can result in
	// slower writes.
	//
	// If false, and the machine crashes, then some recent writes may be lost.
	// Note that if it is just the process that crashes (and the machine does
	// not) then no writes will be lost.
	//
	// In other words, Sync being false has the same semantics as a write
	// system call. Sync being true means write followed by fsync.
	//
	// The default value is false.
	Sync bool
}
   646  
   647  func (wo *WriteOptions) GetSync() bool {
   648  	if wo == nil {
   649  		return false
   650  	}
   651  	return wo.Sync
   652  }
   653  
   654  func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
   655  	if ro.GetStrict(StrictOverride) {
   656  		return ro.GetStrict(strict)
   657  	} else {
   658  		return o.GetStrict(strict) || ro.GetStrict(strict)
   659  	}
   660  }