github.com/df-mc/goleveldb@v1.1.9/leveldb/opt/options.go (about)

     1  // Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
     2  // All rights reserved.
     3  //
     4  // Use of this source code is governed by a BSD-style license that can be
     5  // found in the LICENSE file.
     6  
     7  // Package opt provides sets of options used by LevelDB.
     8  package opt
     9  
    10  import (
    11  	"math"
    12  
    13  	"github.com/df-mc/goleveldb/leveldb/cache"
    14  	"github.com/df-mc/goleveldb/leveldb/comparer"
    15  	"github.com/df-mc/goleveldb/leveldb/filter"
    16  )
    17  
// Binary byte-size units used by the default option values below.
const (
	KiB = 1024
	MiB = KiB * 1024
	GiB = MiB * 1024
)
    23  
// Default values applied by the Get* accessors when the corresponding
// Options field is left at its zero value. See the Options struct for
// the meaning of each knob.
var (
	DefaultBlockCacher                   = LRUCacher
	DefaultBlockCacheCapacity            = 8 * MiB
	DefaultBlockRestartInterval          = 16
	DefaultBlockSize                     = 4 * KiB
	DefaultCompactionExpandLimitFactor   = 25
	DefaultCompactionGPOverlapsFactor    = 10
	DefaultCompactionL0Trigger           = 4
	DefaultCompactionSourceLimitFactor   = 1
	DefaultCompactionTableSize           = 2 * MiB
	DefaultCompactionTableSizeMultiplier = 1.0
	DefaultCompactionTotalSize           = 10 * MiB
	DefaultCompactionTotalSizeMultiplier = 10.0
	DefaultCompressionType               = FlateCompression // NOTE(review): this fork defaults to flate; upstream goleveldb uses snappy.
	DefaultIteratorSamplingRate          = 1 * MiB
	DefaultOpenFilesCacher               = LRUCacher
	DefaultOpenFilesCacheCapacity        = 500
	DefaultWriteBuffer                   = 4 * MiB
	DefaultWriteL0PauseTrigger           = 12
	DefaultWriteL0SlowdownTrigger        = 8
)
    45  
// Cacher is a caching algorithm.
type Cacher interface {
	// New creates a new cache.Cacher instance with the given capacity.
	New(capacity int) cache.Cacher
}
    50  
// CacherFunc adapts a plain constructor function to the Cacher interface.
type CacherFunc struct {
	// NewFunc constructs a cache.Cacher with the given capacity.
	// A nil NewFunc disables caching (New returns nil).
	NewFunc func(capacity int) cache.Cacher
}
    54  
    55  func (f *CacherFunc) New(capacity int) cache.Cacher {
    56  	if f.NewFunc != nil {
    57  		return f.NewFunc(capacity)
    58  	}
    59  	return nil
    60  }
    61  
    62  func noCacher(int) cache.Cacher { return nil }
    63  
var (
	// LRUCacher is the LRU-cache algorithm.
	LRUCacher = &CacherFunc{cache.NewLRU}

	// NoCacher is the value to disable caching algorithm.
	// It is compared by identity in GetBlockCacher/GetOpenFilesCacher,
	// so callers must use this exact value rather than an equivalent one.
	NoCacher = &CacherFunc{}
)
    71  
    72  // Compression is the 'sorted table' block compression algorithm to use.
    73  type Compression uint
    74  
    75  func (c Compression) String() string {
    76  	switch c {
    77  	case DefaultCompression:
    78  		return "default"
    79  	case NoCompression:
    80  		return "none"
    81  	case SnappyCompression:
    82  		return "snappy"
    83  	}
    84  	return "invalid"
    85  }
    86  
    87  const (
    88  	DefaultCompression Compression = iota
    89  	NoCompression
    90  	SnappyCompression
    91  	_
    92  	FlateCompression
    93  	nCompression
    94  )
    95  
// Strict is the DB 'strict level': a bit set of flags controlling how
// aggressively corruption is treated as a hard error.
type Strict uint

const (
	// If present then a corrupted or invalid chunk or block in manifest
	// journal will cause an error instead of being dropped.
	// This will prevent a database with a corrupted manifest from being opened.
	StrictManifest Strict = 1 << iota

	// If present then journal chunk checksum will be verified.
	StrictJournalChecksum

	// If present then a corrupted or invalid chunk or block in journal
	// will cause an error instead of being dropped.
	// This will prevent a database with a corrupted journal from being opened.
	StrictJournal

	// If present then 'sorted table' block checksum will be verified.
	// This has effect on both 'read operation' and compaction.
	StrictBlockChecksum

	// If present then a corrupted 'sorted table' will fail compaction.
	// The database will enter read-only mode.
	StrictCompaction

	// If present then a corrupted 'sorted table' will halt 'read operation'.
	StrictReader

	// If present then leveldb.Recover will drop corrupted 'sorted table'.
	StrictRecovery

	// This is only applicable to ReadOptions; if present then this
	// ReadOptions 'strict level' will override the global one.
	StrictOverride

	// StrictAll enables all strict flags.
	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery

	// DefaultStrict is the default strict flags. Specifying any strict
	// flags will override the default strict flags as a whole (i.e. not OR'ed).
	DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader

	// NoStrict disables all strict flags. Overrides default strict flags.
	NoStrict = ^StrictAll
)
   141  
// Options holds the optional parameters for the DB at large.
// All fields may be left at their zero value; the Get* accessors below
// substitute the package defaults, and a nil *Options is valid everywhere.
type Options struct {
	// AltFilters defines one or more 'alternative filters'.
	// 'alternative filters' will be used during reads if a filter block
	// does not match with the 'effective filter'.
	//
	// The default value is nil.
	AltFilters []filter.Filter

	// BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
	// Specify NoCacher to disable caching algorithm.
	//
	// The default value is LRUCacher.
	BlockCacher Cacher

	// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
	// Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher.
	//
	// The default value is 8MiB.
	BlockCacheCapacity int

	// BlockCacheEvictRemoved allows enabling forced-eviction on cached blocks
	// belonging to removed 'sorted table'.
	//
	// The default value is false.
	BlockCacheEvictRemoved bool

	// BlockRestartInterval is the number of keys between restart points for
	// delta encoding of keys.
	//
	// The default value is 16.
	BlockRestartInterval int

	// BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
	// block.
	//
	// The default value is 4KiB.
	BlockSize int

	// CompactionExpandLimitFactor limits compaction size after expanded.
	// This will be multiplied by table size limit at compaction target level.
	//
	// The default value is 25.
	CompactionExpandLimitFactor int

	// CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a
	// single 'sorted table' generates.
	// This will be multiplied by table size limit at grandparent level.
	//
	// The default value is 10.
	CompactionGPOverlapsFactor int

	// CompactionL0Trigger defines number of 'sorted table' at level-0 that will
	// trigger compaction.
	//
	// The default value is 4.
	CompactionL0Trigger int

	// CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
	// level-0.
	// This will be multiplied by table size limit at compaction target level.
	//
	// The default value is 1.
	CompactionSourceLimitFactor int

	// CompactionTableSize limits size of 'sorted table' that compaction generates.
	// The limits for each level will be calculated as:
	//   CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
	// The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel.
	//
	// The default value is 2MiB.
	CompactionTableSize int

	// CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
	//
	// The default value is 1.
	CompactionTableSizeMultiplier float64

	// CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
	// CompactionTableSize.
	// Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTableSizeMultiplierPerLevel []float64

	// CompactionTotalSize limits total size of 'sorted table' for each level.
	// The limits for each level will be calculated as:
	//   CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
	// The multiplier for each level can also fine-tuned using
	// CompactionTotalSizeMultiplierPerLevel.
	//
	// The default value is 10MiB.
	CompactionTotalSize int

	// CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
	//
	// The default value is 10.
	CompactionTotalSizeMultiplier float64

	// CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
	// CompactionTotalSize.
	// Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTotalSizeMultiplierPerLevel []float64

	// Comparer defines a total ordering over the space of []byte keys: a 'less
	// than' relationship. The same comparison algorithm must be used for reads
	// and writes over the lifetime of the DB.
	//
	// The default value uses the same ordering as bytes.Compare.
	Comparer comparer.Comparer

	// Compression defines the 'sorted table' block compression to use.
	//
	// The default value (DefaultCompression) resolves to DefaultCompressionType,
	// which in this fork is FlateCompression (see GetCompression).
	Compression Compression

	// DisableBufferPool allows disabling use of util.BufferPool functionality.
	//
	// The default value is false.
	DisableBufferPool bool

	// DisableBlockCache allows disabling use of cache.Cache functionality on
	// 'sorted table' block.
	//
	// The default value is false.
	DisableBlockCache bool

	// DisableCompactionBackoff allows disabling compaction retry backoff.
	//
	// The default value is false.
	DisableCompactionBackoff bool

	// DisableLargeBatchTransaction allows disabling switch-to-transaction mode
	// on large batch write. If enabled, batch writes larger than WriteBuffer
	// will use transaction.
	//
	// The default is false.
	DisableLargeBatchTransaction bool

	// DisableSeeksCompaction allows disabling 'seeks triggered compaction'.
	// The purpose of 'seeks triggered compaction' is to optimize database so
	// that 'level seeks' can be minimized, however this might generate many
	// small compactions which may not be preferable.
	//
	// The default is false.
	DisableSeeksCompaction bool

	// ErrorIfExist defines whether an error should be returned if the DB
	// already exists.
	//
	// The default value is false.
	ErrorIfExist bool

	// ErrorIfMissing defines whether an error should be returned if the DB is
	// missing. If false then the database will be created if missing, otherwise
	// an error will be returned.
	//
	// The default value is false.
	ErrorIfMissing bool

	// Filter defines an 'effective filter' to use. An 'effective filter'
	// if defined will be used to generate per-table filter block.
	// The filter name will be stored on disk.
	// During reads LevelDB will try to find matching filter from
	// 'effective filter' and 'alternative filters'.
	//
	// Filter can be changed after a DB has been created. It is recommended
	// to put old filter to the 'alternative filters' to mitigate lack of
	// filter during transition period.
	//
	// A filter is used to reduce disk reads when looking for a specific key.
	//
	// The default value is nil.
	Filter filter.Filter

	// IteratorSamplingRate defines approximate gap (in bytes) between read
	// sampling of an iterator. The samples will be used to determine when
	// compaction should be triggered.
	// Use negative value to disable iterator sampling.
	// The iterator sampling is disabled if DisableSeeksCompaction is true.
	//
	// The default is 1MiB.
	IteratorSamplingRate int

	// NoSync allows completely disabling fsync.
	//
	// The default is false.
	NoSync bool

	// NoWriteMerge allows disabling write merge.
	//
	// The default is false.
	NoWriteMerge bool

	// OpenFilesCacher provides cache algorithm for open files caching.
	// Specify NoCacher to disable caching algorithm.
	//
	// The default value is LRUCacher.
	OpenFilesCacher Cacher

	// OpenFilesCacheCapacity defines the capacity of the open files caching.
	// Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher.
	//
	// The default value is 500.
	OpenFilesCacheCapacity int

	// ReadOnly, if true, opens the DB in read-only mode.
	//
	// The default value is false.
	ReadOnly bool

	// Strict defines the DB strict level.
	Strict Strict

	// WriteBuffer defines maximum size of a 'memdb' before flushed to
	// 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk
	// unsorted journal.
	//
	// LevelDB may hold up to two 'memdb' at the same time.
	//
	// The default value is 4MiB.
	WriteBuffer int

	// WriteL0PauseTrigger defines number of 'sorted table' at level-0 that will
	// pause write.
	//
	// The default value is 12.
	WriteL0PauseTrigger int

	// WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that
	// will trigger write slowdown.
	//
	// The default value is 8.
	WriteL0SlowdownTrigger int
}
   379  
   380  func (o *Options) GetAltFilters() []filter.Filter {
   381  	if o == nil {
   382  		return nil
   383  	}
   384  	return o.AltFilters
   385  }
   386  
   387  func (o *Options) GetBlockCacher() Cacher {
   388  	if o == nil || o.BlockCacher == nil {
   389  		return DefaultBlockCacher
   390  	} else if o.BlockCacher == NoCacher {
   391  		return nil
   392  	}
   393  	return o.BlockCacher
   394  }
   395  
   396  func (o *Options) GetBlockCacheCapacity() int {
   397  	if o == nil || o.BlockCacheCapacity == 0 {
   398  		return DefaultBlockCacheCapacity
   399  	} else if o.BlockCacheCapacity < 0 {
   400  		return 0
   401  	}
   402  	return o.BlockCacheCapacity
   403  }
   404  
   405  func (o *Options) GetBlockCacheEvictRemoved() bool {
   406  	if o == nil {
   407  		return false
   408  	}
   409  	return o.BlockCacheEvictRemoved
   410  }
   411  
   412  func (o *Options) GetBlockRestartInterval() int {
   413  	if o == nil || o.BlockRestartInterval <= 0 {
   414  		return DefaultBlockRestartInterval
   415  	}
   416  	return o.BlockRestartInterval
   417  }
   418  
   419  func (o *Options) GetBlockSize() int {
   420  	if o == nil || o.BlockSize <= 0 {
   421  		return DefaultBlockSize
   422  	}
   423  	return o.BlockSize
   424  }
   425  
   426  func (o *Options) GetCompactionExpandLimit(level int) int {
   427  	factor := DefaultCompactionExpandLimitFactor
   428  	if o != nil && o.CompactionExpandLimitFactor > 0 {
   429  		factor = o.CompactionExpandLimitFactor
   430  	}
   431  	return o.GetCompactionTableSize(level+1) * factor
   432  }
   433  
   434  func (o *Options) GetCompactionGPOverlaps(level int) int {
   435  	factor := DefaultCompactionGPOverlapsFactor
   436  	if o != nil && o.CompactionGPOverlapsFactor > 0 {
   437  		factor = o.CompactionGPOverlapsFactor
   438  	}
   439  	return o.GetCompactionTableSize(level+2) * factor
   440  }
   441  
   442  func (o *Options) GetCompactionL0Trigger() int {
   443  	if o == nil || o.CompactionL0Trigger == 0 {
   444  		return DefaultCompactionL0Trigger
   445  	}
   446  	return o.CompactionL0Trigger
   447  }
   448  
   449  func (o *Options) GetCompactionSourceLimit(level int) int {
   450  	factor := DefaultCompactionSourceLimitFactor
   451  	if o != nil && o.CompactionSourceLimitFactor > 0 {
   452  		factor = o.CompactionSourceLimitFactor
   453  	}
   454  	return o.GetCompactionTableSize(level+1) * factor
   455  }
   456  
   457  func (o *Options) GetCompactionTableSize(level int) int {
   458  	var (
   459  		base = DefaultCompactionTableSize
   460  		mult float64
   461  	)
   462  	if o != nil {
   463  		if o.CompactionTableSize > 0 {
   464  			base = o.CompactionTableSize
   465  		}
   466  		if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
   467  			mult = o.CompactionTableSizeMultiplierPerLevel[level]
   468  		} else if o.CompactionTableSizeMultiplier > 0 {
   469  			mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
   470  		}
   471  	}
   472  	if mult == 0 {
   473  		mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
   474  	}
   475  	return int(float64(base) * mult)
   476  }
   477  
   478  func (o *Options) GetCompactionTotalSize(level int) int64 {
   479  	var (
   480  		base = DefaultCompactionTotalSize
   481  		mult float64
   482  	)
   483  	if o != nil {
   484  		if o.CompactionTotalSize > 0 {
   485  			base = o.CompactionTotalSize
   486  		}
   487  		if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
   488  			mult = o.CompactionTotalSizeMultiplierPerLevel[level]
   489  		} else if o.CompactionTotalSizeMultiplier > 0 {
   490  			mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
   491  		}
   492  	}
   493  	if mult == 0 {
   494  		mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
   495  	}
   496  	return int64(float64(base) * mult)
   497  }
   498  
   499  func (o *Options) GetComparer() comparer.Comparer {
   500  	if o == nil || o.Comparer == nil {
   501  		return comparer.DefaultComparer
   502  	}
   503  	return o.Comparer
   504  }
   505  
   506  func (o *Options) GetCompression() Compression {
   507  	if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
   508  		return DefaultCompressionType
   509  	}
   510  	return o.Compression
   511  }
   512  
   513  func (o *Options) GetDisableBufferPool() bool {
   514  	if o == nil {
   515  		return false
   516  	}
   517  	return o.DisableBufferPool
   518  }
   519  
   520  func (o *Options) GetDisableBlockCache() bool {
   521  	if o == nil {
   522  		return false
   523  	}
   524  	return o.DisableBlockCache
   525  }
   526  
   527  func (o *Options) GetDisableCompactionBackoff() bool {
   528  	if o == nil {
   529  		return false
   530  	}
   531  	return o.DisableCompactionBackoff
   532  }
   533  
   534  func (o *Options) GetDisableLargeBatchTransaction() bool {
   535  	if o == nil {
   536  		return false
   537  	}
   538  	return o.DisableLargeBatchTransaction
   539  }
   540  
   541  func (o *Options) GetDisableSeeksCompaction() bool {
   542  	if o == nil {
   543  		return false
   544  	}
   545  	return o.DisableSeeksCompaction
   546  }
   547  
   548  func (o *Options) GetErrorIfExist() bool {
   549  	if o == nil {
   550  		return false
   551  	}
   552  	return o.ErrorIfExist
   553  }
   554  
   555  func (o *Options) GetErrorIfMissing() bool {
   556  	if o == nil {
   557  		return false
   558  	}
   559  	return o.ErrorIfMissing
   560  }
   561  
   562  func (o *Options) GetFilter() filter.Filter {
   563  	if o == nil {
   564  		return nil
   565  	}
   566  	return o.Filter
   567  }
   568  
   569  func (o *Options) GetIteratorSamplingRate() int {
   570  	if o == nil || o.IteratorSamplingRate == 0 {
   571  		return DefaultIteratorSamplingRate
   572  	} else if o.IteratorSamplingRate < 0 {
   573  		return 0
   574  	}
   575  	return o.IteratorSamplingRate
   576  }
   577  
   578  func (o *Options) GetNoSync() bool {
   579  	if o == nil {
   580  		return false
   581  	}
   582  	return o.NoSync
   583  }
   584  
   585  func (o *Options) GetNoWriteMerge() bool {
   586  	if o == nil {
   587  		return false
   588  	}
   589  	return o.NoWriteMerge
   590  }
   591  
   592  func (o *Options) GetOpenFilesCacher() Cacher {
   593  	if o == nil || o.OpenFilesCacher == nil {
   594  		return DefaultOpenFilesCacher
   595  	}
   596  	if o.OpenFilesCacher == NoCacher {
   597  		return nil
   598  	}
   599  	return o.OpenFilesCacher
   600  }
   601  
   602  func (o *Options) GetOpenFilesCacheCapacity() int {
   603  	if o == nil || o.OpenFilesCacheCapacity == 0 {
   604  		return DefaultOpenFilesCacheCapacity
   605  	} else if o.OpenFilesCacheCapacity < 0 {
   606  		return 0
   607  	}
   608  	return o.OpenFilesCacheCapacity
   609  }
   610  
   611  func (o *Options) GetReadOnly() bool {
   612  	if o == nil {
   613  		return false
   614  	}
   615  	return o.ReadOnly
   616  }
   617  
   618  func (o *Options) GetStrict(strict Strict) bool {
   619  	if o == nil || o.Strict == 0 {
   620  		return DefaultStrict&strict != 0
   621  	}
   622  	return o.Strict&strict != 0
   623  }
   624  
   625  func (o *Options) GetWriteBuffer() int {
   626  	if o == nil || o.WriteBuffer <= 0 {
   627  		return DefaultWriteBuffer
   628  	}
   629  	return o.WriteBuffer
   630  }
   631  
   632  func (o *Options) GetWriteL0PauseTrigger() int {
   633  	if o == nil || o.WriteL0PauseTrigger == 0 {
   634  		return DefaultWriteL0PauseTrigger
   635  	}
   636  	return o.WriteL0PauseTrigger
   637  }
   638  
   639  func (o *Options) GetWriteL0SlowdownTrigger() int {
   640  	if o == nil || o.WriteL0SlowdownTrigger == 0 {
   641  		return DefaultWriteL0SlowdownTrigger
   642  	}
   643  	return o.WriteL0SlowdownTrigger
   644  }
   645  
// ReadOptions holds the optional parameters for 'read operation'. The
// 'read operation' includes Get, Find and NewIterator.
type ReadOptions struct {
	// DontFillCache defines whether block reads for this 'read operation'
	// should be cached. If false then the block will be cached. This does
	// not affect already cached blocks.
	//
	// The default value is false.
	DontFillCache bool

	// Strict will be OR'ed with global DB 'strict level' unless StrictOverride
	// is present. Currently only StrictReader has effect here.
	Strict Strict
}
   660  
   661  func (ro *ReadOptions) GetDontFillCache() bool {
   662  	if ro == nil {
   663  		return false
   664  	}
   665  	return ro.DontFillCache
   666  }
   667  
   668  func (ro *ReadOptions) GetStrict(strict Strict) bool {
   669  	if ro == nil {
   670  		return false
   671  	}
   672  	return ro.Strict&strict != 0
   673  }
   674  
// WriteOptions holds the optional parameters for 'write operation'. The
// 'write operation' includes Write, Put and Delete.
type WriteOptions struct {
	// NoWriteMerge allows disabling write merge.
	//
	// The default is false.
	NoWriteMerge bool

	// Sync is whether to sync underlying writes from the OS buffer cache
	// through to actual disk, if applicable. Setting Sync can result in
	// slower writes.
	//
	// If false, and the machine crashes, then some recent writes may be lost.
	// Note that if it is just the process that crashes (and the machine does
	// not) then no writes will be lost.
	//
	// In other words, Sync being false has the same semantics as a write
	// system call. Sync being true means write followed by fsync.
	//
	// The default value is false.
	Sync bool
}
   697  
   698  func (wo *WriteOptions) GetNoWriteMerge() bool {
   699  	if wo == nil {
   700  		return false
   701  	}
   702  	return wo.NoWriteMerge
   703  }
   704  
   705  func (wo *WriteOptions) GetSync() bool {
   706  	if wo == nil {
   707  		return false
   708  	}
   709  	return wo.Sync
   710  }
   711  
   712  func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
   713  	if ro.GetStrict(StrictOverride) {
   714  		return ro.GetStrict(strict)
   715  	} else {
   716  		return o.GetStrict(strict) || ro.GetStrict(strict)
   717  	}
   718  }