github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/server/config.go

     1  // Copyright 2015 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package server
    12  
    13  import (
    14  	"bytes"
    15  	"context"
    16  	"fmt"
    17  	"net"
    18  	"path/filepath"
    19  	"strings"
    20  	"text/tabwriter"
    21  	"time"
    22  
    23  	"github.com/cockroachdb/cockroach/pkg/base"
    24  	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
    25  	"github.com/cockroachdb/cockroach/pkg/gossip/resolver"
    26  	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
    27  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    28  	"github.com/cockroachdb/cockroach/pkg/server/status"
    29  	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
    30  	"github.com/cockroachdb/cockroach/pkg/storage"
    31  	"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
    32  	"github.com/cockroachdb/cockroach/pkg/ts"
    33  	"github.com/cockroachdb/cockroach/pkg/util"
    34  	"github.com/cockroachdb/cockroach/pkg/util/envutil"
    35  	"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
    36  	"github.com/cockroachdb/cockroach/pkg/util/log"
    37  	"github.com/cockroachdb/cockroach/pkg/util/retry"
    38  	"github.com/cockroachdb/errors"
    39  	"github.com/cockroachdb/pebble"
    40  	"github.com/elastic/gosigar"
    41  )
    42  
    43  // Context defaults.
    44  const (
    45  	// DefaultCacheSize is the default size of the RocksDB and Pebble caches. We
    46  	// default the cache size and SQL memory pool size to 128 MiB. Larger values
    47  	// might provide significantly better performance, but we're not sure what
    48  	// type of system we're running on (development or production or some shared
    49  	// environment). Production users should almost certainly override these
    50  	// settings; we warn in the logs when they are left at their defaults.
    51  	DefaultCacheSize         = 128 << 20 // 128 MiB
    52  	defaultSQLMemoryPoolSize = 128 << 20 // 128 MiB
    53  	defaultScanInterval      = 10 * time.Minute
    54  	defaultScanMinIdleTime   = 10 * time.Millisecond
    55  	defaultScanMaxIdleTime   = 1 * time.Second
    56  
    57  	defaultStorePath = "cockroach-data"
    58  	// TempDirPrefix is the filename prefix of any temporary subdirectory
    59  	// created.
    60  	TempDirPrefix = "cockroach-temp"
    61  	// TempDirsRecordFilename is the filename for the record file
    62  	// that keeps track of the paths of the temporary directories created.
    63  	TempDirsRecordFilename = "temp-dirs-record.txt"
    64  	defaultEventLogEnabled = true
    65  
    66  	maximumMaxClockOffset = 5 * time.Second
    67  
    68  	minimumNetworkFileDescriptors     = 256
    69  	recommendedNetworkFileDescriptors = 5000
    70  
    71  	defaultSQLTableStatCacheSize = 256
    72  
    73  	// This comes out to 1024 cache entries.
    74  	defaultSQLQueryCacheSize = 8 * 1024 * 1024
    75  )
    76  
    77  var productionSettingsWebpage = fmt.Sprintf(
    78  	"please see %s for more details",
    79  	base.DocsURL("recommended-production-settings.html"),
    80  )
    81  
    82  // MaxOffsetType stores the configured MaxOffset.
    83  type MaxOffsetType time.Duration
    84  
    85  // Type implements the pflag.Value interface.
    86  func (mo *MaxOffsetType) Type() string {
    87  	return "MaxOffset"
    88  }
    89  
    90  // Set implements the pflag.Value interface.
    91  func (mo *MaxOffsetType) Set(v string) error {
    92  	nanos, err := time.ParseDuration(v)
    93  	if err != nil {
    94  		return err
    95  	}
    96  	if nanos > maximumMaxClockOffset {
    97  		return errors.Errorf("%s is not a valid max offset, must not exceed %v", v, maximumMaxClockOffset)
    98  	}
    99  	*mo = MaxOffsetType(nanos)
   100  	return nil
   101  }
   102  
   103  // String implements the pflag.Value interface.
   104  func (mo *MaxOffsetType) String() string {
   105  	return time.Duration(*mo).String()
   106  }
   107  
   108  // BaseConfig holds parameters that are needed to setup either a KV or a SQL
   109  // server.
   110  type BaseConfig struct {
   111  	Settings *cluster.Settings
   112  	*base.Config
   113  
   114  	// AmbientCtx is used to annotate contexts used inside the server.
   115  	AmbientCtx log.AmbientContext
   116  
   117  	// Maximum allowed clock offset for the cluster. If observed clock
   118  	// offsets exceed this limit, inconsistency may result, and servers
   119  	// will panic to minimize the likelihood of inconsistent data.
   120  	// Increasing this value will increase time to recovery after
   121  	// failures, and increase the frequency and impact of
   122  	// ReadWithinUncertaintyIntervalError.
   123  	MaxOffset MaxOffsetType
   124  
   125  	// DefaultZoneConfig is used to set the default zone config inside the server.
   126  	// It can be overridden during tests by setting the DefaultZoneConfigOverride
   127  	// server testing knob.
   128  	DefaultZoneConfig zonepb.ZoneConfig
   129  
   130  	// Locality is a description of the topography of the server.
   131  	Locality roachpb.Locality
   132  
   133  	// StorageEngine specifies the engine type (e.g. rocksdb, pebble) to use to
   134  	// instantiate stores.
   135  	StorageEngine enginepb.EngineType
   136  
   137  	// TestingKnobs is used for internal test controls only.
   138  	TestingKnobs base.TestingKnobs
   139  }
   140  
   141  // MakeBaseConfig returns a BaseConfig with default values.
   142  func MakeBaseConfig(st *cluster.Settings) BaseConfig {
   143  	baseCfg := BaseConfig{
   144  		AmbientCtx:        log.AmbientContext{Tracer: st.Tracer},
   145  		Config:            new(base.Config),
   146  		Settings:          st,
   147  		MaxOffset:         MaxOffsetType(base.DefaultMaxClockOffset),
   148  		DefaultZoneConfig: zonepb.DefaultZoneConfig(),
   149  		StorageEngine:     storage.DefaultStorageEngine,
   150  	}
   151  	baseCfg.InitDefaults()
   152  	return baseCfg
   153  }
   154  
   155  // Config holds the parameters needed to set up a combined KV and SQL server.
   156  type Config struct {
   157  	BaseConfig
   158  	KVConfig
   159  	SQLConfig
   160  }
   161  
   162  // KVConfig holds the parameters that (together with a BaseConfig) allow setting
   163  // up a KV server.
   164  type KVConfig struct {
   165  	base.RaftConfig
   166  
   167  	// Stores is specified to enable durable key-value storage.
   168  	Stores base.StoreSpecList
   169  
   170  	// Attrs specifies a colon-separated list of node topography or machine
   171  	// capabilities, used to match capabilities or location preferences specified
   172  	// in zone configs.
   173  	Attrs string
   174  
   175  	// JoinList is a list of node addresses that act as bootstrap hosts for
   176  	// connecting to the gossip network.
   177  	JoinList base.JoinListType
   178  
   179  	// JoinPreferSRVRecords, if set, causes the lookup logic for the
   180  	// names in JoinList to prefer SRV records from DNS, if available,
   181  	// to A/AAAA records.
   182  	JoinPreferSRVRecords bool
   183  
   184  	// RetryOptions controls the retry behavior of the server.
   185  	//
   186  	// TODO(tbg): this is only ever used in one test. Make it a testing knob.
   187  	RetryOptions retry.Options
   188  
   189  	// CacheSize is the amount of memory in bytes to use for caching data.
   190  	// The value is split evenly among the stores if there is more than one.
   191  	CacheSize int64
   192  
   193  	// TimeSeriesServerConfig contains configuration specific to the time series
   194  	// server.
   195  	TimeSeriesServerConfig ts.ServerConfig
   196  
   197  	// GoroutineDumpDirName is the directory name for goroutine dumps using
   198  	// goroutinedumper.
   199  	GoroutineDumpDirName string
   200  
   201  	// HeapProfileDirName is the directory name for heap profiles using
   202  	// heapprofiler. If empty, no heap profiles will be collected.
   203  	HeapProfileDirName string
   204  
   205  	// Parsed values.
   206  
   207  	// NodeAttributes is the parsed representation of Attrs.
   208  	NodeAttributes roachpb.Attributes
   209  
   210  	// GossipBootstrapResolvers is a list of gossip resolvers used
   211  	// to find bootstrap nodes for connecting to the gossip network.
   212  	GossipBootstrapResolvers []resolver.Resolver
   213  
   214  	// The following values can only be set via environment variables and are
   215  	// for testing only. They are not meant to be set by the end user.
   216  
   217  	// Enables linearizable behavior of operations on this node by making sure
   218  	// that no commit timestamp is reported back to the client until all other
   219  	// node clocks have necessarily passed it.
   220  	// Environment Variable: COCKROACH_EXPERIMENTAL_LINEARIZABLE
   221  	Linearizable bool
   222  
   223  	// ScanInterval determines a duration during which each range should be
   224  	// visited approximately once by the range scanner. Set to 0 to disable.
   225  	// Environment Variable: COCKROACH_SCAN_INTERVAL
   226  	ScanInterval time.Duration
   227  
   228  	// ScanMinIdleTime is the minimum time the scanner will be idle between ranges.
   229  	// If enabled (> 0), the scanner may complete in more than ScanInterval for large
   230  	// stores.
   231  	// Environment Variable: COCKROACH_SCAN_MIN_IDLE_TIME
   232  	ScanMinIdleTime time.Duration
   233  
   234  	// ScanMaxIdleTime is the maximum time the scanner will be idle between ranges.
   235  	// If enabled (> 0), the scanner may complete in less than ScanInterval for small
   236  	// stores.
   237  	// Environment Variable: COCKROACH_SCAN_MAX_IDLE_TIME
   238  	ScanMaxIdleTime time.Duration
   239  
   240  	// DefaultSystemZoneConfig is used to set the default system zone config
   241  	// inside the server. It can be overridden during tests by setting the
   242  	// DefaultSystemZoneConfigOverride server testing knob.
   243  	DefaultSystemZoneConfig zonepb.ZoneConfig
   244  
   245  	// LocalityAddresses contains private IP addresses that can only be accessed
   246  	// in the corresponding locality.
   247  	LocalityAddresses []roachpb.LocalityAddress
   248  
   249  	// EventLogEnabled is a switch which enables recording into cockroach's SQL
   250  	// event log tables. These tables record transactional events about changes
   251  	// to cluster metadata, such as DDL statements and range rebalancing
   252  	// actions.
   253  	EventLogEnabled bool
   254  
   255  	// ReadyFn is called when the server has started listening on its
   256  	// sockets.
   257  	//
   258  	// The bool parameter is true if the server is not yet bootstrapped, will
   259  	// not bootstrap itself, and will wait for an `init` command or accept
   260  	// bootstrapping from a joined node.
   261  	//
   262  	// This method is invoked from the main start goroutine, so it should not
   263  	// do nontrivial work.
   264  	ReadyFn func(waitForInit bool)
   265  
   266  	// DelayedBootstrapFn is called if the bootstrap process does not complete
   267  	// in a timely fashion, typically 30s after the server starts listening.
   268  	DelayedBootstrapFn func()
   269  
   270  	// EnableWebSessionAuthentication enables session-based authentication for
   271  	// the Admin API's HTTP endpoints.
   272  	EnableWebSessionAuthentication bool
   273  
   274  	enginesCreated bool
   275  }
   276  
   277  // MakeKVConfig returns a KVConfig with default values.
   278  func MakeKVConfig(storeSpec base.StoreSpec) KVConfig {
   279  	disableWebLogin := envutil.EnvOrDefaultBool("COCKROACH_DISABLE_WEB_LOGIN", false)
   280  	kvCfg := KVConfig{
   281  		DefaultSystemZoneConfig:        zonepb.DefaultSystemZoneConfig(),
   282  		CacheSize:                      DefaultCacheSize,
   283  		ScanInterval:                   defaultScanInterval,
   284  		ScanMinIdleTime:                defaultScanMinIdleTime,
   285  		ScanMaxIdleTime:                defaultScanMaxIdleTime,
   286  		EventLogEnabled:                defaultEventLogEnabled,
   287  		EnableWebSessionAuthentication: !disableWebLogin,
   288  		Stores: base.StoreSpecList{
   289  			Specs: []base.StoreSpec{storeSpec},
   290  		},
   291  	}
   292  	kvCfg.RaftConfig.SetDefaults()
   293  	return kvCfg
   294  }
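
        // A minimal construction sketch, assuming the CLI-style store spec syntax
        // accepted by base.NewStoreSpec (the path and size used here are
        // illustrative only):
        //
        //	spec, err := base.NewStoreSpec("path=/mnt/data1,size=80%")
        //	if err != nil {
        //		panic(err)
        //	}
        //	kvCfg := MakeKVConfig(spec)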
   295  
   296  // SQLConfig holds the parameters that (together with a BaseConfig) allow
   297  // setting up a SQL server.
   298  type SQLConfig struct {
   299  	// The tenant that the SQL server runs on behalf of.
   300  	TenantID roachpb.TenantID
   301  
   302  	// LeaseManagerConfig holds configuration values specific to the LeaseManager.
   303  	LeaseManagerConfig *base.LeaseManagerConfig
   304  
   305  	// SocketFile, if non-empty, sets up a TLS-free local listener using
   306  	// a Unix domain socket at the specified path.
   307  	SocketFile string
   308  
   309  	// TempStorageConfig is used to configure temp storage, which stores
   310  	// ephemeral data when processing large queries.
   311  	TempStorageConfig base.TempStorageConfig
   312  
   313  	// ExternalIODirConfig is used to configure external storage
   314  	// access (http://, nodelocal://, etc.).
   315  	ExternalIODirConfig base.ExternalIODirConfig
   316  
   317  	// MemoryPoolSize is the amount of memory in bytes that can be
   318  	// used by SQL clients to store row data in server RAM.
   319  	MemoryPoolSize int64
   320  
   321  	// AuditLogDirName is the target directory name for SQL audit logs.
   322  	AuditLogDirName *log.DirName
   323  
   324  	// TableStatCacheSize is the size (number of tables) of the table
   325  	// statistics cache.
   326  	TableStatCacheSize int
   327  
   328  	// QueryCacheSize is the memory size (in bytes) of the query plan cache.
   329  	QueryCacheSize int64
   330  }
   331  
   332  // MakeSQLConfig returns a SQLConfig with default values.
   333  func MakeSQLConfig(tenID roachpb.TenantID, tempStorageCfg base.TempStorageConfig) SQLConfig {
   334  	sqlCfg := SQLConfig{
   335  		TenantID:           tenID,
   336  		MemoryPoolSize:     defaultSQLMemoryPoolSize,
   337  		TableStatCacheSize: defaultSQLTableStatCacheSize,
   338  		QueryCacheSize:     defaultSQLQueryCacheSize,
   339  		TempStorageConfig:  tempStorageCfg,
   340  		LeaseManagerConfig: base.NewLeaseManagerConfig(),
   341  	}
   342  	return sqlCfg
   343  }
   344  
   345  // setOpenFileLimit sets the soft limit for open file descriptors to the hard
   346  // limit if needed. Returns an error if the hard limit is too low. Returns the
   347  // value to set maxOpenFiles to for each store.
   348  //
   349  // Minimum - 1700 per store, 256 saved for networking
   350  //
   351  // Constrained - 256 saved for networking, rest divided evenly per store
   352  //
   353  // Constrained (network only) - 10000 per store, rest saved for networking
   354  //
   355  // Recommended - 10000 per store, 5000 for network
   356  //
   357  // Please note that current and max limits are commonly referred to as the soft
   358  // and hard limits respectively.
   359  //
   360  // On Windows there is no need to change the file descriptor (handle) limit.
   361  // This limit cannot be changed and is approximately 16,711,680. See
   362  // https://blogs.technet.microsoft.com/markrussinovich/2009/09/29/pushing-the-limits-of-windows-handles/
   363  func setOpenFileLimit(physicalStoreCount int) (uint64, error) {
   364  	return setOpenFileLimitInner(physicalStoreCount)
   365  }
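
        // To make the cases above concrete, a worked example based only on the
        // numbers in the comment (the exact computation lives in
        // setOpenFileLimitInner and may differ in detail): with two physical
        // stores and a hard limit of 8000 descriptors, 256 are reserved for
        // networking and the remaining 7744 are split evenly, i.e. 3872 per store
        // (the constrained case); with a hard limit of 25000 or more, each store
        // gets the recommended 10000 and at least 5000 remain for networking.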
   366  
   367  // SetOpenFileLimitForOneStore sets the soft limit for open file descriptors
   368  // when there is only one store.
   369  func SetOpenFileLimitForOneStore() (uint64, error) {
   370  	return setOpenFileLimit(1)
   371  }
   372  
   373  // MakeConfig returns a Config for the system tenant with default values.
   374  func MakeConfig(ctx context.Context, st *cluster.Settings) Config {
   375  	storeSpec, err := base.NewStoreSpec(defaultStorePath)
   376  	if err != nil {
   377  		panic(err)
   378  	}
   379  	tempStorageCfg := base.TempStorageConfigFromEnv(
   380  		ctx, st, storeSpec, "" /* parentDir */, base.DefaultTempStorageMaxSizeBytes)
   381  
   382  	sqlCfg := MakeSQLConfig(roachpb.SystemTenantID, tempStorageCfg)
   383  	baseCfg := MakeBaseConfig(st)
   384  	kvCfg := MakeKVConfig(storeSpec)
   385  
   386  	cfg := Config{
   387  		BaseConfig: baseCfg,
   388  		KVConfig:   kvCfg,
   389  		SQLConfig:  sqlCfg,
   390  	}
   391  
   392  	return cfg
   393  }
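
        // A minimal usage sketch, assuming a *cluster.Settings st obtained
        // elsewhere; InitNode and CreateEngines (below) are typically called on
        // the resulting Config before starting a server.
        //
        //	ctx := context.Background()
        //	cfg := MakeConfig(ctx, st)
        //	cfg.Attrs = "ssd:gpu" // hypothetical node attributes
        //	cfg.Report(ctx)       // log an overview of the configuration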
   394  
   395  // String implements the fmt.Stringer interface.
   396  func (cfg *Config) String() string {
   397  	var buf bytes.Buffer
   398  
   399  	w := tabwriter.NewWriter(&buf, 2, 1, 2, ' ', 0)
   400  	fmt.Fprintln(w, "max offset\t", cfg.MaxOffset)
   401  	fmt.Fprintln(w, "cache size\t", humanizeutil.IBytes(cfg.CacheSize))
   402  	fmt.Fprintln(w, "SQL memory pool size\t", humanizeutil.IBytes(cfg.MemoryPoolSize))
   403  	fmt.Fprintln(w, "scan interval\t", cfg.ScanInterval)
   404  	fmt.Fprintln(w, "scan min idle time\t", cfg.ScanMinIdleTime)
   405  	fmt.Fprintln(w, "scan max idle time\t", cfg.ScanMaxIdleTime)
   406  	fmt.Fprintln(w, "event log enabled\t", cfg.EventLogEnabled)
   407  	if cfg.Linearizable {
   408  		fmt.Fprintln(w, "linearizable\t", cfg.Linearizable)
   409  	}
   410  	_ = w.Flush()
   411  
   412  	return buf.String()
   413  }
   414  
   415  // Report logs an overview of the server configuration parameters via
   416  // the given context.
   417  func (cfg *Config) Report(ctx context.Context) {
   418  	if memSize, err := status.GetTotalMemory(ctx); err != nil {
   419  		log.Infof(ctx, "unable to retrieve system total memory: %v", err)
   420  	} else {
   421  		log.Infof(ctx, "system total memory: %s", humanizeutil.IBytes(memSize))
   422  	}
   423  	log.Infof(ctx, "server configuration:\n%s", cfg)
   424  }
   425  
   426  // Engines is a container of engines, allowing convenient closing.
   427  type Engines []storage.Engine
   428  
   429  // Close closes all the Engines.
   430  // This method has a pointer receiver so that the following pattern works:
   431  //	func f() {
   432  //		engines := Engines(engineSlice)
   433  //		defer engines.Close()  // make sure the engines are Closed if this
   434  //		                       // function returns early.
   435  //		... do something with engines, pass ownership away...
   436  //		engines = nil  // neutralize the preceding defer
   437  //	}
   438  func (e *Engines) Close() {
   439  	for _, eng := range *e {
   440  		eng.Close()
   441  	}
   442  	*e = nil
   443  }
   444  
   445  // CreateEngines creates Engines based on the specs in cfg.Stores.
   446  func (cfg *Config) CreateEngines(ctx context.Context) (Engines, error) {
   447  	engines := Engines(nil)
   448  	defer engines.Close()
   449  
   450  	if cfg.enginesCreated {
   451  		return Engines{}, errors.Errorf("engines already created")
   452  	}
   453  	cfg.enginesCreated = true
   454  
   455  	var details []string
   456  
   457  	var cache storage.RocksDBCache
   458  	var pebbleCache *pebble.Cache
   459  	if cfg.StorageEngine == enginepb.EngineTypeDefault ||
   460  		cfg.StorageEngine == enginepb.EngineTypePebble || cfg.StorageEngine == enginepb.EngineTypeTeePebbleRocksDB {
   461  		details = append(details, fmt.Sprintf("Pebble cache size: %s", humanizeutil.IBytes(cfg.CacheSize)))
   462  		pebbleCache = pebble.NewCache(cfg.CacheSize)
   463  		defer pebbleCache.Unref()
   464  	}
   465  	if cfg.StorageEngine == enginepb.EngineTypeRocksDB || cfg.StorageEngine == enginepb.EngineTypeTeePebbleRocksDB {
   466  		details = append(details, fmt.Sprintf("RocksDB cache size: %s", humanizeutil.IBytes(cfg.CacheSize)))
   467  		cache = storage.NewRocksDBCache(cfg.CacheSize)
   468  		defer cache.Release()
   469  	}
   470  
   471  	var physicalStores int
   472  	for _, spec := range cfg.Stores.Specs {
   473  		if !spec.InMemory {
   474  			physicalStores++
   475  		}
   476  	}
   477  	openFileLimitPerStore, err := setOpenFileLimit(physicalStores)
   478  	if err != nil {
   479  		return Engines{}, err
   480  	}
   481  
   482  	log.Event(ctx, "initializing engines")
   483  
   484  	skipSizeCheck := cfg.TestingKnobs.Store != nil &&
   485  		cfg.TestingKnobs.Store.(*kvserver.StoreTestingKnobs).SkipMinSizeCheck
   486  	for i, spec := range cfg.Stores.Specs {
   487  		log.Eventf(ctx, "initializing %+v", spec)
   488  		var sizeInBytes = spec.Size.InBytes
   489  		if spec.InMemory {
   490  			if spec.Size.Percent > 0 {
   491  				sysMem, err := status.GetTotalMemory(ctx)
   492  				if err != nil {
   493  					return Engines{}, errors.Errorf("could not retrieve system memory")
   494  				}
   495  				sizeInBytes = int64(float64(sysMem) * spec.Size.Percent / 100)
   496  			}
   497  			if sizeInBytes != 0 && !skipSizeCheck && sizeInBytes < base.MinimumStoreSize {
   498  				return Engines{}, errors.Errorf("%f%% of memory is only %s bytes, which is below the minimum requirement of %s",
   499  					spec.Size.Percent, humanizeutil.IBytes(sizeInBytes), humanizeutil.IBytes(base.MinimumStoreSize))
   500  			}
   501  			details = append(details, fmt.Sprintf("store %d: in-memory, size %s",
   502  				i, humanizeutil.IBytes(sizeInBytes)))
   503  			if spec.StickyInMemoryEngineID != "" {
   504  				e, err := getOrCreateStickyInMemEngine(
   505  					ctx, spec.StickyInMemoryEngineID, cfg.StorageEngine, spec.Attributes, sizeInBytes,
   506  				)
   507  				if err != nil {
   508  					return Engines{}, err
   509  				}
   510  				engines = append(engines, e)
   511  			} else {
   512  				engines = append(engines, storage.NewInMem(ctx, cfg.StorageEngine, spec.Attributes, sizeInBytes))
   513  			}
   514  		} else {
   515  			if spec.Size.Percent > 0 {
   516  				fileSystemUsage := gosigar.FileSystemUsage{}
   517  				if err := fileSystemUsage.Get(spec.Path); err != nil {
   518  					return Engines{}, err
   519  				}
   520  				sizeInBytes = int64(float64(fileSystemUsage.Total) * spec.Size.Percent / 100)
   521  			}
   522  			if sizeInBytes != 0 && !skipSizeCheck && sizeInBytes < base.MinimumStoreSize {
   523  				return Engines{}, errors.Errorf("%f%% of %s's total free space is only %s bytes, which is below the minimum requirement of %s",
   524  					spec.Size.Percent, spec.Path, humanizeutil.IBytes(sizeInBytes), humanizeutil.IBytes(base.MinimumStoreSize))
   525  			}
   526  
   527  			details = append(details, fmt.Sprintf("store %d: RocksDB, max size %s, max open file limit %d",
   528  				i, humanizeutil.IBytes(sizeInBytes), openFileLimitPerStore))
   529  
   530  			var eng storage.Engine
   531  			var err error
   532  			storageConfig := base.StorageConfig{
   533  				Attrs:           spec.Attributes,
   534  				Dir:             spec.Path,
   535  				MaxSize:         sizeInBytes,
   536  				Settings:        cfg.Settings,
   537  				UseFileRegistry: spec.UseFileRegistry,
   538  				ExtraOptions:    spec.ExtraOptions,
   539  			}
   540  			if cfg.StorageEngine == enginepb.EngineTypePebble || cfg.StorageEngine == enginepb.EngineTypeDefault {
   541  				// TODO(itsbilal): Tune these options, and allow them to be overridden
   542  				// in the spec (similar to the existing spec.RocksDBOptions and others).
   543  				pebbleConfig := storage.PebbleConfig{
   544  					StorageConfig: storageConfig,
   545  					Opts:          storage.DefaultPebbleOptions(),
   546  				}
   547  				pebbleConfig.Opts.Cache = pebbleCache
   548  				pebbleConfig.Opts.MaxOpenFiles = int(openFileLimitPerStore)
   549  				eng, err = storage.NewPebble(ctx, pebbleConfig)
   550  			} else if cfg.StorageEngine == enginepb.EngineTypeRocksDB {
   551  				rocksDBConfig := storage.RocksDBConfig{
   552  					StorageConfig:           storageConfig,
   553  					MaxOpenFiles:            openFileLimitPerStore,
   554  					WarnLargeBatchThreshold: 500 * time.Millisecond,
   555  					RocksDBOptions:          spec.RocksDBOptions,
   556  				}
   557  
   558  				eng, err = storage.NewRocksDB(rocksDBConfig, cache)
   559  			} else {
   560  				// cfg.StorageEngine == enginepb.EngineTypeTeePebbleRocksDB
   561  				pebbleConfig := storage.PebbleConfig{
   562  					StorageConfig: storageConfig,
   563  					Opts:          storage.DefaultPebbleOptions(),
   564  				}
   565  				pebbleConfig.Dir = filepath.Join(pebbleConfig.Dir, "pebble")
   566  				pebbleConfig.Opts.Cache = pebbleCache
   567  				pebbleConfig.Opts.MaxOpenFiles = int(openFileLimitPerStore)
   568  				pebbleEng, err := storage.NewPebble(ctx, pebbleConfig)
   569  				if err != nil {
   570  					return nil, err
   571  				}
   572  
   573  				rocksDBConfig := storage.RocksDBConfig{
   574  					StorageConfig:           storageConfig,
   575  					MaxOpenFiles:            openFileLimitPerStore,
   576  					WarnLargeBatchThreshold: 500 * time.Millisecond,
   577  					RocksDBOptions:          spec.RocksDBOptions,
   578  				}
   579  				rocksDBConfig.Dir = filepath.Join(rocksDBConfig.Dir, "rocksdb")
   580  
   581  				rocksdbEng, err := storage.NewRocksDB(rocksDBConfig, cache)
   582  				if err != nil {
   583  					return nil, err
   584  				}
   585  
   586  				eng = storage.NewTee(ctx, rocksdbEng, pebbleEng)
   587  			}
   588  			if err != nil {
   589  				return Engines{}, err
   590  			}
   591  			engines = append(engines, eng)
   592  		}
   593  	}
   594  
   595  	log.Infof(ctx, "%d storage engine%s initialized",
   596  		len(engines), util.Pluralize(int64(len(engines))))
   597  	for _, s := range details {
   598  		log.Infof(ctx, "%v", s)
   599  	}
   600  	enginesCopy := engines
   601  	engines = nil
   602  	return enginesCopy, nil
   603  }
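
        // A hedged sketch of the calling pattern described on Engines.Close:
        // create the engines, arrange for them to be closed on early return, and
        // neutralize the defer once ownership has been handed off.
        //
        //	engines, err := cfg.CreateEngines(ctx)
        //	if err != nil {
        //		return err
        //	}
        //	defer engines.Close()
        //	// ... pass ownership of the engines away ...
        //	engines = nil // neutralize the preceding defer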
   604  
   605  // InitNode parses node attributes and initializes the gossip bootstrap
   606  // resolvers.
   607  func (cfg *Config) InitNode(ctx context.Context) error {
   608  	cfg.readEnvironmentVariables()
   609  
   610  	// Initialize attributes.
   611  	cfg.NodeAttributes = parseAttributes(cfg.Attrs)
   612  
   613  	// Get the gossip bootstrap resolvers.
   614  	resolvers, err := cfg.parseGossipBootstrapResolvers(ctx)
   615  	if err != nil {
   616  		return err
   617  	}
   618  	if len(resolvers) > 0 {
   619  		cfg.GossipBootstrapResolvers = resolvers
   620  	}
   621  
   622  	return nil
   623  }
   624  
   625  // FilterGossipBootstrapResolvers removes any gossip bootstrap resolvers which
   626  // match either this node's listen address or its advertised host address.
   627  func (cfg *Config) FilterGossipBootstrapResolvers(
   628  	ctx context.Context, listen, advert net.Addr,
   629  ) []resolver.Resolver {
   630  	filtered := make([]resolver.Resolver, 0, len(cfg.GossipBootstrapResolvers))
   631  	addrs := make([]string, 0, len(cfg.GossipBootstrapResolvers))
   632  	for _, r := range cfg.GossipBootstrapResolvers {
   633  		if r.Addr() == advert.String() || r.Addr() == listen.String() {
   634  			if log.V(1) {
   635  				log.Infof(ctx, "skipping -join address %q, because a node cannot join itself", r.Addr())
   636  			}
   637  		} else {
   638  			filtered = append(filtered, r)
   639  			addrs = append(addrs, r.Addr())
   640  		}
   641  	}
   642  	if log.V(1) {
   643  		log.Infof(ctx, "initial resolvers: %v", addrs)
   644  	}
   645  	return filtered
   646  }
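
        // An illustrative sketch; util.NewUnresolvedAddr is assumed here only as a
        // convenient net.Addr implementation. Resolvers that point at this node's
        // own listen or advertised address are dropped from the result.
        //
        //	listen := util.NewUnresolvedAddr("tcp", "node1:26257")
        //	advert := util.NewUnresolvedAddr("tcp", "node1.example.com:26257")
        //	remaining := cfg.FilterGossipBootstrapResolvers(ctx, listen, advert)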
   647  
   648  // RequireWebSession indicates whether the server should require authentication
   649  // sessions when serving admin API requests.
   650  func (cfg *Config) RequireWebSession() bool {
   651  	return !cfg.Insecure && cfg.EnableWebSessionAuthentication
   652  }
   653  
   654  // readEnvironmentVariables populates all context values that are environment
   655  // variable based. Note that this only happens when initializing a node and not
   656  // when NewContext is called.
   657  func (cfg *Config) readEnvironmentVariables() {
   658  	cfg.Linearizable = envutil.EnvOrDefaultBool("COCKROACH_EXPERIMENTAL_LINEARIZABLE", cfg.Linearizable)
   659  	cfg.ScanInterval = envutil.EnvOrDefaultDuration("COCKROACH_SCAN_INTERVAL", cfg.ScanInterval)
   660  	cfg.ScanMinIdleTime = envutil.EnvOrDefaultDuration("COCKROACH_SCAN_MIN_IDLE_TIME", cfg.ScanMinIdleTime)
   661  	cfg.ScanMaxIdleTime = envutil.EnvOrDefaultDuration("COCKROACH_SCAN_MAX_IDLE_TIME", cfg.ScanMaxIdleTime)
   662  }
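
        // For example (a sketch, not an exhaustive list): starting the node with
        // COCKROACH_SCAN_INTERVAL=1m in the environment overrides cfg.ScanInterval
        // to one minute, while leaving the variable unset keeps the existing value.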
   663  
   664  // parseGossipBootstrapResolvers parses the list of gossip bootstrap resolvers.
   665  func (cfg *Config) parseGossipBootstrapResolvers(ctx context.Context) ([]resolver.Resolver, error) {
   666  	var bootstrapResolvers []resolver.Resolver
   667  	for _, address := range cfg.JoinList {
   668  		if cfg.JoinPreferSRVRecords {
   669  			// The following code substitutes the entry in --join by the
   670  			// result of SRV resolution, if suitable SRV records are found
   671  			// for that name.
   672  			//
   673  			// TODO(knz): Delay this lookup. The logic for "regular" resolvers
   674  			// is delayed until the point the connection is attempted, so that
   675  			// fresh DNS records are used for a new connection. This makes
   676  			// it possible to update DNS records without restarting the node.
   677  			// The SRV logic here does not have this property (yet).
   678  			srvAddrs, err := resolver.SRV(ctx, address)
   679  			if err != nil {
   680  				return nil, err
   681  			}
   682  
   683  			if len(srvAddrs) > 0 {
   684  				for _, sa := range srvAddrs {
   685  					resolver, err := resolver.NewResolver(sa)
   686  					if err != nil {
   687  						return nil, err
   688  					}
   689  					bootstrapResolvers = append(bootstrapResolvers, resolver)
   690  				}
   691  
   692  				continue
   693  			}
   694  		}
   695  
   696  		// Otherwise, use the address.
   697  		resolver, err := resolver.NewResolver(address)
   698  		if err != nil {
   699  			return nil, err
   700  		}
   701  		bootstrapResolvers = append(bootstrapResolvers, resolver)
   702  	}
   703  
   704  	return bootstrapResolvers, nil
   705  }
   706  
   707  // parseAttributes parses a colon-separated list of strings,
   708  // filtering empty strings (i.e. "::" will yield no attributes).
   709  // Returns the list of strings as Attributes.
   710  func parseAttributes(attrsStr string) roachpb.Attributes {
   711  	var filtered []string
   712  	for _, attr := range strings.Split(attrsStr, ":") {
   713  		if len(attr) != 0 {
   714  			filtered = append(filtered, attr)
   715  		}
   716  	}
   717  	return roachpb.Attributes{Attrs: filtered}
   718  }
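
        // For example, parseAttributes("ssd::gpu:") filters out the empty
        // components and yields roachpb.Attributes{Attrs: []string{"ssd", "gpu"}}.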