github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/client/config.go

// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package client

import (
	"errors"
	"fmt"
	"time"

	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
	"github.com/m3db/m3/src/dbnode/environment"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/topology"
	"github.com/m3db/m3/src/x/clock"
	xerrors "github.com/m3db/m3/src/x/errors"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/instrument"
	"github.com/m3db/m3/src/x/retry"
	"github.com/m3db/m3/src/x/sampler"
	xsync "github.com/m3db/m3/src/x/sync"
)

const (
	asyncWriteWorkerPoolDefaultSize = 128
)

// Configuration is a configuration that can be used to construct a client.
type Configuration struct {
	// The environment (static or dynamic) configuration.
	EnvironmentConfig *environment.Configuration `yaml:"config"`

	// WriteConsistencyLevel specifies the write consistency level.
	WriteConsistencyLevel *topology.ConsistencyLevel `yaml:"writeConsistencyLevel"`

	// ReadConsistencyLevel specifies the read consistency level.
	ReadConsistencyLevel *topology.ReadConsistencyLevel `yaml:"readConsistencyLevel"`

	// ConnectConsistencyLevel specifies the cluster connect consistency level.
	ConnectConsistencyLevel *topology.ConnectConsistencyLevel `yaml:"connectConsistencyLevel"`

	// WriteTimeout is the write request timeout.
	WriteTimeout *time.Duration `yaml:"writeTimeout"`

	// FetchTimeout is the fetch request timeout.
	FetchTimeout *time.Duration `yaml:"fetchTimeout"`

	// ConnectTimeout is the cluster connect timeout.
	ConnectTimeout *time.Duration `yaml:"connectTimeout"`

	// WriteRetry is the write retry config.
	WriteRetry *retry.Configuration `yaml:"writeRetry"`

	// FetchRetry is the fetch retry config.
	FetchRetry *retry.Configuration `yaml:"fetchRetry"`

	// LogErrorSampleRate is the log error sample rate.
	LogErrorSampleRate sampler.Rate `yaml:"logErrorSampleRate"`

	// LogHostWriteErrorSampleRate is the log write error per host sample rate.
	LogHostWriteErrorSampleRate sampler.Rate `yaml:"logHostWriteErrorSampleRate"`

	// LogHostFetchErrorSampleRate is the log fetch error per host sample rate.
	LogHostFetchErrorSampleRate sampler.Rate `yaml:"logHostFetchErrorSampleRate"`

	// BackgroundHealthCheckFailLimit is the amount of times a background check
	// must fail before a connection is taken out of consideration.
	BackgroundHealthCheckFailLimit *int `yaml:"backgroundHealthCheckFailLimit"`

	// BackgroundHealthCheckFailThrottleFactor is the factor of the host connect
	// time to use when sleeping between a failed health check and the next check.
	BackgroundHealthCheckFailThrottleFactor *float64 `yaml:"backgroundHealthCheckFailThrottleFactor"`

	// HashingConfiguration is the configuration for hashing of IDs to shards.
	HashingConfiguration *HashingConfiguration `yaml:"hashing"`

	// Proto contains the configuration specific to running in the ProtoDataMode.
	Proto *ProtoConfiguration `yaml:"proto"`

	// AsyncWriteWorkerPoolSize is the worker pool size for async write requests.
	AsyncWriteWorkerPoolSize *int `yaml:"asyncWriteWorkerPoolSize"`

	// AsyncWriteMaxConcurrency is the maximum concurrency for async write requests.
	AsyncWriteMaxConcurrency *int `yaml:"asyncWriteMaxConcurrency"`

	// UseV2BatchAPIs determines whether the V2 batch APIs are used. Note that the M3DB nodes must
	// have support for the V2 APIs in order for this feature to be used.
	UseV2BatchAPIs *bool `yaml:"useV2BatchAPIs"`

	// WriteTimestampOffset offsets all writes by the specified duration into the past.
	WriteTimestampOffset *time.Duration `yaml:"writeTimestampOffset"`

	// FetchSeriesBlocksBatchConcurrency sets the number of batches of blocks to retrieve
	// in parallel from a remote peer. Defaults to NumCPU / 2.
	FetchSeriesBlocksBatchConcurrency *int `yaml:"fetchSeriesBlocksBatchConcurrency"`

	// FetchSeriesBlocksBatchSize sets the number of blocks to retrieve in a single batch
	// from the remote peer. Defaults to 4096.
	FetchSeriesBlocksBatchSize *int `yaml:"fetchSeriesBlocksBatchSize"`

	// WriteShardsInitializing sets whether or not writes to initializing shards
	// count towards consistency; by default they do.
	WriteShardsInitializing *bool `yaml:"writeShardsInitializing"`

	// ShardsLeavingCountTowardsConsistency sets whether or not writes to leaving shards
	// count towards consistency; by default they do not.
	ShardsLeavingCountTowardsConsistency *bool `yaml:"shardsLeavingCountTowardsConsistency"`

	// ShardsLeavingAndInitializingCountTowardsConsistency sets whether or not writes to leaving and initializing shards
	// count towards consistency; by default they do not.
	ShardsLeavingAndInitializingCountTowardsConsistency *bool `yaml:"shardsLeavingAndInitializingCountTowardsConsistency"`

	// IterateEqualTimestampStrategy specifies the iterate equal timestamp strategy.
	IterateEqualTimestampStrategy *encoding.IterateEqualTimestampStrategy `yaml:"iterateEqualTimestampStrategy"`
}
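
// An illustrative YAML sketch of the fields above (a sketch only: the keys
// mirror the yaml tags declared on this struct, the values are placeholder
// assumptions rather than recommended settings, and the enclosing `client:`
// key depends on where the embedding configuration nests this struct):
//
//	client:
//	  writeConsistencyLevel: majority
//	  readConsistencyLevel: unstrict_majority
//	  connectConsistencyLevel: any
//	  writeTimeout: 10s
//	  fetchTimeout: 15s
//	  connectTimeout: 20s
//	  backgroundHealthCheckFailLimit: 4
//	  backgroundHealthCheckFailThrottleFactor: 0.5
//	  useV2BatchAPIs: true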

// ProtoConfiguration is the configuration for running with ProtoDataMode enabled.
type ProtoConfiguration struct {
	// Enabled specifies whether proto is enabled.
	Enabled bool `yaml:"enabled"`
	// SchemaRegistry loads user schemas from the client configuration into the
	// schema registry at startup/initialization time, keyed by namespace ID.
	SchemaRegistry map[string]NamespaceProtoSchema `yaml:"schema_registry"`
}

// NamespaceProtoSchema is the protobuf schema for a namespace.
type NamespaceProtoSchema struct {
	MessageName    string `yaml:"messageName"`
	SchemaDeployID string `yaml:"schemaDeployID"`
	SchemaFilePath string `yaml:"schemaFilePath"`
}

// Validate validates the NamespaceProtoSchema.
func (c NamespaceProtoSchema) Validate() error {
	if c.SchemaFilePath == "" {
		return errors.New("schemaFilePath is required for Proto data mode")
	}

	if c.MessageName == "" {
		return errors.New("messageName is required for Proto data mode")
	}

	return nil
}

// Validate validates the ProtoConfiguration.
func (c *ProtoConfiguration) Validate() error {
	if c == nil || !c.Enabled {
		return nil
	}

	for _, schema := range c.SchemaRegistry {
		if err := schema.Validate(); err != nil {
			return err
		}
	}
	return nil
}
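
// An illustrative YAML sketch for the proto section (the namespace ID, message
// name, and schema file path below are hypothetical placeholders; schemaDeployID
// may be omitted since Validate only requires schemaFilePath and messageName):
//
//	proto:
//	  enabled: true
//	  schema_registry:
//	    "metrics_ns":
//	      schemaFilePath: "/etc/m3dbnode/default_schema.proto"
//	      messageName: "VehicleLocation"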

// Validate validates the configuration.
func (c *Configuration) Validate() error {
	if c.WriteTimeout != nil && *c.WriteTimeout < 0 {
		return fmt.Errorf("m3db client writeTimeout was: %d but must be >= 0", *c.WriteTimeout)
	}

	if c.FetchTimeout != nil && *c.FetchTimeout < 0 {
		return fmt.Errorf("m3db client fetchTimeout was: %d but must be >= 0", *c.FetchTimeout)
	}

	if c.ConnectTimeout != nil && *c.ConnectTimeout < 0 {
		return fmt.Errorf("m3db client connectTimeout was: %d but must be >= 0", *c.ConnectTimeout)
	}

	if err := c.LogErrorSampleRate.Validate(); err != nil {
		return fmt.Errorf("m3db client error validating log error sample rate: %v", err)
	}

	if c.BackgroundHealthCheckFailLimit != nil &&
		(*c.BackgroundHealthCheckFailLimit < 0 || *c.BackgroundHealthCheckFailLimit > 10) {
		return fmt.Errorf(
			"m3db client backgroundHealthCheckFailLimit was: %d but must be >= 0 and <=10",
			*c.BackgroundHealthCheckFailLimit)
	}

	if c.BackgroundHealthCheckFailThrottleFactor != nil &&
		(*c.BackgroundHealthCheckFailThrottleFactor < 0 || *c.BackgroundHealthCheckFailThrottleFactor > 10) {
		return fmt.Errorf(
			"m3db client backgroundHealthCheckFailThrottleFactor was: %f but must be >= 0 and <=10",
			*c.BackgroundHealthCheckFailThrottleFactor)
	}

	if c.AsyncWriteWorkerPoolSize != nil && *c.AsyncWriteWorkerPoolSize <= 0 {
		return fmt.Errorf("m3db client async write worker pool size was: %d but must be >0",
			*c.AsyncWriteWorkerPoolSize)
	}

	if c.AsyncWriteMaxConcurrency != nil && *c.AsyncWriteMaxConcurrency <= 0 {
		return fmt.Errorf("m3db client async write max concurrency was: %d but must be >0",
			*c.AsyncWriteMaxConcurrency)
	}

	if c.ShardsLeavingCountTowardsConsistency != nil && c.ShardsLeavingAndInitializingCountTowardsConsistency != nil &&
		*c.ShardsLeavingCountTowardsConsistency && *c.ShardsLeavingAndInitializingCountTowardsConsistency {
		return fmt.Errorf("m3db client cannot have both shardsLeavingCountTowardsConsistency and " +
			"shardsLeavingAndInitializingCountTowardsConsistency as true")
	}

	if err := c.Proto.Validate(); err != nil {
		return fmt.Errorf("error validating M3DB client proto configuration: %v", err)
	}

	return nil
}

// HashingConfiguration is the configuration for hashing of IDs to shards.
type HashingConfiguration struct {
	// Seed is the murmur32 seed value.
	Seed uint32 `yaml:"seed"`
}

// ConfigurationParameters are optional parameters that can be specified
// when creating a client from configuration. They are passed as a struct
// so that adding fields does not cause breaking changes to callers.
type ConfigurationParameters struct {
	// InstrumentOptions is a required argument when
	// constructing a client from configuration.
	InstrumentOptions instrument.Options

	// ClockOptions is an optional argument when
	// constructing a client from configuration.
	ClockOptions clock.Options

	// TopologyInitializer is an optional argument when
	// constructing a client from configuration.
	TopologyInitializer topology.Initializer

	// EncodingOptions is an optional argument when
	// constructing a client from configuration.
	EncodingOptions encoding.Options
}

// CustomOption is a programmatic method for setting a client
// option after all the options have been set by configuration.
type CustomOption func(v Options) Options

// CustomAdminOption is a programmatic method for setting a client
// admin option after all the options have been set by configuration.
type CustomAdminOption func(v AdminOptions) AdminOptions

// NewClient creates a new M3DB client using the
// specified params and custom options.
func (c Configuration) NewClient(
	params ConfigurationParameters,
	custom ...CustomOption,
) (Client, error) {
	customAdmin := make([]CustomAdminOption, 0, len(custom))
	for _, opt := range custom {
		// Capture the loop variable: the closures below run later, so without
		// this each one would otherwise see only the last option.
		opt := opt
		customAdmin = append(customAdmin, func(v AdminOptions) AdminOptions {
			return opt(Options(v)).(AdminOptions)
		})
	}

	v, err := c.NewAdminClient(params, customAdmin...)
	if err != nil {
		return nil, err
	}

	return v, err
}
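
// A minimal usage sketch from a caller's package (cfgBytes, the use of
// gopkg.in/yaml.v2 for unmarshalling, and the trailing custom option are
// illustrative assumptions; only InstrumentOptions is required in
// ConfigurationParameters):
//
//	var cfg client.Configuration
//	if err := yaml.Unmarshal(cfgBytes, &cfg); err != nil {
//		return err
//	}
//	c, err := cfg.NewClient(client.ConfigurationParameters{
//		InstrumentOptions: instrument.NewOptions(),
//	}, func(opts client.Options) client.Options {
//		// Programmatic override applied after the configuration is processed.
//		return opts.SetWriteConsistencyLevel(topology.ConsistencyLevelAll)
//	})
//	if err != nil {
//		return err
//	}
//	session, err := c.DefaultSession()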

// NewAdminClient creates a new M3DB admin client using the
// specified params and custom options.
func (c Configuration) NewAdminClient(
	params ConfigurationParameters,
	custom ...CustomAdminOption,
) (AdminClient, error) {
	err := c.Validate()
	if err != nil {
		return nil, err
	}

	iopts := params.InstrumentOptions
	if iopts == nil {
		iopts = instrument.NewOptions()
	}
	writeRequestScope := iopts.MetricsScope().SubScope("write-req")
	fetchRequestScope := iopts.MetricsScope().SubScope("fetch-req")

	cfgParams := environment.ConfigurationParameters{
		InstrumentOpts:                     iopts,
		AllowEmptyInitialNamespaceRegistry: true,
	}
	if c.HashingConfiguration != nil {
		cfgParams.HashingSeed = c.HashingConfiguration.Seed
	}

	var (
		syncTopoInit         = params.TopologyInitializer
		syncClientOverrides  environment.ClientOverrides
		syncNsInit           namespace.Initializer
		asyncTopoInits       = []topology.Initializer{}
		asyncClientOverrides = []environment.ClientOverrides{}
	)

	var buildAsyncPool bool
	if syncTopoInit == nil {
		envCfgs, err := c.EnvironmentConfig.Configure(cfgParams)
		if err != nil {
			err = fmt.Errorf("unable to create topology initializer, err: %v", err)
			return nil, err
		}

		for _, envCfg := range envCfgs {
			if envCfg.Async {
				asyncTopoInits = append(asyncTopoInits, envCfg.TopologyInitializer)
				asyncClientOverrides = append(asyncClientOverrides, envCfg.ClientOverrides)
				buildAsyncPool = true
			} else {
				syncTopoInit = envCfg.TopologyInitializer
				syncClientOverrides = envCfg.ClientOverrides
				syncNsInit = envCfg.NamespaceInitializer
			}
		}
	}

	v := NewAdminOptions().
		SetTopologyInitializer(syncTopoInit).
		SetNamespaceInitializer(syncNsInit).
		SetAsyncTopologyInitializers(asyncTopoInits).
		SetInstrumentOptions(iopts).
		SetLogErrorSampleRate(c.LogErrorSampleRate).
		SetLogHostWriteErrorSampleRate(c.LogHostWriteErrorSampleRate).
		SetLogHostFetchErrorSampleRate(c.LogHostFetchErrorSampleRate)

	if params.ClockOptions != nil {
		v = v.SetClockOptions(params.ClockOptions)
	}

	if c.UseV2BatchAPIs != nil {
		v = v.SetUseV2BatchAPIs(*c.UseV2BatchAPIs)
	}

	if buildAsyncPool {
		var size int
		if c.AsyncWriteWorkerPoolSize == nil {
			size = asyncWriteWorkerPoolDefaultSize
		} else {
			size = *c.AsyncWriteWorkerPoolSize
		}

		workerPoolInstrumentOpts := iopts.SetMetricsScope(writeRequestScope.SubScope("workerpool"))
		workerPoolOpts := xsync.NewPooledWorkerPoolOptions().
			SetGrowOnDemand(true).
			SetInstrumentOptions(workerPoolInstrumentOpts)
		workerPool, err := xsync.NewPooledWorkerPool(size, workerPoolOpts)
		if err != nil {
			return nil, fmt.Errorf("unable to create async worker pool: %v", err)
		}
		workerPool.Init()
		v = v.SetAsyncWriteWorkerPool(workerPool)
	}

	if c.AsyncWriteMaxConcurrency != nil {
		v = v.SetAsyncWriteMaxConcurrency(*c.AsyncWriteMaxConcurrency)
	}

	if c.WriteConsistencyLevel != nil {
		v = v.SetWriteConsistencyLevel(*c.WriteConsistencyLevel)
	}
	if c.ReadConsistencyLevel != nil {
		v = v.SetReadConsistencyLevel(*c.ReadConsistencyLevel)
	}
	if c.ConnectConsistencyLevel != nil {
		v = v.SetClusterConnectConsistencyLevel(*c.ConnectConsistencyLevel)
	}
	if c.BackgroundHealthCheckFailLimit != nil {
		v = v.SetBackgroundHealthCheckFailLimit(*c.BackgroundHealthCheckFailLimit)
	}
	if c.BackgroundHealthCheckFailThrottleFactor != nil {
		v = v.SetBackgroundHealthCheckFailThrottleFactor(*c.BackgroundHealthCheckFailThrottleFactor)
	}
	if c.WriteTimeout != nil {
		v = v.SetWriteRequestTimeout(*c.WriteTimeout)
	}
	if c.FetchTimeout != nil {
		v = v.SetFetchRequestTimeout(*c.FetchTimeout)
	}
	if c.ConnectTimeout != nil {
		v = v.SetClusterConnectTimeout(*c.ConnectTimeout)
	}
	if c.WriteRetry != nil {
		v = v.SetWriteRetrier(c.WriteRetry.NewRetrier(writeRequestScope))
	} else {
		// Have not set write retry explicitly, but would like metrics
		// emitted for the write retrier with the scope for write requests.
		retrierOpts := v.WriteRetrier().Options().
			SetMetricsScope(writeRequestScope)
		v = v.SetWriteRetrier(retry.NewRetrier(retrierOpts))
	}
	if c.FetchRetry != nil {
		v = v.SetFetchRetrier(c.FetchRetry.NewRetrier(fetchRequestScope))
	} else {
		// Have not set fetch retry explicitly, but would like metrics
		// emitted for the fetch retrier with the scope for fetch requests.
		retrierOpts := v.FetchRetrier().Options().
			SetMetricsScope(fetchRequestScope)
		v = v.SetFetchRetrier(retry.NewRetrier(retrierOpts))
	}
	if syncClientOverrides.TargetHostQueueFlushSize != nil {
		v = v.SetHostQueueOpsFlushSize(*syncClientOverrides.TargetHostQueueFlushSize)
	}
	if syncClientOverrides.HostQueueFlushInterval != nil {
		v = v.SetHostQueueOpsFlushInterval(*syncClientOverrides.HostQueueFlushInterval)
	}

	if c.IterateEqualTimestampStrategy != nil {
		o := v.IterationOptions()
		o.IterateEqualTimestampStrategy = *c.IterateEqualTimestampStrategy
		v = v.SetIterationOptions(o)
	}

	encodingOpts := params.EncodingOptions
	if encodingOpts == nil {
		encodingOpts = encoding.NewOptions()
	}

	v = v.SetReaderIteratorAllocate(m3tsz.DefaultReaderIteratorAllocFn(encodingOpts))

	if c.Proto != nil && c.Proto.Enabled {
		v = v.SetEncodingProto(encodingOpts)
		schemaRegistry := namespace.NewSchemaRegistry(true, nil)
		// Load schema registry from file.
		deployID := "fromfile"
		for nsID, protoConfig := range c.Proto.SchemaRegistry {
			err = namespace.LoadSchemaRegistryFromFile(schemaRegistry, ident.StringID(nsID), deployID, protoConfig.SchemaFilePath, protoConfig.MessageName)
			if err != nil {
				return nil, xerrors.Wrapf(err, "could not load schema registry from file %s for namespace %s", protoConfig.SchemaFilePath, nsID)
			}
		}
		v = v.SetSchemaRegistry(schemaRegistry)
	}

	if c.WriteShardsInitializing != nil {
		v = v.SetWriteShardsInitializing(*c.WriteShardsInitializing)
	}
	if c.ShardsLeavingAndInitializingCountTowardsConsistency != nil {
		v = v.SetShardsLeavingAndInitializingCountTowardsConsistency(*c.ShardsLeavingAndInitializingCountTowardsConsistency)
	}

	if c.ShardsLeavingCountTowardsConsistency != nil {
		v = v.SetShardsLeavingCountTowardsConsistency(*c.ShardsLeavingCountTowardsConsistency)
	}

	// Cast to admin options to apply admin config options.
	opts := v.(AdminOptions)

	if c.WriteTimestampOffset != nil {
		opts = opts.SetWriteTimestampOffset(*c.WriteTimestampOffset)
	}

	if c.FetchSeriesBlocksBatchConcurrency != nil {
		opts = opts.SetFetchSeriesBlocksBatchConcurrency(*c.FetchSeriesBlocksBatchConcurrency)
	}
	if c.FetchSeriesBlocksBatchSize != nil {
		opts = opts.SetFetchSeriesBlocksBatchSize(*c.FetchSeriesBlocksBatchSize)
	}

	// Apply programmatic custom options last.
	for _, opt := range custom {
		opts = opt(opts)
	}

	asyncClusterOpts := NewOptionsForAsyncClusters(opts, asyncTopoInits, asyncClientOverrides)
	return NewAdminClient(opts, asyncClusterOpts...)
}