github.com/m3db/m3@v1.5.0/src/cmd/services/m3query/config/config.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package config

import (
	"errors"
	"math"
	"time"

	etcdclient "github.com/m3db/m3/src/cluster/client/etcd"
	"github.com/m3db/m3/src/cluster/placement"
	"github.com/m3db/m3/src/cmd/services/m3coordinator/downsample"
	ingestm3msg "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest/m3msg"
	"github.com/m3db/m3/src/cmd/services/m3coordinator/server/m3msg"
	"github.com/m3db/m3/src/metrics/aggregation"
	"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
	"github.com/m3db/m3/src/query/graphite/graphite"
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/m3"
	"github.com/m3db/m3/src/query/storage/m3/consolidators"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
	xconfig "github.com/m3db/m3/src/x/config"
	"github.com/m3db/m3/src/x/debug/config"
	"github.com/m3db/m3/src/x/instrument"
	xlog "github.com/m3db/m3/src/x/log"
	"github.com/m3db/m3/src/x/opentracing"
	xtime "github.com/m3db/m3/src/x/time"
)

// BackendStorageType is an enum for different backends.
type BackendStorageType string

const (
	// GRPCStorageType is for backends which only support grpc endpoints.
	GRPCStorageType BackendStorageType = "grpc"
	// M3DBStorageType is for m3db backend.
	M3DBStorageType BackendStorageType = "m3db"
	// NoopEtcdStorageType is for a noop backend which returns empty results for
	// any query and blackholes any writes, but requires that a valid etcd cluster
	// is defined and can be connected to. Primarily used for standalone
	// coordinators used only to serve m3admin APIs.
	NoopEtcdStorageType BackendStorageType = "noop-etcd"

	// PromRemoteStorageType is a type of storage that is backed by Prometheus Remote Write compatible API.
	PromRemoteStorageType BackendStorageType = "prom-remote"

	defaultListenAddress = "0.0.0.0:7201"

	defaultCarbonIngesterListenAddress = "0.0.0.0:7204"

	defaultQueryTimeout = 30 * time.Second

	defaultPrometheusMaxSamplesPerQuery = 100000000
)

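// Illustrative YAML sketch (not from this file's tests or docs): the backend is
// selected with the "backend" key defined on Configuration below, using one of
// the BackendStorageType constant values above.
//
//	backend: m3db   # one of: grpc, m3db, noop-etcd, prom-remote
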
var (
	defaultLogging = xlog.Configuration{
		Level: "info",
	}
	defaultMetricsSanitization = instrument.PrometheusMetricSanitization
	defaultMetricsExtendedMetricsType = instrument.NoExtendedMetrics
	defaultMetrics = instrument.MetricsConfiguration{
		RootScope: &instrument.ScopeConfiguration{
			Prefix: "coordinator",
		},
		PrometheusReporter: &instrument.PrometheusConfiguration{
			HandlerPath: "/metrics",
			// Default to coordinator (until https://github.com/m3db/m3/issues/682 is resolved)
			ListenAddress: "0.0.0.0:7203",
		},
		Sanitization: &defaultMetricsSanitization,
		SamplingRate: 1.0,
		ExtendedMetrics: &defaultMetricsExtendedMetricsType,
	}

	// 5m is the default lookback in Prometheus.
	defaultLookbackDuration = 5 * time.Minute

	defaultCarbonIngesterAggregationType = aggregation.Mean

	// By default, cap total series to prevent results of
	// extremely large sizes consuming too much memory.
	defaultStorageQuerySeriesLimit = 100_000
	defaultStorageQueryDocsLimit = 0 // Default OFF.

	// By default, raise errors instead of truncating results so
	// users do not see unexpected results.
	defaultRequireExhaustive = true

	defaultWriteWorkerPool = xconfig.WorkerPoolPolicy{
		GrowOnDemand: true,
		Size: 4096,
		KillWorkerProbability: 0.001,
	}

	// By default, return up to 4 metric metadata stats per request.
	defaultMaxMetricMetadataStats = 4
)

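// Illustrative minimal coordinator YAML sketch (keys follow the yaml tags on
// Configuration below; the clusters block is abbreviated because its shape is
// defined by m3.ClustersStaticConfiguration, and the logging layout by
// xlog.Configuration, neither of which lives in this file):
//
//	listenAddress: 0.0.0.0:7201
//	backend: m3db
//	logging:
//	  level: info
//	clusters:
//	  # cluster and namespace details elided
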
// Configuration is the configuration for the query service.
type Configuration struct {
	// Metrics configuration.
	Metrics *instrument.MetricsConfiguration `yaml:"metrics"`

	// Logging configuration.
	Logging *xlog.Configuration `yaml:"logging"`

	// Tracing configures opentracing. If not provided, tracing is disabled.
	Tracing opentracing.TracingConfiguration `yaml:"tracing"`

	// Clusters is the DB cluster configurations for read, write and
	// query endpoints.
	Clusters m3.ClustersStaticConfiguration `yaml:"clusters"`

	// LocalConfiguration is the local embedded configuration if running
	// the coordinator embedded in the DB.
	Local *LocalConfiguration `yaml:"local"`

	// ClusterManagement is the configuration for placement, namespaces and
	// database management endpoints.
	ClusterManagement ClusterManagementConfiguration `yaml:"clusterManagement"`

	// PrometheusRemoteBackend configures the prometheus remote write backend.
	// Used only when the backend property is "prom-remote".
	PrometheusRemoteBackend *PrometheusRemoteBackendConfiguration `yaml:"prometheusRemoteBackend"`

	// ListenAddress is the server listen address.
	ListenAddress *string `yaml:"listenAddress"`

	// Filter is the read/write/complete tags filter configuration.
	Filter FilterConfiguration `yaml:"filter"`

	// RPC is the RPC configuration.
	RPC *RPCConfiguration `yaml:"rpc"`

	// HTTP is the HTTP configuration.
	HTTP HTTPConfiguration `yaml:"http"`

	// Backend is the backend store for the query service.
	Backend BackendStorageType `yaml:"backend"`

	// TagOptions is the tag configuration options.
	TagOptions TagOptionsConfiguration `yaml:"tagOptions"`

	// ReadWorkerPool is the worker pool policy for read requests.
	ReadWorkerPool xconfig.WorkerPoolPolicy `yaml:"readWorkerPoolPolicy"`

	// WriteWorkerPool is the worker pool policy for write requests.
	WriteWorkerPool *xconfig.WorkerPoolPolicy `yaml:"writeWorkerPoolPolicy"`

	// WriteForwarding is the write forwarding options.
	WriteForwarding WriteForwardingConfiguration `yaml:"writeForwarding"`

	// Downsample configures how the metrics should be downsampled.
	Downsample downsample.Configuration `yaml:"downsample"`

	// Ingest is the ingest server.
	Ingest *IngestConfiguration `yaml:"ingest"`

	// Carbon is the carbon configuration.
	Carbon *CarbonConfiguration `yaml:"carbon"`

	// Middleware is middleware-specific configuration.
	Middleware MiddlewareConfiguration `yaml:"middleware"`

	// Query is the query configuration.
	Query QueryConfiguration `yaml:"query"`

	// Limits specifies limits on per-query resource usage.
	Limits LimitsConfiguration `yaml:"limits"`

	// LookbackDuration determines the lookback duration for queries.
	LookbackDuration *time.Duration `yaml:"lookbackDuration"`

	// ResultOptions are the results options for query.
	ResultOptions ResultOptions `yaml:"resultOptions"`

	// DeprecatedExperimental is the configuration for the experimental API group. It is no longer
	// used and is kept only for backwards compatibility with older configuration files.
	DeprecatedExperimental ExperimentalAPIConfiguration `yaml:"experimental"`

	// StoreMetricsType controls whether the metrics type is stored or not.
	StoreMetricsType *bool `yaml:"storeMetricsType"`

	// MultiProcess is the multi-process configuration.
	MultiProcess MultiProcessConfiguration `yaml:"multiProcess"`

	// Debug configuration.
	Debug config.DebugConfiguration `yaml:"debug"`
}

// ListenAddressOrDefault returns the listen address or default.
func (c *Configuration) ListenAddressOrDefault() string {
	if c.ListenAddress != nil {
		return *c.ListenAddress
	}

	return defaultListenAddress
}

// LoggingOrDefault returns the logging config or default.
func (c *Configuration) LoggingOrDefault() xlog.Configuration {
	if c.Logging != nil {
		return *c.Logging
	}

	return defaultLogging
}

// MetricsOrDefault returns the metrics config or default.
func (c *Configuration) MetricsOrDefault() *instrument.MetricsConfiguration {
	if c.Metrics != nil {
		return c.Metrics
	}

	return &defaultMetrics
}

// WriteWorkerPoolOrDefault returns the write worker pool config or default.
func (c *Configuration) WriteWorkerPoolOrDefault() xconfig.WorkerPoolPolicy {
	if c.WriteWorkerPool != nil {
		return *c.WriteWorkerPool
	}

	return defaultWriteWorkerPool
}

// WriteForwardingConfiguration is the write forwarding configuration.
type WriteForwardingConfiguration struct {
	PromRemoteWrite handleroptions.PromWriteHandlerForwardingOptions `yaml:"promRemoteWrite"`
}

// Filter is a query filter type.
type Filter string

const (
	// FilterLocalOnly is a filter that specifies only local storage should be used.
	FilterLocalOnly Filter = "local_only"
	// FilterRemoteOnly is a filter that specifies only remote storage should be used.
	FilterRemoteOnly Filter = "remote_only"
	// FilterAllowAll is a filter that specifies all storages should be used.
	FilterAllowAll Filter = "allow_all"
	// FilterAllowNone is a filter that specifies no storages should be used.
	FilterAllowNone Filter = "allow_none"
)

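// Illustrative YAML sketch for the filter section (keys per FilterConfiguration
// below, values per the Filter constants above; the particular combination is
// hypothetical):
//
//	filter:
//	  read: local_only
//	  write: allow_all
//	  completeTags: allow_none
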
// FilterConfiguration contains the storage filters for the read, write and
// complete tags endpoints.
type FilterConfiguration struct {
	Read Filter `yaml:"read"`
	Write Filter `yaml:"write"`
	CompleteTags Filter `yaml:"completeTags"`
}

// ResultOptions are the result options for query.
type ResultOptions struct {
	// KeepNaNs keeps NaNs before returning query results.
	// The default is false, which matches Prometheus.
	KeepNaNs bool `yaml:"keepNans"`
}

// QueryConfiguration is the query configuration.
type QueryConfiguration struct {
	// Timeout is the query timeout.
	Timeout *time.Duration `yaml:"timeout"`
	// DefaultEngine is the default query engine.
	DefaultEngine string `yaml:"defaultEngine"`
	// ConsolidationConfiguration are configs for consolidating fetched queries.
	ConsolidationConfiguration ConsolidationConfiguration `yaml:"consolidation"`
	// Prometheus is the prometheus client configuration.
	Prometheus PrometheusQueryConfiguration `yaml:"prometheus"`
	// RestrictTags is an optional configuration that can be set to restrict
	// all queries by certain tags.
	RestrictTags *RestrictTagsConfiguration `yaml:"restrictTags"`
	// RequireLabelsEndpointStartEndTime requires requests to the /label(s) endpoints
	// to specify a start and end time to prevent unbounded queries.
	RequireLabelsEndpointStartEndTime bool `yaml:"requireLabelsEndpointStartEndTime"`
	// RequireSeriesEndpointStartEndTime requires requests to the /series endpoint
	// to specify a start and end time to prevent unbounded queries.
	RequireSeriesEndpointStartEndTime bool `yaml:"requireSeriesEndpointStartEndTime"`
}

// TimeoutOrDefault returns the configured timeout or default value.
func (c QueryConfiguration) TimeoutOrDefault() time.Duration {
	if v := c.Timeout; v != nil {
		return *v
	}
	return defaultQueryTimeout
}

// RestrictTagsAsStorageRestrictByTag returns restrict tags as
// storage options to restrict all queries by default.
func (c QueryConfiguration) RestrictTagsAsStorageRestrictByTag() (*storage.RestrictByTag, bool, error) {
	if c.RestrictTags == nil {
		return nil, false, nil
	}

	var (
		cfg = *c.RestrictTags
		result = handleroptions.StringTagOptions{
			Restrict: make([]handleroptions.StringMatch, 0, len(cfg.Restrict)),
			Strip: cfg.Strip,
		}
	)
	for _, elem := range cfg.Restrict {
		value := handleroptions.StringMatch(elem)
		result.Restrict = append(result.Restrict, value)
	}

	opts, err := result.StorageOptions()
	if err != nil {
		return nil, false, err
	}

	return opts, true, nil
}

// RestrictTagsConfiguration applies tag restriction to all queries.
type RestrictTagsConfiguration struct {
	Restrict []StringMatch `yaml:"match"`
	Strip []string `yaml:"strip"`
}

// StringMatch is an easy to use representation of models.Matcher.
type StringMatch struct {
	Name string `yaml:"name"`
	Type string `yaml:"type"`
	Value string `yaml:"value"`
}

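// Illustrative YAML sketch for query.restrictTags (keys per
// RestrictTagsConfiguration and StringMatch above; the tag name, value and
// match type string are hypothetical, with valid match types defined by
// models.Matcher parsing rather than this file):
//
//	query:
//	  restrictTags:
//	    match:
//	      - name: environment
//	        type: EQUAL
//	        value: production
//	    strip:
//	      - environment
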
// ConsolidationConfiguration are configs for consolidating fetched queries.
type ConsolidationConfiguration struct {
	// MatchType determines the options by which series should match.
	MatchType consolidators.MatchType `yaml:"matchType"`
}

// PrometheusQueryConfiguration is the prometheus query engine configuration.
type PrometheusQueryConfiguration struct {
	// MaxSamplesPerQuery is the limit on fetched samples per query.
	MaxSamplesPerQuery *int `yaml:"maxSamplesPerQuery"`

	// Convert configures Prometheus time series conversions.
	Convert *PrometheusConvertConfiguration `yaml:"convert"`
}

// ConvertOptionsOrDefault creates storage.PromConvertOptions based on the given configuration.
func (c PrometheusQueryConfiguration) ConvertOptionsOrDefault() storage.PromConvertOptions {
	opts := storage.NewPromConvertOptions()

	if v := c.Convert; v != nil {
		if value := v.ResolutionThresholdForCounterNormalization; value != nil {
			opts = opts.SetResolutionThresholdForCounterNormalization(*value)
		}

		opts = opts.SetValueDecreaseTolerance(v.ValueDecreaseTolerance)

		// Default to max time so that it's always applicable if value
		// decrease tolerance is non-zero.
		toleranceUntil := xtime.UnixNano(math.MaxInt64)
		if value := v.ValueDecreaseToleranceUntil; value != nil {
			toleranceUntil = xtime.ToUnixNano(*value)
		}
		opts = opts.SetValueDecreaseToleranceUntil(toleranceUntil)
	}

	return opts
}

// PrometheusConvertConfiguration configures Prometheus time series conversions.
type PrometheusConvertConfiguration struct {
	// ResolutionThresholdForCounterNormalization sets the resolution threshold starting from which
	// Prometheus counter normalization is performed in order to avoid Prometheus counter
	// extrapolation artifacts.
	ResolutionThresholdForCounterNormalization *time.Duration `yaml:"resolutionThresholdForCounterNormalization"`

	// ValueDecreaseTolerance sets the tolerance below which a decrease in a
	// counter value is ignored rather than returned as a decrease.
	// This is useful for applications that have precision issues emitting
	// monotonically increasing data and can accidentally make it seem like the
	// counter value decreased when it hasn't changed.
	ValueDecreaseTolerance float64 `yaml:"valueDecreaseTolerance"`

	// ValueDecreaseToleranceUntil allows for setting a time threshold up to
	// which the conditional value decrease tolerance is applied.
	ValueDecreaseToleranceUntil *time.Time `yaml:"valueDecreaseToleranceUntil"`
}

// MaxSamplesPerQueryOrDefault returns the max samples per query or default.
func (c PrometheusQueryConfiguration) MaxSamplesPerQueryOrDefault() int {
	if v := c.MaxSamplesPerQuery; v != nil {
		return *v
	}

	return defaultPrometheusMaxSamplesPerQuery
}

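// Illustrative YAML sketch for the prometheus sub-section of query (keys per
// PrometheusQueryConfiguration and PrometheusConvertConfiguration above; the
// values shown are hypothetical apart from maxSamplesPerQuery, which mirrors
// the package default):
//
//	query:
//	  prometheus:
//	    maxSamplesPerQuery: 100000000
//	    convert:
//	      resolutionThresholdForCounterNormalization: 5m
//	      valueDecreaseTolerance: 0.0000001
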
// LimitsConfiguration represents limitations on resource usage in the query
// instance. Limits are split between per-query and global limits.
type LimitsConfiguration struct {
	// PerQuery configures limits which apply to each query individually.
	PerQuery PerQueryLimitsConfiguration `yaml:"perQuery"`
}

// PerQueryLimitsConfiguration represents limits on resource usage within a
// single query. Zero or negative values imply no limit.
type PerQueryLimitsConfiguration struct {
	// MaxFetchedSeries limits the number of time series returned by any given
	// individual storage node per query, before the result is returned to the
	// query service.
	MaxFetchedSeries int `yaml:"maxFetchedSeries"`

	// InstanceMultiple increases the per database instance series limit.
	// The series limit per database instance is calculated as:
	//
	//	InstanceSeriesLimit = MaxFetchedSeries / (instances per replica) * InstanceMultiple.
	//
	// A value > 1 allows a buffer in case data is not uniformly sharded across instances in a replica.
	// If set to 0 the feature is disabled and MaxFetchedSeries is used as the limit for each database instance.
	// For large clusters, enabling this feature can dramatically decrease the amount of wasted series read from a
	// single database instance.
	InstanceMultiple float32 `yaml:"instanceMultiple"`

	// MaxFetchedDocs limits the number of index documents matched by any given
	// individual storage node per query, before the result is returned to the
	// query service.
	MaxFetchedDocs int `yaml:"maxFetchedDocs"`

	// MaxFetchedRange limits the time range of index documents matched by any given
	// individual storage node per query, before the result is returned to the
	// query service.
	MaxFetchedRange time.Duration `yaml:"maxFetchedRange"`

	// RequireExhaustive results in an error if the query exceeds any limit.
	RequireExhaustive *bool `yaml:"requireExhaustive"`

	// MaxMetricMetadataStats limits the number of metric metadata stats to return
	// as a response header after a query. If unset, defaults to 4. If set to zero,
	// no metric metadata stats will be returned as a response header.
	MaxMetricMetadataStats *int `yaml:"maxMetricMetadataStats"`
}

// AsFetchOptionsBuilderLimitsOptions converts this configuration to
// handleroptions.FetchOptionsBuilderLimitsOptions.
func (l *PerQueryLimitsConfiguration) AsFetchOptionsBuilderLimitsOptions() handleroptions.FetchOptionsBuilderLimitsOptions {
	seriesLimit := defaultStorageQuerySeriesLimit
	if v := l.MaxFetchedSeries; v > 0 {
		seriesLimit = v
	}

	docsLimit := defaultStorageQueryDocsLimit
	if v := l.MaxFetchedDocs; v > 0 {
		docsLimit = v
	}

	requireExhaustive := defaultRequireExhaustive
	if r := l.RequireExhaustive; r != nil {
		requireExhaustive = *r
	}

	maxMetricMetadataStats := defaultMaxMetricMetadataStats
	if v := l.MaxMetricMetadataStats; v != nil {
		maxMetricMetadataStats = *v
	}

	return handleroptions.FetchOptionsBuilderLimitsOptions{
		SeriesLimit: seriesLimit,
		InstanceMultiple: l.InstanceMultiple,
		DocsLimit: docsLimit,
		RangeLimit: l.MaxFetchedRange,
		RequireExhaustive: requireExhaustive,
		MaxMetricMetadataStats: maxMetricMetadataStats,
	}
}

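// Worked example of the InstanceMultiple formula documented above (numbers are
// hypothetical): with maxFetchedSeries: 60000, 3 instances per replica and
// instanceMultiple: 2, each database instance may return up to
// 60000 / 3 * 2 = 40000 series before its per-instance limit trips.
//
// Illustrative YAML sketch (keys per PerQueryLimitsConfiguration above):
//
//	limits:
//	  perQuery:
//	    maxFetchedSeries: 60000
//	    instanceMultiple: 2
//	    requireExhaustive: true
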
// IngestConfiguration is the configuration for the ingestion server.
type IngestConfiguration struct {
	// Ingester is the configuration for the storage based ingester.
	Ingester ingestm3msg.Configuration `yaml:"ingester"`

	// M3Msg is the configuration for the m3msg server.
	M3Msg m3msg.Configuration `yaml:"m3msg"`
}

// CarbonConfiguration is the configuration for the carbon server.
type CarbonConfiguration struct {
	// Ingester, if set, defines an ingester to run for carbon.
	Ingester *CarbonIngesterConfiguration `yaml:"ingester"`
	// LimitsFind sets the limits configuration for find queries.
	LimitsFind *LimitsConfiguration `yaml:"limitsFind"`
	// LimitsRender sets the limits configuration for render queries.
	LimitsRender *LimitsConfiguration `yaml:"limitsRender"`
	// AggregateNamespacesAllData configures whether all aggregate
	// namespaces contain entire copies of the data set. This affects
	// whether queries can be optimized: if false they cannot be, since
	// it is unclear whether data matching an expression sits in one,
	// many or none of the aggregate namespaces, so all must be queried.
	// If true, it can be determined from the query range whether a single
	// namespace can fulfill the entire query, and if so only that one
	// aggregated namespace is fetched from.
	AggregateNamespacesAllData bool `yaml:"aggregateNamespacesAllData"`
	// ShiftTimeStart sets a constant time to shift start by.
	ShiftTimeStart time.Duration `yaml:"shiftTimeStart"`
	// ShiftTimeEnd sets a constant time to shift end by.
	ShiftTimeEnd time.Duration `yaml:"shiftTimeEnd"`
	// ShiftStepsStart sets a constant set of steps to shift start by.
	ShiftStepsStart int `yaml:"shiftStepsStart"`
	// ShiftStepsEnd sets a constant set of steps to shift end by.
	ShiftStepsEnd int `yaml:"shiftStepsEnd"`
	// ShiftStepsStartWhenAtResolutionBoundary sets a constant set of steps to
	// shift start by if and only if the start is an exact match to the
	// resolution boundary of a query.
	ShiftStepsStartWhenAtResolutionBoundary *int `yaml:"shiftStepsStartWhenAtResolutionBoundary"`
	// ShiftStepsEndWhenAtResolutionBoundary sets a constant set of steps to
	// shift end by if and only if the end is an exact match to the
	// resolution boundary of a query.
	ShiftStepsEndWhenAtResolutionBoundary *int `yaml:"shiftStepsEndWhenAtResolutionBoundary"`
	// ShiftStepsStartWhenEndAtResolutionBoundary sets a constant set of steps to
	// shift start by if and only if the end is an exact match to the resolution boundary
	// of a query AND the start is not an exact match to the resolution boundary.
	ShiftStepsStartWhenEndAtResolutionBoundary *int `yaml:"shiftStepsStartWhenEndAtResolutionBoundary"`
	// ShiftStepsEndWhenStartAtResolutionBoundary sets a constant set of steps to
	// shift end by if and only if the start is an exact match to the resolution boundary
	// of a query AND the end is not an exact match to the resolution boundary.
	ShiftStepsEndWhenStartAtResolutionBoundary *int `yaml:"shiftStepsEndWhenStartAtResolutionBoundary"`
	// RenderPartialStart sets whether to render partial datapoints when
	// the start time falls within a datapoint's resolution step size.
	RenderPartialStart bool `yaml:"renderPartialStart"`
	// RenderPartialEnd sets whether to render partial datapoints when
	// the end time falls within a datapoint's resolution step size.
	RenderPartialEnd bool `yaml:"renderPartialEnd"`
	// RenderSeriesAllNaNs will render series that have only NaNs for the entire
	// output instead of returning an empty array of datapoints.
	RenderSeriesAllNaNs bool `yaml:"renderSeriesAllNaNs"`
	// CompileEscapeAllNotOnlyQuotes will escape all characters when using a backslash
	// in a quoted string rather than reserving it only for escaping quotes.
	CompileEscapeAllNotOnlyQuotes bool `yaml:"compileEscapeAllNotOnlyQuotes"`
	// FindResultsIncludeBothExpandableAndLeaf will include both an expandable
	// node and a leaf node if there is a duplicate path node that is both an
	// expandable node and a leaf node.
	FindResultsIncludeBothExpandableAndLeaf bool `yaml:"findResultsIncludeBothExpandableAndLeaf"`
}

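// Illustrative YAML sketch for the carbon render options (keys per
// CarbonConfiguration above; the values are hypothetical):
//
//	carbon:
//	  aggregateNamespacesAllData: true
//	  shiftStepsStart: 1
//	  renderPartialStart: true
//	  renderPartialEnd: true
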
// MiddlewareConfiguration is middleware-specific configuration.
type MiddlewareConfiguration struct {
	// Logging configures the logging middleware.
	Logging LoggingMiddlewareConfiguration `yaml:"logging"`
	// Metrics configures the metrics middleware.
	Metrics MetricsMiddlewareConfiguration `yaml:"metrics"`
	// Prometheus configures prometheus-related middleware.
	Prometheus PrometheusMiddlewareConfiguration `yaml:"prometheus"`
}

// LoggingMiddlewareConfiguration configures the logging middleware.
type LoggingMiddlewareConfiguration struct {
	// Threshold defines the latency threshold for logging the response. If zero, the default of 1s is used.
	// To disable response logging set Disabled.
	Threshold time.Duration
	// Disabled turns off response logging by default for endpoints.
	Disabled bool
}

// MetricsMiddlewareConfiguration configures the metrics middleware.
type MetricsMiddlewareConfiguration struct {
	// QueryEndpointsClassification contains the configuration for sizing queries to
	// the query and query_range Prometheus endpoints.
	QueryEndpointsClassification QueryClassificationConfig `yaml:"queryEndpointsClassification"`
	// LabelEndpointsClassification contains the configuration for sizing queries to
	// the label names and label values Prometheus endpoints.
	LabelEndpointsClassification QueryClassificationConfig `yaml:"labelEndpointsClassification"`
	// AddStatusToLatencies will add a tag with the query's response code to
	// middleware latency metrics.
	// NB: Setting this to true will increase cardinality by the number of
	// expected response codes (likely around ~10).
	AddStatusToLatencies bool `yaml:"addStatusToLatencies"`
}

// QueryClassificationConfig contains the buckets used to group a query into a bucket for
// the sake of understanding the size of the query based on a specific dimension. Currently,
// we have two sets of buckets: results and duration. The results buckets help us understand
// the size of the query based on the number of results returned whereas the duration buckets help
// us understand the size of the query based on the time range of the query. Dimension values are
// rounded down to the nearest bucket. If the value is smaller than all buckets, then it is
// allocated to the first bucket. Buckets are expected to be ordered in ascending order.
type QueryClassificationConfig struct {
	// ResultsBuckets contains the buckets to be compared with the number of results (e.g. number of
	// time series or labels) returned by a specific endpoint.
	ResultsBuckets []int `yaml:"resultsBuckets"`
	// DurationBuckets contains the buckets to be compared with the time range of a query for a
	// specific endpoint.
	DurationBuckets []time.Duration `yaml:"durationBuckets"`
}

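// Illustrative YAML sketch for middleware metrics classification (keys per
// MetricsMiddlewareConfiguration and QueryClassificationConfig above; the
// bucket values are hypothetical and must be in ascending order):
//
//	middleware:
//	  metrics:
//	    addStatusToLatencies: true
//	    queryEndpointsClassification:
//	      resultsBuckets: [10, 100, 1000, 10000]
//	      durationBuckets: [1m, 1h, 24h, 168h]
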
// Enabled returns true if classification buckets were specified.
func (q *QueryClassificationConfig) Enabled() bool {
	return len(q.DurationBuckets) > 0 || len(q.ResultsBuckets) > 0
}

// PrometheusMiddlewareConfiguration configures the range rewriting middleware.
type PrometheusMiddlewareConfiguration struct {
	// ResolutionMultiplier is the multiple that will be applied to the range if it's determined
	// that it needs to be updated. If this value is greater than 0, the range in a query will be
	// updated if the namespaces used to service the request have resolution(s)
	// that are greater than the range. The range will be updated to the largest resolution
	// of the namespaces servicing the request, multiplied by the multiplier specified here.
	// If this multiplier is 0, then this feature is disabled.
	ResolutionMultiplier int `yaml:"resolutionMultiplier"`
}

// CarbonIngesterConfiguration is the configuration struct for carbon ingestion.
type CarbonIngesterConfiguration struct {
	ListenAddress string `yaml:"listenAddress"`
	MaxConcurrency int `yaml:"maxConcurrency"`
	Rewrite CarbonIngesterRewriteConfiguration `yaml:"rewrite"`
	Rules []CarbonIngesterRuleConfiguration `yaml:"rules"`
}

// CarbonIngesterRewriteConfiguration is the configuration for rewriting
// metrics at ingestion.
type CarbonIngesterRewriteConfiguration struct {
	// Cleanup will perform:
	// - Trailing/leading dot elimination.
	// - Double dot elimination.
	// - Irregular char replacement with underscores (_); currently irregular
	//   is defined as not being in [0-9a-zA-Z-_:#].
	Cleanup bool `yaml:"cleanup"`
}

// LookbackDurationOrDefault validates the LookbackDuration and returns it,
// or the default if it is unset.
func (c Configuration) LookbackDurationOrDefault() (time.Duration, error) {
	if c.LookbackDuration == nil {
		return defaultLookbackDuration, nil
	}

	v := *c.LookbackDuration
	if v < 0 {
		return 0, errors.New("lookbackDuration must be > 0")
	}

	return v, nil
}

// ListenAddressOrDefault returns the specified carbon ingester listen address if provided, or the
// default value if not.
func (c *CarbonIngesterConfiguration) ListenAddressOrDefault() string {
	if c.ListenAddress != "" {
		return c.ListenAddress
	}

	return defaultCarbonIngesterListenAddress
}

// RulesOrDefault returns the specified carbon ingester rules if provided, or generates reasonable
// defaults using the provided aggregated namespaces if not.
func (c *CarbonIngesterConfiguration) RulesOrDefault(namespaces m3.ClusterNamespaces) []CarbonIngesterRuleConfiguration {
	if len(c.Rules) > 0 {
		return c.Rules
	}

	if namespaces.NumAggregatedClusterNamespaces() == 0 {
		return nil
	}

	// Default to fanning out writes for all metrics to all aggregated namespaces if any exist.
	policies := make([]CarbonIngesterStoragePolicyConfiguration, 0, len(namespaces))
	for _, ns := range namespaces {
		if ns.Options().Attributes().MetricsType == storagemetadata.AggregatedMetricsType {
			policies = append(policies, CarbonIngesterStoragePolicyConfiguration{
				Resolution: ns.Options().Attributes().Resolution,
				Retention: ns.Options().Attributes().Retention,
			})
		}
	}

	if len(policies) == 0 {
		return nil
	}

	// Create a single catch-all rule with a policy for each of the aggregated namespaces we
	// enumerated above.
	aggregationEnabled := true
	return []CarbonIngesterRuleConfiguration{
		{
			Pattern: graphite.MatchAllPattern,
			Aggregation: CarbonIngesterAggregationConfiguration{
				Enabled: &aggregationEnabled,
				Type: &defaultCarbonIngesterAggregationType,
			},
			Policies: policies,
		},
	}
}

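// Illustrative YAML sketch for explicit carbon ingestion rules (keys per
// CarbonIngesterConfiguration above and the rule/policy structs below; the
// pattern, aggregation type string and policy values are hypothetical):
//
//	carbon:
//	  ingester:
//	    listenAddress: 0.0.0.0:7204
//	    rules:
//	      - pattern: .*
//	        aggregation:
//	          enabled: true
//	          type: mean
//	        policies:
//	          - resolution: 1m
//	            retention: 48h
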
// CarbonIngesterRuleConfiguration is the configuration struct for a carbon
// ingestion rule.
type CarbonIngesterRuleConfiguration struct {
	Pattern string `yaml:"pattern"`
	Contains string `yaml:"contains"`
	Continue bool `yaml:"continue"`
	Aggregation CarbonIngesterAggregationConfiguration `yaml:"aggregation"`
	Policies []CarbonIngesterStoragePolicyConfiguration `yaml:"policies"`
}

// CarbonIngesterAggregationConfiguration is the configuration struct
// for the aggregation for a carbon ingest rule's storage policy.
type CarbonIngesterAggregationConfiguration struct {
	Enabled *bool `yaml:"enabled"`
	Type *aggregation.Type `yaml:"type"`
}

// EnabledOrDefault returns whether aggregation should be enabled based on the provided configuration,
// or a default value otherwise.
func (c *CarbonIngesterAggregationConfiguration) EnabledOrDefault() bool {
	if c.Enabled != nil {
		return *c.Enabled
	}

	return true
}

// TypeOrDefault returns the aggregation type that should be used based on the provided configuration,
// or a default value otherwise.
func (c *CarbonIngesterAggregationConfiguration) TypeOrDefault() aggregation.Type {
	if c.Type != nil {
		return *c.Type
	}

	return defaultCarbonIngesterAggregationType
}

// CarbonIngesterStoragePolicyConfiguration is the configuration struct for
// a carbon rule's storage policies.
type CarbonIngesterStoragePolicyConfiguration struct {
	Resolution time.Duration `yaml:"resolution" validate:"nonzero"`
	Retention time.Duration `yaml:"retention" validate:"nonzero"`
}

// LocalConfiguration is the local embedded configuration if running
// the coordinator embedded in the DB.
type LocalConfiguration struct {
	// Namespaces is the list of namespaces that the local embedded DB has.
	Namespaces []m3.ClusterStaticNamespaceConfiguration `yaml:"namespaces"`
}

// ClusterManagementConfiguration is configuration for the placement,
// namespaces and database management endpoints (optional).
type ClusterManagementConfiguration struct {
	// Etcd is the client configuration for etcd.
	Etcd *etcdclient.Configuration `yaml:"etcd"`

	// Placement is the cluster placement configuration.
	Placement placement.Configuration `yaml:"placement"`
}

// RemoteConfigurations is a set of remote host configurations.
type RemoteConfigurations []RemoteConfiguration

// RemoteConfiguration is the configuration for a single remote host.
type RemoteConfiguration struct {
	// Name is the name for the remote zone.
	Name string `yaml:"name"`
	// RemoteListenAddresses are the remote listen addresses to call for remote
	// coordinator calls in the remote zone.
	RemoteListenAddresses []string `yaml:"remoteListenAddresses"`
	// ErrorBehavior overrides the default error behavior for this host.
	//
	// NB: defaults to warning on error.
	ErrorBehavior *storage.ErrorBehavior `yaml:"errorBehavior"`
}

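// Illustrative YAML sketch for remote coordinator zones (keys per
// RemoteConfiguration above and RPCConfiguration below; the zone name,
// addresses and listen port are hypothetical):
//
//	rpc:
//	  listenAddress: 0.0.0.0:7202
//	  reflectionEnabled: true
//	  remotes:
//	    - name: remote-zone-a
//	      remoteListenAddresses:
//	        - coordinator-a.example.com:7202
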
// RPCConfiguration is the RPC configuration for the coordinator for
// the GRPC server used for remote coordinator to coordinator calls.
type RPCConfiguration struct {
	// Enabled determines if coordinator RPC is enabled for remote calls.
	//
	// NB: it is no longer necessary to set this to true if RPC is desired; enabled
	// status is inferred based on which other options are provided;
	// this remains for back-compat, and for disabling any existing RPC options.
	Enabled *bool `yaml:"enabled"`

	// ListenAddress is the RPC server listen address.
	ListenAddress string `yaml:"listenAddress"`

	// Remotes are the configuration settings for remote coordinator zones.
	Remotes RemoteConfigurations `yaml:"remotes"`

	// RemoteListenAddresses are the remote listen addresses to call for
	// remote coordinator calls.
	//
	// NB: this is deprecated in favor of using Remotes, as setting
	// RemoteListenAddresses only allows a single remote zone to be used.
	RemoteListenAddresses []string `yaml:"remoteListenAddresses"`

	// ErrorBehavior overrides the default error behavior for all rpc hosts.
	//
	// NB: defaults to warning on error.
	ErrorBehavior *storage.ErrorBehavior `yaml:"errorBehavior"`

	// ReflectionEnabled will enable reflection on the GRPC server, useful
	// for testing connectivity with grpcurl, etc.
	ReflectionEnabled bool `yaml:"reflectionEnabled"`
}

// PrometheusRemoteBackendConfiguration configures the prometheus remote write backend.
type PrometheusRemoteBackendConfiguration struct {
	Endpoints []PrometheusRemoteBackendEndpointConfiguration `yaml:"endpoints"`
	RequestTimeout *time.Duration `yaml:"requestTimeout"`
	ConnectTimeout *time.Duration `yaml:"connectTimeout"`
	KeepAlive *time.Duration `yaml:"keepAlive"`
	IdleConnTimeout *time.Duration `yaml:"idleConnTimeout"`
	MaxIdleConns *int `yaml:"maxIdleConns"`
}

// PrometheusRemoteBackendEndpointConfiguration configures a single endpoint.
type PrometheusRemoteBackendEndpointConfiguration struct {
	Name string `yaml:"name"`
	Address string `yaml:"address"`
	// When nil, all unaggregated data will be sent to this endpoint.
	StoragePolicy *PrometheusRemoteBackendStoragePolicyConfiguration `yaml:"storagePolicy"`
}

// PrometheusRemoteBackendStoragePolicyConfiguration configures the storage policy for a single endpoint.
type PrometheusRemoteBackendStoragePolicyConfiguration struct {
	Resolution time.Duration `yaml:"resolution" validate:"nonzero"`
	Retention time.Duration `yaml:"retention" validate:"nonzero"`

	// Downsample is the downsampling options to be used with this storage policy.
	Downsample *m3.DownsampleClusterStaticNamespaceConfiguration `yaml:"downsample"`
}

// HTTPConfiguration is the HTTP configuration for configuring
// the HTTP server used by the coordinator to serve incoming requests.
type HTTPConfiguration struct {
	// EnableH2C enables support for the HTTP/2 cleartext protocol. H2C
	// enables the use of HTTP/2 without requiring TLS.
	EnableH2C bool `yaml:"enableH2C"`
}

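// Illustrative YAML sketch for the prom-remote backend (keys per
// PrometheusRemoteBackendConfiguration and its endpoint/storage policy structs
// above; the endpoint names, addresses and policies are hypothetical):
//
//	backend: prom-remote
//	prometheusRemoteBackend:
//	  requestTimeout: 30s
//	  endpoints:
//	    - name: unaggregated
//	      address: http://prom-remote-unagg.example.com/api/v1/write
//	    - name: aggregated-5m
//	      address: http://prom-remote-agg.example.com/api/v1/write
//	      storagePolicy:
//	        resolution: 5m
//	        retention: 4320h
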
// TagOptionsConfiguration is the configuration for shared tag options.
// Currently only name, but can expand to cover deduplication settings, or other
// relevant options.
type TagOptionsConfiguration struct {
	// MetricName specifies the tag name that corresponds to the metric's name tag.
	// If not provided, defaults to `__name__`.
	MetricName string `yaml:"metricName"`

	// BucketName specifies the tag name that corresponds to the metric's bucket.
	// If not provided, defaults to `le`.
	BucketName string `yaml:"bucketName"`

	// Scheme determines the default ID generation scheme. Defaults to TypeQuoted.
	Scheme models.IDSchemeType `yaml:"idScheme"`

	// Filters are optional tag filters, removing all series with tags
	// matching the filter from computations.
	Filters []TagFilter `yaml:"filters"`

	// AllowTagNameDuplicates allows for duplicate tags to appear on series.
	AllowTagNameDuplicates bool `yaml:"allowTagNameDuplicates"`

	// AllowTagValueEmpty allows for empty tags to appear on series.
	AllowTagValueEmpty bool `yaml:"allowTagValueEmpty"`
}

// TagFilter is a tag filter.
type TagFilter struct {
	// Values are the values to filter.
	//
	// NB: If this is unset, all series containing
	// a tag with the given `Name` are filtered.
	Values []string `yaml:"values"`
	// Name is the tag name.
	Name string `yaml:"name"`
}

// TagOptionsFromConfig translates tag option configuration into tag options.
func TagOptionsFromConfig(cfg TagOptionsConfiguration) (models.TagOptions, error) {
	opts := models.NewTagOptions()
	name := cfg.MetricName
	if name != "" {
		opts = opts.SetMetricName([]byte(name))
	}

	bucket := cfg.BucketName
	if bucket != "" {
		opts = opts.SetBucketName([]byte(bucket))
	}

	if cfg.Scheme == models.TypeDefault {
		// Default to quoted if unspecified.
		cfg.Scheme = models.TypeQuoted
	}

	opts = opts.SetIDSchemeType(cfg.Scheme)
	if err := opts.Validate(); err != nil {
		return nil, err
	}

	if cfg.Filters != nil {
		filters := make([]models.Filter, 0, len(cfg.Filters))
		for _, filter := range cfg.Filters {
			values := make([][]byte, 0, len(filter.Values))
			for _, strVal := range filter.Values {
				values = append(values, []byte(strVal))
			}

			filters = append(filters, models.Filter{
				Name: []byte(filter.Name),
				Values: values,
			})
		}

		opts = opts.SetFilters(filters)
	}

	opts = opts.SetAllowTagNameDuplicates(cfg.AllowTagNameDuplicates)
	opts = opts.SetAllowTagValueEmpty(cfg.AllowTagValueEmpty)

	return opts, nil
}

// ExperimentalAPIConfiguration is the configuration for the experimental API group.
type ExperimentalAPIConfiguration struct {
	Enabled bool `yaml:"enabled"`
}

// MultiProcessConfiguration is the multi-process configuration which
// allows running multiple sub-processes of an instance reusing the
// same listen ports.
type MultiProcessConfiguration struct {
	// Enabled is whether to enable multi-process execution.
	Enabled bool `yaml:"enabled"`
	// Count is the number of sub-processes to run, leave zero
	// to auto-detect based on the number of CPUs.
	Count int `yaml:"count" validate:"min=0"`
	// PerCPU is the factor of processes to run per CPU, leave
	// zero to use the default of 0.5 per CPU (i.e. one process for
	// every two CPUs).
	PerCPU float64 `yaml:"perCPU" validate:"min=0.0, max=1.0"`
	// GoMaxProcs if set will explicitly set the child GOMAXPROCS env var.
	GoMaxProcs int `yaml:"goMaxProcs"`
}
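
// Illustrative YAML sketch for tag options (keys per TagOptionsConfiguration
// and TagFilter above; the filter tag name and values are hypothetical, and
// the idScheme value string is parsed by models.IDSchemeType rather than this
// file):
//
//	tagOptions:
//	  metricName: __name__
//	  idScheme: quoted
//	  allowTagValueEmpty: true
//	  filters:
//	    - name: instance
//	      values: ["localhost:9090"]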