github.com/thanos-io/thanos@v0.32.5/test/e2e/e2ethanos/services.go

// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.

package e2ethanos

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/efficientgo/core/backoff"
	"github.com/efficientgo/e2e"
	e2edb "github.com/efficientgo/e2e/db"
	e2emon "github.com/efficientgo/e2e/monitoring"
	e2eobs "github.com/efficientgo/e2e/observable"
	"github.com/pkg/errors"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/relabel"
	"gopkg.in/yaml.v2"

	"github.com/thanos-io/objstore/client"
	"github.com/thanos-io/objstore/providers/s3"

	"github.com/thanos-io/objstore/exthttp"

	"github.com/thanos-io/thanos/pkg/alert"
	"github.com/thanos-io/thanos/pkg/httpconfig"

	"github.com/thanos-io/thanos/pkg/queryfrontend"
	"github.com/thanos-io/thanos/pkg/receive"
)

const (
	infoLogLevel = "info"
)

// Same as default for now.
var defaultBackoffConfig = backoff.Config{
	Min:        300 * time.Millisecond,
	Max:        600 * time.Millisecond,
	MaxRetries: 50,
}

// TODO(bwplotka): Make strconv.Itoa(os.Getuid()) pattern in e2e?
func wrapWithDefaults(opt e2e.StartOptions) e2e.StartOptions {
	if opt.User == "" {
		opt.User = strconv.Itoa(os.Getuid())
	}
	if opt.WaitReadyBackoff == nil {
		opt.WaitReadyBackoff = &defaultBackoffConfig
	}
	return opt
}

const (
	// FeatureExemplarStorage is a feature flag that enables exemplar storage on Prometheus.
	FeatureExemplarStorage = "exemplar-storage"
)

// DefaultPrometheusImage sets default Prometheus image used in e2e service.
func DefaultPrometheusImage() string {
	return "quay.io/prometheus/prometheus:v2.41.0"
}

// DefaultAlertmanagerImage sets default Alertmanager image used in e2e service.
func DefaultAlertmanagerImage() string {
	return "quay.io/prometheus/alertmanager:v0.20.0"
}

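// Most Thanos runnables built in this package default to the image returned by
// DefaultImage below. A rough sketch of overriding it for a locally built image when
// running the e2e suite (illustrative only; the image tag and the exact go test
// invocation are assumptions, the Makefile normally sets this up):
//
//	THANOS_IMAGE=thanos:latest go test ./test/e2e/...
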
// DefaultImage returns the local docker image to use to run Thanos.
func DefaultImage() string {
	// Get the Thanos image from the THANOS_IMAGE env variable.
	if os.Getenv("THANOS_IMAGE") != "" {
		return os.Getenv("THANOS_IMAGE")
	}

	return "thanos"
}

func defaultPromHttpConfig() string {
	// username: test, secret: test (bcrypt hash).
	return `basic_auth:
  username: test
  password: test
`
}

func NewPrometheus(e e2e.Environment, name, promConfig, webConfig, promImage string, enableFeatures ...string) *e2emon.InstrumentedRunnable {
	f := e.Runnable(name).WithPorts(map[string]int{"http": 9090}).Future()
	if err := os.MkdirAll(f.Dir(), 0750); err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrap(err, "create prometheus dir"))}
	}

	if err := os.WriteFile(filepath.Join(f.Dir(), "prometheus.yml"), []byte(promConfig), 0600); err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrap(err, "creating prom config"))}
	}

	if len(webConfig) > 0 {
		if err := os.WriteFile(filepath.Join(f.Dir(), "web-config.yml"), []byte(webConfig), 0600); err != nil {
			return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrap(err, "creating web-config"))}
		}
	}

	probe := e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200)
	args := e2e.BuildArgs(map[string]string{
		"--config.file":                     filepath.Join(f.InternalDir(), "prometheus.yml"),
		"--storage.tsdb.path":               f.InternalDir(),
		"--storage.tsdb.max-block-duration": "2h",
		"--log.level":                       infoLogLevel,
		"--web.listen-address":              ":9090",
	})

	if len(enableFeatures) > 0 {
		args = append(args, fmt.Sprintf("--enable-feature=%s", strings.Join(enableFeatures, ",")))
	}
	if len(webConfig) > 0 {
		args = append(args, fmt.Sprintf("--web.config.file=%s", filepath.Join(f.InternalDir(), "web-config.yml")))
		// If auth is enabled then the prober would get a 401 error.
		probe = e2e.NewHTTPReadinessProbe("http", "/-/ready", 401, 401)
	}
	return e2emon.AsInstrumented(f.Init(wrapWithDefaults(e2e.StartOptions{
		Image:     promImage,
		Command:   e2e.NewCommandWithoutEntrypoint("prometheus", args...),
		Readiness: probe,
	})), "http")
}

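// NewPrometheusWithSidecar runs a Prometheus instance together with a Thanos sidecar
// pointed at it, using the default Thanos image. A rough wiring sketch for a test
// (illustrative only; "prom1" and "q1" are assumed names, and testutil.Ok plus
// e2e.StartAndWaitReady are the efficientgo helpers these tests normally use):
//
//	prom, sidecar := e2ethanos.NewPrometheusWithSidecar(e, "prom1",
//		e2ethanos.DefaultPromConfig("test", 0, "", "", e2ethanos.LocalPrometheusTarget),
//		"", e2ethanos.DefaultPrometheusImage(), "")
//	testutil.Ok(t, e2e.StartAndWaitReady(prom, sidecar))
//
//	querier := e2ethanos.NewQuerierBuilder(e, "q1", sidecar.InternalEndpoint("grpc")).Init()
//	testutil.Ok(t, e2e.StartAndWaitReady(querier))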
func NewPrometheusWithSidecar(e e2e.Environment, name, promConfig, webConfig, promImage, minTime string, enableFeatures ...string) (*e2emon.InstrumentedRunnable, *e2emon.InstrumentedRunnable) {
	return NewPrometheusWithSidecarCustomImage(e, name, promConfig, webConfig, promImage, minTime, DefaultImage(), enableFeatures...)
}

func NewPrometheusWithSidecarCustomImage(e e2e.Environment, name, promConfig, webConfig, promImage, minTime string, sidecarImage string, enableFeatures ...string) (*e2emon.InstrumentedRunnable, *e2emon.InstrumentedRunnable) {
	prom := NewPrometheus(e, name, promConfig, webConfig, promImage, enableFeatures...)

	args := map[string]string{
		"--debug.name":        fmt.Sprintf("sidecar-%v", name),
		"--grpc-address":      ":9091",
		"--grpc-grace-period": "0s",
		"--http-address":      ":8080",
		"--prometheus.url":    "http://" + prom.InternalEndpoint("http"),
		"--tsdb.path":         prom.InternalDir(),
		"--log.level":         "debug",
	}
	if len(webConfig) > 0 {
		args["--prometheus.http-client"] = defaultPromHttpConfig()
	}
	if minTime != "" {
		args["--min-time"] = minTime
	}
	sidecarRunnable := e.Runnable(fmt.Sprintf("sidecar-%s", name)).
		WithPorts(map[string]int{"http": 8080, "grpc": 9091}).
		Init(wrapWithDefaults(e2e.StartOptions{
			Image:     sidecarImage,
			Command:   e2e.NewCommand("sidecar", e2e.BuildArgs(args)...),
			Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200),
		}))
	sidecar := e2emon.AsInstrumented(sidecarRunnable, "http")
	return prom, sidecar
}

type AvalancheOptions struct {
	MetricCount    string
	SeriesCount    string
	MetricInterval string
	SeriesInterval string
	ValueInterval  string

	RemoteURL           string
	RemoteWriteInterval string
	RemoteBatchSize     string
	RemoteRequestCount  string

	TenantID string
}

func NewAvalanche(e e2e.Environment, name string, o AvalancheOptions) *e2emon.InstrumentedRunnable {
	f := e.Runnable(name).WithPorts(map[string]int{"http": 9001}).Future()

	args := e2e.BuildArgs(map[string]string{
		"--metric-count":          o.MetricCount,
		"--series-count":          o.SeriesCount,
		"--remote-url":            o.RemoteURL,
		"--remote-write-interval": o.RemoteWriteInterval,
		"--remote-batch-size":     o.RemoteBatchSize,
		"--remote-requests-count": o.RemoteRequestCount,
		"--value-interval":        o.ValueInterval,
		"--metric-interval":       o.MetricInterval,
		"--series-interval":       o.SeriesInterval,
		"--remote-tenant-header":  "THANOS-TENANT",
		"--remote-tenant":         o.TenantID,
	})

	return e2emon.AsInstrumented(f.Init(wrapWithDefaults(e2e.StartOptions{
		Image:   "quay.io/prometheuscommunity/avalanche:main",
		Command: e2e.NewCommandWithoutEntrypoint("avalanche", args...),
	})), "http")
}

func NewPrometheusWithJaegerTracingSidecarCustomImage(e e2e.Environment, name, promConfig, webConfig,
	promImage, minTime, sidecarImage, jaegerConfig string, enableFeatures ...string) (
	*e2emon.InstrumentedRunnable, *e2emon.InstrumentedRunnable) {
	prom := NewPrometheus(e, name, promConfig, webConfig, promImage, enableFeatures...)

	args := map[string]string{
		"--debug.name":        fmt.Sprintf("sidecar-%v", name),
		"--grpc-address":      ":9091",
		"--grpc-grace-period": "0s",
		"--http-address":      ":8080",
		"--prometheus.url":    "http://" + prom.InternalEndpoint("http"),
		"--tsdb.path":         prom.InternalDir(),
		"--log.level":         "debug",
		"--tracing.config":    jaegerConfig,
	}
	if len(webConfig) > 0 {
		args["--prometheus.http-client"] = defaultPromHttpConfig()
	}
	if minTime != "" {
		args["--min-time"] = minTime
	}

	sidecarRunnable := e.Runnable(fmt.Sprintf("sidecar-%s", name)).
		WithPorts(map[string]int{"http": 8080, "grpc": 9091}).
		Init(wrapWithDefaults(e2e.StartOptions{
			Image:     sidecarImage,
			Command:   e2e.NewCommand("sidecar", e2e.BuildArgs(args)...),
			Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200),
		}))
	sidecar := e2emon.AsInstrumented(sidecarRunnable, "http")

	return prom, sidecar
}

type QuerierBuilder struct {
	name           string
	routePrefix    string
	externalPrefix string
	image          string

	storeAddresses          []string
	proxyStrategy           string
	disablePartialResponses bool
	fileSDStoreAddresses    []string
	ruleAddresses           []string
	metadataAddresses       []string
	envVars                 map[string]string
	targetAddresses         []string
	exemplarAddresses       []string
	enableFeatures          []string
	endpoints               []string
	strictEndpoints         []string

	engine    string
	queryMode string

	replicaLabels []string
	tracingConfig string

	telemetryDurationQuantiles []float64
	telemetrySamplesQuantiles  []float64
	telemetrySeriesQuantiles   []float64

	e2e.Linkable
	f e2e.FutureRunnable
}

func NewQuerierBuilder(e e2e.Environment, name string, storeAddresses ...string) *QuerierBuilder {
	f := e.Runnable(fmt.Sprintf("querier-%v", name)).
		WithPorts(map[string]int{"http": 8080, "grpc": 9091}).
		Future()
	return &QuerierBuilder{
		Linkable:       f,
		f:              f,
		name:           name,
		storeAddresses: storeAddresses,
		image:          DefaultImage(),
		replicaLabels:  []string{replicaLabel},
	}
}

func (q *QuerierBuilder) WithProxyStrategy(strategy string) *QuerierBuilder {
	q.proxyStrategy = strategy
	return q
}

func (q *QuerierBuilder) WithEnabledFeatures(enableFeatures []string) *QuerierBuilder {
	q.enableFeatures = enableFeatures
	return q
}

func (q *QuerierBuilder) WithImage(image string) *QuerierBuilder {
	q.image = image
	return q
}

func (q *QuerierBuilder) WithStoreAddresses(storeAddresses ...string) *QuerierBuilder {
	q.storeAddresses = storeAddresses
	return q
}

func (q *QuerierBuilder) WithFileSDStoreAddresses(fileSDStoreAddresses ...string) *QuerierBuilder {
	q.fileSDStoreAddresses = fileSDStoreAddresses
	return q
}

func (q *QuerierBuilder) WithRuleAddresses(ruleAddresses ...string) *QuerierBuilder {
	q.ruleAddresses = ruleAddresses
	return q
}

func (q *QuerierBuilder) WithTargetAddresses(targetAddresses ...string) *QuerierBuilder {
	q.targetAddresses = targetAddresses
	return q
}

func (q *QuerierBuilder) WithExemplarAddresses(exemplarAddresses ...string) *QuerierBuilder {
	q.exemplarAddresses = exemplarAddresses
	return q
}

func (q *QuerierBuilder) WithMetadataAddresses(metadataAddresses ...string) *QuerierBuilder {
	q.metadataAddresses = metadataAddresses
	return q
}

func (q *QuerierBuilder) WithEndpoints(endpoints ...string) *QuerierBuilder {
	q.endpoints = endpoints
	return q
}

func (q *QuerierBuilder) WithStrictEndpoints(strictEndpoints ...string) *QuerierBuilder {
	q.strictEndpoints = strictEndpoints
	return q
}

func (q *QuerierBuilder) WithRoutePrefix(routePrefix string) *QuerierBuilder {
	q.routePrefix = routePrefix
	return q
}

func (q *QuerierBuilder) WithExternalPrefix(externalPrefix string) *QuerierBuilder {
	q.externalPrefix = externalPrefix
	return q
}

func (q *QuerierBuilder) WithTracingConfig(tracingConfig string) *QuerierBuilder {
	q.tracingConfig = tracingConfig
	return q
}

// WithReplicaLabels replaces default [replica] replica label configuration for the querier.
func (q *QuerierBuilder) WithReplicaLabels(labels ...string) *QuerierBuilder {
	q.replicaLabels = labels
	return q
}

func (q *QuerierBuilder) WithDisablePartialResponses(disable bool) *QuerierBuilder {
	q.disablePartialResponses = disable
	return q
}

func (q *QuerierBuilder) WithEngine(engine string) *QuerierBuilder {
	q.engine = engine
	return q
}

func (q *QuerierBuilder) WithQueryMode(mode string) *QuerierBuilder {
	q.queryMode = mode
	return q
}

func (q *QuerierBuilder) WithEnvVars(envVars map[string]string) *QuerierBuilder {
	q.envVars = envVars
	return q
}

func (q *QuerierBuilder) WithTelemetryQuantiles(duration []float64, samples []float64, series []float64) *QuerierBuilder {
	q.telemetryDurationQuantiles = duration
	q.telemetrySamplesQuantiles = samples
	q.telemetrySeriesQuantiles = series
	return q
}

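// Init builds the querier runnable from the options collected by the builder.
// A rough sketch of typical builder usage in a test (illustrative only; the endpoint
// value and the replica label names are assumptions):
//
//	querier := e2ethanos.NewQuerierBuilder(e, "q1", sidecar.InternalEndpoint("grpc")).
//		WithReplicaLabels("replica", "prometheus").
//		Init()
//	testutil.Ok(t, e2e.StartAndWaitReady(querier))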
func (q *QuerierBuilder) Init() *e2emon.InstrumentedRunnable {
	args, err := q.collectArgs()
	if err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(q.name, err)}
	}

	return e2emon.AsInstrumented(q.f.Init(wrapWithDefaults(e2e.StartOptions{
		Image:     q.image,
		Command:   e2e.NewCommand("query", args...),
		Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200),
		EnvVars:   q.envVars,
	})), "http")
}

const replicaLabel = "replica"

func (q *QuerierBuilder) collectArgs() ([]string, error) {
	args := e2e.BuildArgs(map[string]string{
		"--debug.name":            fmt.Sprintf("querier-%v", q.name),
		"--grpc-address":          ":9091",
		"--grpc-grace-period":     "0s",
		"--http-address":          ":8080",
		"--store.sd-dns-interval": "5s",
		"--log.level":             infoLogLevel,
		"--query.max-concurrent":  "1",
		"--store.sd-interval":     "5s",
	})

	for _, repl := range q.replicaLabels {
		args = append(args, "--query.replica-label="+repl)
	}
	for _, addr := range q.storeAddresses {
		args = append(args, "--store="+addr)
	}
	for _, addr := range q.ruleAddresses {
		args = append(args, "--rule="+addr)
	}
	for _, addr := range q.targetAddresses {
		args = append(args, "--target="+addr)
	}
	for _, addr := range q.metadataAddresses {
		args = append(args, "--metadata="+addr)
	}
	for _, addr := range q.exemplarAddresses {
		args = append(args, "--exemplar="+addr)
	}
	for _, feature := range q.enableFeatures {
		args = append(args, "--enable-feature="+feature)
	}
	if q.proxyStrategy != "" {
		args = append(args, "--grpc.proxy-strategy="+q.proxyStrategy)
	}
	if q.disablePartialResponses {
		args = append(args, "--no-query.partial-response")
	}
	for _, addr := range q.endpoints {
		args = append(args, "--endpoint="+addr)
	}
	for _, addr := range q.strictEndpoints {
		args = append(args, "--endpoint-strict="+addr)
	}
	if len(q.fileSDStoreAddresses) > 0 {
		if err := os.MkdirAll(q.Dir(), 0750); err != nil {
			return nil, errors.Wrap(err, "create query dir failed")
		}

		fileSD := []*targetgroup.Group{{}}
		for _, a := range q.fileSDStoreAddresses {
			fileSD[0].Targets = append(fileSD[0].Targets, model.LabelSet{model.AddressLabel: model.LabelValue(a)})
		}

		b, err := yaml.Marshal(fileSD)
		if err != nil {
			return nil, err
		}

		if err := os.WriteFile(q.Dir()+"/filesd.yaml", b, 0600); err != nil {
			return nil, errors.Wrap(err, "creating query SD config failed")
		}

		args = append(args, "--store.sd-files="+filepath.Join(q.InternalDir(), "filesd.yaml"))
	}
	if q.routePrefix != "" {
		args = append(args, "--web.route-prefix="+q.routePrefix)
	}
	if q.externalPrefix != "" {
		args = append(args, "--web.external-prefix="+q.externalPrefix)
	}
	if q.tracingConfig != "" {
		args = append(args, "--tracing.config="+q.tracingConfig)
	}
	for _, bucket := range q.telemetryDurationQuantiles {
		args = append(args, "--query.telemetry.request-duration-seconds-quantiles="+strconv.FormatFloat(bucket, 'f', -1, 64))
	}
	for _, bucket := range q.telemetrySamplesQuantiles {
		args = append(args, "--query.telemetry.request-samples-quantiles="+strconv.FormatFloat(bucket, 'f', -1, 64))
	}
	for _, bucket := range q.telemetrySeriesQuantiles {
		args = append(args, "--query.telemetry.request-series-seconds-quantiles="+strconv.FormatFloat(bucket, 'f', -1, 64))
	}
	return args, nil
}

func RemoteWriteEndpoint(addr string) string { return fmt.Sprintf("http://%s/api/v1/receive", addr) }

func RemoteWriteEndpoints(addrs ...string) string {
	var endpoints []string
	for _, addr := range addrs {
		endpoints = append(endpoints, RemoteWriteEndpoint(addr))
	}
	return strings.Join(endpoints, ",")
}

type ReceiveBuilder struct {
	e2e.Linkable

	f e2e.FutureRunnable

	maxExemplars        int
	ingestion           bool
	limit               int
	tenantsLimits       receive.TenantsWriteLimitsConfig
	metaMonitoring      string
	metaMonitoringQuery string
	hashringConfigs     []receive.HashringConfig
	relabelConfigs      []*relabel.Config
	replication         int
	image               string
	nativeHistograms    bool
	labels              []string
}

func NewReceiveBuilder(e e2e.Environment, name string) *ReceiveBuilder {
	f := e.Runnable(fmt.Sprintf("receive-%v", name)).
		WithPorts(map[string]int{"http": 8080, "grpc": 9091, "remote-write": 8081}).
		Future()
	return &ReceiveBuilder{
		Linkable:    f,
		f:           f,
		replication: 1,
		image:       DefaultImage(),
	}
}

func (r *ReceiveBuilder) WithImage(image string) *ReceiveBuilder {
	r.image = image
	return r
}

func (r *ReceiveBuilder) WithExemplarsInMemStorage(maxExemplars int) *ReceiveBuilder {
	r.maxExemplars = maxExemplars
	r.ingestion = true
	return r
}

func (r *ReceiveBuilder) WithIngestionEnabled() *ReceiveBuilder {
	r.ingestion = true
	return r
}

func (r *ReceiveBuilder) WithLabel(name, value string) *ReceiveBuilder {
	r.labels = append(r.labels, fmt.Sprintf(`%s="%s"`, name, value))
	return r
}

func (r *ReceiveBuilder) WithRouting(replication int, hashringConfigs ...receive.HashringConfig) *ReceiveBuilder {
	r.hashringConfigs = hashringConfigs
	r.replication = replication
	return r
}

func (r *ReceiveBuilder) WithRelabelConfigs(relabelConfigs []*relabel.Config) *ReceiveBuilder {
	r.relabelConfigs = relabelConfigs
	return r
}

func (r *ReceiveBuilder) WithValidationEnabled(limit int, metaMonitoring string, tenantsLimits receive.TenantsWriteLimitsConfig, query ...string) *ReceiveBuilder {
	r.limit = limit
	r.metaMonitoring = metaMonitoring
	r.tenantsLimits = tenantsLimits
	if len(query) > 0 {
		r.metaMonitoringQuery = query[0]
	}
	return r
}

func (r *ReceiveBuilder) WithNativeHistograms() *ReceiveBuilder {
	r.nativeHistograms = true
	return r
}

// Init creates a Thanos Receive instance.
// If ingestion is enabled it will be configured for ingesting samples.
// If routing is configured (i.e. a hashring configuration is provided) it routes samples to other receivers.
// If neither is configured, it returns a failed runnable.
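//
// A minimal ingest-only receiver in a test might look like the following sketch
// (illustrative only; the name is an assumption and testutil.Ok plus
// e2e.StartAndWaitReady are the usual efficientgo helpers):
//
//	ingestor := e2ethanos.NewReceiveBuilder(e, "ingestor").WithIngestionEnabled().Init()
//	testutil.Ok(t, e2e.StartAndWaitReady(ingestor))
//
// Prometheus can then remote-write into it via
// RemoteWriteEndpoint(ingestor.InternalEndpoint("remote-write")).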
func (r *ReceiveBuilder) Init() *e2emon.InstrumentedRunnable {
	if !r.ingestion && len(r.hashringConfigs) == 0 {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.New("enable ingestion or configure routing for this receiver"))}
	}

	args := map[string]string{
		"--debug.name":           r.Name(),
		"--grpc-address":         ":9091",
		"--grpc-grace-period":    "0s",
		"--http-address":         ":8080",
		"--remote-write.address": ":8081",
		"--label":                fmt.Sprintf(`receive="%s"`, r.Name()),
		"--tsdb.path":            filepath.Join(r.InternalDir(), "data"),
		"--log.level":            infoLogLevel,
		"--tsdb.max-exemplars":   fmt.Sprintf("%v", r.maxExemplars),
	}

	if len(r.labels) > 0 {
		args["--label"] = fmt.Sprintf("%s,%s", args["--label"], strings.Join(r.labels, ","))
	}

	hashring := r.hashringConfigs
	if len(hashring) > 0 && r.ingestion {
		args["--receive.local-endpoint"] = r.InternalEndpoint("grpc")
	}

	if r.limit != 0 && r.metaMonitoring != "" {
		cfg := receive.RootLimitsConfig{
			WriteLimits: receive.WriteLimitsConfig{
				GlobalLimits: receive.GlobalLimitsConfig{
					MetaMonitoringURL:        r.metaMonitoring,
					MetaMonitoringLimitQuery: r.metaMonitoringQuery,
				},
				DefaultLimits: receive.DefaultLimitsConfig{
					HeadSeriesLimit: uint64(r.limit),
				},
			},
		}

		if r.tenantsLimits != nil {
			cfg.WriteLimits.TenantsLimits = r.tenantsLimits
		}

		b, err := yaml.Marshal(cfg)
		if err != nil {
			return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrapf(err, "generate limiting file: %v", hashring))}
		}

		if err := os.WriteFile(filepath.Join(r.Dir(), "limits.yaml"), b, 0600); err != nil {
			return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrap(err, "creating limiting config"))}
		}

		args["--receive.limits-config-file"] = filepath.Join(r.InternalDir(), "limits.yaml")
	}

	if err := os.MkdirAll(filepath.Join(r.Dir(), "data"), 0750); err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrap(err, "create receive dir"))}
	}

	if len(hashring) > 0 {
		b, err := json.Marshal(hashring)
		if err != nil {
			return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrapf(err, "generate hashring file: %v", hashring))}
		}

		if err := os.WriteFile(filepath.Join(r.Dir(), "hashrings.json"), b, 0600); err != nil {
			return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrap(err, "creating receive config"))}
		}

		args["--receive.hashrings-file"] = filepath.Join(r.InternalDir(), "hashrings.json")
		args["--receive.hashrings-file-refresh-interval"] = "5s"
		args["--receive.replication-factor"] = strconv.Itoa(r.replication)
	}

	if len(r.relabelConfigs) > 0 {
		relabelConfigBytes, err := yaml.Marshal(r.relabelConfigs)
		if err != nil {
			return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrapf(err, "generate relabel configs: %v", relabelConfigBytes))}
		}
		args["--receive.relabel-config"] = string(relabelConfigBytes)
	}

	if r.nativeHistograms {
		args["--tsdb.enable-native-histograms"] = ""
	}

	return e2emon.AsInstrumented(r.f.Init(wrapWithDefaults(e2e.StartOptions{
		Image:     r.image,
		Command:   e2e.NewCommand("receive", e2e.BuildKingpinArgs(args)...),
		Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200),
"/-/ready", 200, 200), 674 })), "http") 675 } 676 677 type RulerBuilder struct { 678 e2e.Linkable 679 680 f e2e.FutureRunnable 681 682 amCfg []alert.AlertmanagerConfig 683 replicaLabel string 684 image string 685 resendDelay string 686 evalInterval string 687 forGracePeriod string 688 restoreIgnoredLabels []string 689 } 690 691 // NewRulerBuilder is a Ruler future that allows extra configuration before initialization. 692 func NewRulerBuilder(e e2e.Environment, name string) *RulerBuilder { 693 f := e.Runnable(fmt.Sprintf("rule-%s", name)). 694 WithPorts(map[string]int{"http": 8080, "grpc": 9091}). 695 Future() 696 return &RulerBuilder{ 697 replicaLabel: name, 698 Linkable: f, 699 f: f, 700 image: DefaultImage(), 701 } 702 } 703 704 func (r *RulerBuilder) WithImage(image string) *RulerBuilder { 705 r.image = image 706 return r 707 } 708 709 func (r *RulerBuilder) WithAlertManagerConfig(amCfg []alert.AlertmanagerConfig) *RulerBuilder { 710 r.amCfg = amCfg 711 return r 712 } 713 714 func (r *RulerBuilder) WithReplicaLabel(replicaLabel string) *RulerBuilder { 715 r.replicaLabel = replicaLabel 716 return r 717 } 718 719 func (r *RulerBuilder) WithResendDelay(resendDelay string) *RulerBuilder { 720 r.resendDelay = resendDelay 721 return r 722 } 723 724 func (r *RulerBuilder) WithEvalInterval(evalInterval string) *RulerBuilder { 725 r.evalInterval = evalInterval 726 return r 727 } 728 729 func (r *RulerBuilder) WithForGracePeriod(forGracePeriod string) *RulerBuilder { 730 r.forGracePeriod = forGracePeriod 731 return r 732 } 733 734 func (r *RulerBuilder) WithRestoreIgnoredLabels(labels ...string) *RulerBuilder { 735 r.restoreIgnoredLabels = labels 736 return r 737 } 738 739 func (r *RulerBuilder) InitTSDB(internalRuleDir string, queryCfg []httpconfig.Config) *e2emon.InstrumentedRunnable { 740 return r.initRule(internalRuleDir, queryCfg, nil) 741 } 742 743 func (r *RulerBuilder) InitStateless(internalRuleDir string, queryCfg []httpconfig.Config, remoteWriteCfg []*config.RemoteWriteConfig) *e2emon.InstrumentedRunnable { 744 return r.initRule(internalRuleDir, queryCfg, remoteWriteCfg) 745 } 746 747 func (r *RulerBuilder) initRule(internalRuleDir string, queryCfg []httpconfig.Config, remoteWriteCfg []*config.RemoteWriteConfig) *e2emon.InstrumentedRunnable { 748 if err := os.MkdirAll(r.f.Dir(), 0750); err != nil { 749 return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrap(err, "create rule dir"))} 750 } 751 752 amCfgBytes, err := yaml.Marshal(alert.AlertingConfig{ 753 Alertmanagers: r.amCfg, 754 }) 755 if err != nil { 756 return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrapf(err, "generate am file: %v", r.amCfg))} 757 } 758 759 queryCfgBytes, err := yaml.Marshal(queryCfg) 760 if err != nil { 761 return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrapf(err, "generate query file: %v", queryCfg))} 762 } 763 764 ruleArgs := map[string]string{ 765 "--debug.name": r.Name(), 766 "--grpc-address": ":9091", 767 "--grpc-grace-period": "0s", 768 "--http-address": ":8080", 769 "--data-dir": r.InternalDir(), 770 "--rule-file": filepath.Join(internalRuleDir, "*.yaml"), 771 "--eval-interval": "1s", 772 "--alertmanagers.config": string(amCfgBytes), 773 "--alertmanagers.sd-dns-interval": "1s", 774 "--log.level": infoLogLevel, 775 "--query.config": string(queryCfgBytes), 776 "--query.sd-dns-interval": "1s", 777 "--resend-delay": "5s", 778 "--for-grace-period": "1s", 779 } 780 if r.replicaLabel != "" { 781 
ruleArgs["--label"] = fmt.Sprintf(`%s="%s"`, replicaLabel, r.replicaLabel) 782 } 783 784 if r.resendDelay != "" { 785 ruleArgs["--resend-delay"] = r.resendDelay 786 } 787 788 if r.evalInterval != "" { 789 ruleArgs["--eval-interval"] = r.evalInterval 790 } 791 792 if r.forGracePeriod != "" { 793 ruleArgs["--for-grace-period"] = r.forGracePeriod 794 } 795 796 if remoteWriteCfg != nil { 797 rwCfgBytes, err := yaml.Marshal(struct { 798 RemoteWriteConfigs []*config.RemoteWriteConfig `yaml:"remote_write,omitempty"` 799 }{remoteWriteCfg}) 800 if err != nil { 801 return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrapf(err, "generate remote write config: %v", remoteWriteCfg))} 802 } 803 ruleArgs["--remote-write.config"] = string(rwCfgBytes) 804 } 805 806 args := e2e.BuildArgs(ruleArgs) 807 808 for _, label := range r.restoreIgnoredLabels { 809 args = append(args, "--restore-ignored-label="+label) 810 } 811 812 return e2emon.AsInstrumented(r.f.Init(wrapWithDefaults(e2e.StartOptions{ 813 Image: r.image, 814 Command: e2e.NewCommand("rule", args...), 815 Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 816 })), "http") 817 } 818 819 func NewAlertmanager(e e2e.Environment, name string) *e2emon.InstrumentedRunnable { 820 f := e.Runnable(fmt.Sprintf("alertmanager-%v", name)). 821 WithPorts(map[string]int{"http": 8080}). 822 Future() 823 824 if err := os.MkdirAll(f.Dir(), 0750); err != nil { 825 return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrap(err, "create am dir"))} 826 } 827 const config = ` 828 route: 829 group_by: ['alertname'] 830 group_wait: 1s 831 group_interval: 1s 832 receiver: 'null' 833 receivers: 834 - name: 'null' 835 ` 836 if err := os.WriteFile(filepath.Join(f.Dir(), "config.yaml"), []byte(config), 0600); err != nil { 837 return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrap(err, "creating alertmanager config file failed"))} 838 } 839 840 return e2emon.AsInstrumented(f.Init(wrapWithDefaults(e2e.StartOptions{ 841 Image: DefaultAlertmanagerImage(), 842 Command: e2e.NewCommandWithoutEntrypoint("/bin/alertmanager", e2e.BuildArgs(map[string]string{ 843 "--config.file": filepath.Join(f.InternalDir(), "config.yaml"), 844 "--web.listen-address": "0.0.0.0:8080", 845 "--log.level": infoLogLevel, 846 "--storage.path": f.InternalDir(), 847 "--web.get-concurrency": "1", 848 "--web.timeout": "2m", 849 })...), 850 Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 851 User: strconv.Itoa(os.Geteuid()), 852 WaitReadyBackoff: &defaultBackoffConfig, 853 })), "http") 854 } 855 856 func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig, cacheConfig, indexCacheConfig string, extArgs []string, relabelConfig ...relabel.Config) *e2emon.InstrumentedRunnable { 857 f := e.Runnable(fmt.Sprintf("store-gw-%v", name)). 858 WithPorts(map[string]int{"http": 8080, "grpc": 9091}). 
func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig, cacheConfig, indexCacheConfig string, extArgs []string, relabelConfig ...relabel.Config) *e2emon.InstrumentedRunnable {
	f := e.Runnable(fmt.Sprintf("store-gw-%v", name)).
		WithPorts(map[string]int{"http": 8080, "grpc": 9091}).
		Future()

	if err := os.MkdirAll(f.Dir(), 0750); err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrap(err, "create store dir"))}
	}

	bktConfigBytes, err := yaml.Marshal(bucketConfig)
	if err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrapf(err, "generate store config file: %v", bucketConfig))}
	}

	relabelConfigBytes, err := yaml.Marshal(relabelConfig)
	if err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrapf(err, "generate store relabel file: %v", relabelConfig))}
	}

	args := append(e2e.BuildArgs(map[string]string{
		"--debug.name":        fmt.Sprintf("store-gw-%v", name),
		"--grpc-address":      ":9091",
		"--grpc-grace-period": "0s",
		"--http-address":      ":8080",
		"--log.level":         infoLogLevel,
		"--data-dir":          f.InternalDir(),
		"--objstore.config":   string(bktConfigBytes),
		// Accelerated sync time for quicker test (3m by default).
		"--sync-block-duration":               "3s",
		"--block-sync-concurrency":            "1",
		"--store.grpc.series-max-concurrency": "1",
		"--selector.relabel-config":           string(relabelConfigBytes),
		"--consistency-delay":                 "30m",
	}), extArgs...)

	if cacheConfig != "" {
		args = append(args, "--store.caching-bucket.config", cacheConfig)
	}

	if indexCacheConfig != "" {
		args = append(args, "--index-cache.config", indexCacheConfig)
	}

	return e2emon.AsInstrumented(f.Init(wrapWithDefaults(e2e.StartOptions{
		Image:     DefaultImage(),
		Command:   e2e.NewCommand("store", args...),
		Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200),
	})), "http")
}

type CompactorBuilder struct {
	e2e.Linkable
	f e2e.FutureRunnable
}

func NewCompactorBuilder(e e2e.Environment, name string) *CompactorBuilder {
	f := e.Runnable(fmt.Sprintf("compact-%s", name)).
		WithPorts(map[string]int{"http": 8080}).
		Future()
	return &CompactorBuilder{
		Linkable: f,
		f:        f,
	}
}

func (c *CompactorBuilder) Init(bucketConfig client.BucketConfig, relabelConfig []relabel.Config, extArgs ...string) *e2emon.InstrumentedRunnable {
	if err := os.MkdirAll(c.Dir(), 0750); err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(c.Name(), errors.Wrap(err, "create compact dir"))}
	}

	bktConfigBytes, err := yaml.Marshal(bucketConfig)
	if err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(c.Name(), errors.Wrapf(err, "generate compact config file: %v", bucketConfig))}
	}

	relabelConfigBytes, err := yaml.Marshal(relabelConfig)
	if err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(c.Name(), errors.Wrapf(err, "generate compact relabel file: %v", relabelConfig))}
	}

	return e2emon.AsInstrumented(c.f.Init(wrapWithDefaults(e2e.StartOptions{
		Image: DefaultImage(),
		Command: e2e.NewCommand("compact", append(e2e.BuildArgs(map[string]string{
			"--debug.name":               c.Name(),
			"--log.level":                infoLogLevel,
			"--data-dir":                 c.InternalDir(),
			"--objstore.config":          string(bktConfigBytes),
			"--http-address":             ":8080",
			"--compact.cleanup-interval": "15s",
			"--selector.relabel-config":  string(relabelConfigBytes),
			"--wait":                     "",
		}), extArgs...)...),
		Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200),
	})), "http")
}

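// NewQueryFrontend runs a query-frontend in front of the given downstream querier URL.
// A rough usage sketch (illustrative only; cfg and cacheCfg stand for the
// queryfrontend.Config and queryfrontend.CacheProviderConfig a test prepares, e.g. an
// in-memory response cache, and "qfe-1" is an assumed name):
//
//	qfe := e2ethanos.NewQueryFrontend(e, "qfe-1", "http://"+querier.InternalEndpoint("http"), cfg, cacheCfg)
//	testutil.Ok(t, e2e.StartAndWaitReady(qfe))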
func NewQueryFrontend(e e2e.Environment, name, downstreamURL string, config queryfrontend.Config, cacheConfig queryfrontend.CacheProviderConfig) *e2eobs.Observable {
	cacheConfigBytes, err := yaml.Marshal(cacheConfig)
	if err != nil {
		return &e2eobs.Observable{Runnable: e2e.NewFailedRunnable(name, errors.Wrapf(err, "marshal response cache config file: %v", cacheConfig))}
	}

	flags := map[string]string{
		"--debug.name":                        fmt.Sprintf("query-frontend-%s", name),
		"--http-address":                      ":8080",
		"--query-frontend.downstream-url":     downstreamURL,
		"--log.level":                         infoLogLevel,
		"--query-range.response-cache-config": string(cacheConfigBytes),
	}

	if !config.QueryRangeConfig.AlignRangeWithStep {
		flags["--no-query-range.align-range-with-step"] = ""
	}

	if config.NumShards > 0 {
		flags["--query-frontend.vertical-shards"] = strconv.Itoa(config.NumShards)
	}

	if config.QueryRangeConfig.MinQuerySplitInterval != 0 {
		flags["--query-range.min-split-interval"] = config.QueryRangeConfig.MinQuerySplitInterval.String()
		flags["--query-range.max-split-interval"] = config.QueryRangeConfig.MaxQuerySplitInterval.String()
		flags["--query-range.horizontal-shards"] = strconv.FormatInt(config.QueryRangeConfig.HorizontalShards, 10)
		flags["--query-range.split-interval"] = "0"
	}

	return e2eobs.AsObservable(e.Runnable(fmt.Sprintf("query-frontend-%s", name)).
		WithPorts(map[string]int{"http": 8080}).
		Init(e2e.StartOptions{
			Image:            DefaultImage(),
			Command:          e2e.NewCommand("query-frontend", e2e.BuildArgs(flags)...),
			Readiness:        e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200),
			User:             strconv.Itoa(os.Getuid()),
			WaitReadyBackoff: &defaultBackoffConfig,
		}), "http")
}

func NewReverseProxy(e e2e.Environment, name, tenantID, target string) *e2emon.InstrumentedRunnable {
	conf := fmt.Sprintf(`
events {
	worker_connections 1024;
}

http {
	server {
		listen 80;
		server_name _;

		location / {
			proxy_set_header THANOS-TENANT %s;
			proxy_pass %s;
		}
	}
}
`, tenantID, target)

	f := e.Runnable(fmt.Sprintf("nginx-%s", name)).
		WithPorts(map[string]int{"http": 80}).
		Future()

	if err := os.MkdirAll(f.Dir(), 0750); err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrap(err, "create nginx dir"))}
	}

	if err := os.WriteFile(filepath.Join(f.Dir(), "nginx.conf"), []byte(conf), 0600); err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrap(err, "creating nginx config file failed"))}
	}

	return e2emon.AsInstrumented(f.Init(e2e.StartOptions{
		Image:            "docker.io/nginx:1.21.1-alpine",
		Volumes:          []string{filepath.Join(f.Dir(), "/nginx.conf") + ":/etc/nginx/nginx.conf:ro"},
		WaitReadyBackoff: &defaultBackoffConfig,
	}), "http")
}

func NewMemcached(e e2e.Environment, name string) *e2emon.InstrumentedRunnable {
	return e2emon.AsInstrumented(e.Runnable(fmt.Sprintf("memcached-%s", name)).
		WithPorts(map[string]int{"memcached": 11211}).
		Init(e2e.StartOptions{
			Image:            "docker.io/memcached:1.6.3-alpine",
			Command:          e2e.NewCommand("memcached", []string{"-m 1024", "-I 1m", "-c 1024", "-v"}...),
			User:             strconv.Itoa(os.Getuid()),
			WaitReadyBackoff: &defaultBackoffConfig,
		}), "memcached")
}

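// NewToolsBucketWeb runs `thanos tools bucket web` against the given bucket config.
// A minimal call in a test might look like (illustrative only; bktConfig is the same
// kind of client.BucketConfig used for the store gateway, and the remaining options
// are left empty):
//
//	bktWeb := e2ethanos.NewToolsBucketWeb(e, "bucket-web-1", bktConfig, "", "", "", "", "")
//	testutil.Ok(t, e2e.StartAndWaitReady(bktWeb))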
func NewToolsBucketWeb(
	e e2e.Environment,
	name string,
	bucketConfig client.BucketConfig,
	routePrefix,
	externalPrefix string,
	minTime string,
	maxTime string,
	relabelConfig string,
) *e2emon.InstrumentedRunnable {
	bktConfigBytes, err := yaml.Marshal(bucketConfig)
	if err != nil {
		return &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(name, errors.Wrapf(err, "generate tools bucket web config file: %v", bucketConfig))}
	}

	f := e.Runnable(fmt.Sprintf("toolsBucketWeb-%s", name)).
		WithPorts(map[string]int{"http": 8080, "grpc": 9091}).
		Future()

	args := e2e.BuildArgs(map[string]string{
		"--debug.name":      fmt.Sprintf("toolsBucketWeb-%s", name),
		"--http-address":    ":8080",
		"--log.level":       infoLogLevel,
		"--objstore.config": string(bktConfigBytes),
	})
	if routePrefix != "" {
		args = append(args, "--web.route-prefix="+routePrefix)
	}

	if externalPrefix != "" {
		args = append(args, "--web.external-prefix="+externalPrefix)
	}

	if minTime != "" {
		args = append(args, "--min-time="+minTime)
	}

	if maxTime != "" {
		args = append(args, "--max-time="+maxTime)
	}

	if relabelConfig != "" {
		args = append(args, "--selector.relabel-config="+relabelConfig)
	}

	args = append([]string{"bucket", "web"}, args...)

	return e2emon.AsInstrumented(f.Init(wrapWithDefaults(e2e.StartOptions{
		Image:     DefaultImage(),
		Command:   e2e.NewCommand("tools", args...),
		Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200),
	})), "http")
}

func NewS3Config(bucket, endpoint, basePath string) s3.Config {
	httpDefaultConf := s3.DefaultConfig.HTTPConfig
	httpDefaultConf.TLSConfig = exthttp.TLSConfig{
		CAFile:   filepath.Join(basePath, "certs", "CAs", "ca.crt"),
		CertFile: filepath.Join(basePath, "certs", "public.crt"),
		KeyFile:  filepath.Join(basePath, "certs", "private.key"),
	}

	return s3.Config{
		Bucket:           bucket,
		AccessKey:        e2edb.MinioAccessKey,
		SecretKey:        e2edb.MinioSecretKey,
		Endpoint:         endpoint,
		Insecure:         false,
		HTTPConfig:       httpDefaultConf,
		BucketLookupType: s3.AutoLookup,
	}
}

// NOTE: by using aggregation all results are now unsorted.
var QueryUpWithoutInstance = func() string { return "sum(up) without (instance)" }

// LocalPrometheusTarget is a constant to be used in the Prometheus config if you
// wish to enable Prometheus to scrape itself in a test.
const LocalPrometheusTarget = "localhost:9090"

// DefaultPromConfig returns Prometheus config that sets Prometheus to:
// * expose 2 external labels: prometheus and replica.
// * optionally scrape self. This will produce up == 0 metric which we can assert on.
// * optionally remote write to the given endpoint(s).
func DefaultPromConfig(name string, replica int, remoteWriteEndpoint, ruleFile string, scrapeTargets ...string) string {
	var targets string
	if len(scrapeTargets) > 0 {
		targets = strings.Join(scrapeTargets, ",")
	}

	config := fmt.Sprintf(`
global:
  external_labels:
    prometheus: %v
    replica: %v
`, name, replica)

	if targets != "" {
		config = fmt.Sprintf(`
%s
scrape_configs:
- job_name: 'myself'
  # Quick scrapes for test purposes.
  scrape_interval: 1s
  scrape_timeout: 1s
  static_configs:
  - targets: [%s]
  relabel_configs:
  - source_labels: ['__address__']
    regex: '^localhost:80$'
    action: drop
`, config, targets)
	}

	if remoteWriteEndpoint != "" {
		config = fmt.Sprintf(`
%s
remote_write:`, config)
		for _, url := range strings.Split(remoteWriteEndpoint, ",") {
			config = fmt.Sprintf(`
%s
- url: "%s"
  # Don't spam receiver on mistake.
  queue_config:
    min_backoff: 2s
    max_backoff: 10s`, config, url)
		}
	}

	if ruleFile != "" {
		config = fmt.Sprintf(`
%s
rule_files:
- "%s"
`, config, ruleFile)
	}

	return config
}

func NewRedis(e e2e.Environment, name string) e2e.Runnable {
	return e.Runnable(fmt.Sprintf("redis-%s", name)).WithPorts(map[string]int{"redis": 6379}).Init(
		e2e.StartOptions{
			Image:            "docker.io/redis:7.0.4-alpine",
			Command:          e2e.NewCommand("redis-server", "*:6379"),
			User:             strconv.Itoa(os.Getuid()),
			WaitReadyBackoff: &defaultBackoffConfig,
		},
	)
}