github.com/thanos-io/thanos@v0.32.5/test/e2e/store_gateway_test.go

// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.

package e2e_test

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/efficientgo/core/testutil"
	"github.com/efficientgo/e2e"
	e2edb "github.com/efficientgo/e2e/db"
	e2emon "github.com/efficientgo/e2e/monitoring"
	"github.com/efficientgo/e2e/monitoring/matchers"
	"github.com/go-kit/log"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/model/timestamp"

	"github.com/thanos-io/objstore"
	"github.com/thanos-io/objstore/client"
	"github.com/thanos-io/objstore/providers/s3"

	"github.com/thanos-io/thanos/pkg/block"
	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/cacheutil"
	"github.com/thanos-io/thanos/pkg/promclient"
	"github.com/thanos-io/thanos/pkg/runutil"
	"github.com/thanos-io/thanos/pkg/store/storepb"
	"github.com/thanos-io/thanos/pkg/testutil/e2eutil"
	"github.com/thanos-io/thanos/test/e2e/e2ethanos"
)

const testQuery = "{a=\"1\"}"

// TODO(bwplotka): Extend this test to have multiple stores.
func TestStoreGateway(t *testing.T) {
	t.Parallel()

	e, err := e2e.NewDockerEnvironment("store-gateway")
	testutil.Ok(t, err)
	t.Cleanup(e2ethanos.CleanScenario(t, e))

	const bucket = "store-gateway-test"
	m := e2edb.NewMinio(e, "thanos-minio", bucket, e2edb.WithMinioTLS())
	testutil.Ok(t, e2e.StartAndWaitReady(m))

	memcached := e2ethanos.NewMemcached(e, "1")
	testutil.Ok(t, e2e.StartAndWaitReady(memcached))

	memcachedConfig := fmt.Sprintf(`type: MEMCACHED
config:
  addresses: [%s]
  blocks_iter_ttl: 0s
  metafile_exists_ttl: 0s
  metafile_doesnt_exist_ttl: 0s
  metafile_content_ttl: 0s`, memcached.InternalEndpoint("memcached"))

	s1 := e2ethanos.NewStoreGW(
		e,
		"1",
		client.BucketConfig{
			Type:   client.S3,
			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
		},
		memcachedConfig,
		"",
		nil,
		relabel.Config{
			Action:       relabel.Drop,
			Regex:        relabel.MustNewRegexp("value2"),
			SourceLabels: model.LabelNames{"ext1"},
		},
	)
	testutil.Ok(t, e2e.StartAndWaitReady(s1))
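	// The memcached configuration above is wired in as the store's bucket (chunks/metadata)
	// cache config, while the separate index-cache argument is left empty; the relabel rule
	// drops every block whose ext1 external label is "value2", which shows up as
	// labelExcludedMeta in the sync metrics asserted below.
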
	// Ensure bucket UI.
	ensureGETStatusCode(t, http.StatusOK, "http://"+path.Join(s1.Endpoint("http"), "loaded"))

	q := e2ethanos.NewQuerierBuilder(e, "1", s1.InternalEndpoint("grpc")).WithEnabledFeatures([]string{"promql-negative-offset", "promql-at-modifier"}).Init()
	testutil.Ok(t, e2e.StartAndWaitReady(q))

	dir := filepath.Join(e.SharedDir(), "tmp")
	testutil.Ok(t, os.MkdirAll(dir, os.ModePerm))

	floatSeries := []labels.Labels{labels.FromStrings("a", "1", "b", "2")}
	nativeHistogramSeries := []labels.Labels{labels.FromStrings("a", "1", "b", "3")}
	extLset := labels.FromStrings("ext1", "value1", "replica", "1")
	extLset2 := labels.FromStrings("ext1", "value1", "replica", "2")
	extLset3 := labels.FromStrings("ext1", "value2", "replica", "3")
	extLset4 := labels.FromStrings("ext1", "value1", "replica", "3")
	extLset5 := labels.FromStrings("ext1", "value3", "replica", "1")

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	t.Cleanup(cancel)

	now := time.Now()
	id1, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, floatSeries, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset, 0, metadata.NoneFunc)
	testutil.Ok(t, err)
	id2, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, floatSeries, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset2, 0, metadata.NoneFunc)
	testutil.Ok(t, err)
	id3, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, floatSeries, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset3, 0, metadata.NoneFunc)
	testutil.Ok(t, err)
	id4, err := e2eutil.CreateBlock(ctx, dir, floatSeries, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), extLset, 0, metadata.NoneFunc)
	testutil.Ok(t, err)
	id5, err := e2eutil.CreateHistogramBlockWithDelay(ctx, dir, nativeHistogramSeries, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset5, 0, metadata.NoneFunc)
	testutil.Ok(t, err)
	l := log.NewLogfmtLogger(os.Stdout)
	bkt, err := s3.NewBucketWithConfig(l,
		e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.Dir()), "test-feed")
	testutil.Ok(t, err)

	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id1.String()), id1.String()))
	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id2.String()), id2.String()))
	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id3.String()), id3.String()))
	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id4.String()), id4.String()))
	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id5.String()), id5.String()))
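
	// Of the five uploaded blocks, id3 carries ext1="value2" and is dropped by the relabel rule
	// (labelExcludedMeta), while id4 was created without a block delay and is therefore treated
	// as too fresh to load (TooFreshMeta). The remaining three, id1, id2 and the native histogram
	// block id5, are expected to be loaded.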

	// Wait for store to sync blocks.
	// thanos_blocks_meta_synced: 3x loadedMeta 1x labelExcludedMeta 1x TooFreshMeta.
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(5), "thanos_blocks_meta_synced"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(3), "thanos_bucket_store_blocks_loaded"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_drops_total"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))

	t.Run("query works", func(t *testing.T) {
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return fmt.Sprintf("%s @ end()", testQuery) },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "1",
				},
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "2",
				},
				{
					"a":       "1",
					"b":       "3",
					"ext1":    "value3",
					"replica": "1",
				},
			},
		)

		// 3 x postings, 3 x series, 3 x chunks.
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(9), "thanos_bucket_store_series_data_touched"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(9), "thanos_bucket_store_series_data_fetched"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(3), "thanos_bucket_store_series_blocks_queried"))

		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: true,
			},
			[]model.Metric{
				{
					"a":    "1",
					"b":    "2",
					"ext1": "value1",
				},
				{
					"a":    "1",
					"b":    "3",
					"ext1": "value3",
				},
			},
		)

		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(18), "thanos_bucket_store_series_data_touched"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(12), "thanos_bucket_store_series_data_fetched"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(3+3), "thanos_bucket_store_series_blocks_queried"))
	})
	t.Run("remove meta.json from id1 block", func(t *testing.T) {
		testutil.Ok(t, bkt.Delete(ctx, filepath.Join(id1.String(), block.MetaFilename)))

		// Wait for store to sync blocks.
		// thanos_blocks_meta_synced: 2x loadedMeta 1x labelExcludedMeta 1x TooFreshMeta 1x noMeta.
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(5), "thanos_blocks_meta_synced"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(2), "thanos_bucket_store_blocks_loaded"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_bucket_store_block_drops_total"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))
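
		// Deleting only meta.json leaves the index and chunks of id1 in the bucket, but the next
		// sync classifies the block as noMeta, so the gateway unloads it: hence the block drop
		// counted above and the missing replica="1" series in the query below.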

		// TODO(bwplotka): Entries are still in LRU cache.
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "2",
				},
				{
					"a":       "1",
					"b":       "3",
					"ext1":    "value3",
					"replica": "1",
				},
			},
		)
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(4+4), "thanos_bucket_store_series_blocks_queried"))
	})
	t.Run("upload block id5, similar to id1", func(t *testing.T) {
		id5, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, floatSeries, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset4, 0, metadata.NoneFunc)
		testutil.Ok(t, err)
		testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id5.String()), id5.String()))

		// Wait for store to sync blocks.
		// thanos_blocks_meta_synced: 2x loadedMeta 1x labelExcludedMeta 1x TooFreshMeta 1x noMeta.
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(5), "thanos_blocks_meta_synced"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(2), "thanos_bucket_store_blocks_loaded"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_bucket_store_block_drops_total"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))

		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "2",
				},
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "3", // New block.
				},
				{
					"a":       "1",
					"b":       "3",
					"ext1":    "value3",
					"replica": "1",
				},
			},
		)
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(11+2), "thanos_bucket_store_series_blocks_queried"))
	})
	t.Run("delete whole id2 block #yolo", func(t *testing.T) {
		testutil.Ok(t, block.Delete(ctx, l, bkt, id2))
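
		// Unlike the previous subtest, block.Delete removes the whole id2 directory from the
		// bucket, which is counted as a second block drop; the replica="2" series therefore
		// disappears from the results below.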

		// Wait for store to sync blocks.
		// thanos_blocks_meta_synced: 2x loadedMeta 1x labelExcludedMeta 1x TooFreshMeta 1x noMeta.
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(5), "thanos_blocks_meta_synced"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(2), "thanos_bucket_store_blocks_loaded"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1+1), "thanos_bucket_store_block_drops_total"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))

		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "3",
				},
				{
					"a":       "1",
					"b":       "3",
					"ext1":    "value3",
					"replica": "1",
				},
			},
		)
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(14+1), "thanos_bucket_store_series_blocks_queried"))
	})

	t.Run("negative offset should work", func(t *testing.T) {
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return "{a=\"1\"} offset -4h" },
			func() time.Time { return time.Now().Add(-4 * time.Hour) }, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "3",
				},
				{
					"a":       "1",
					"b":       "3",
					"ext1":    "value3",
					"replica": "1",
				},
			},
		)
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(15+2), "thanos_bucket_store_series_blocks_queried"))
	})

	// TODO(khyati) Let's add some case for compaction-meta.json once the PR will be merged: https://github.com/thanos-io/thanos/pull/2136.
}

// Test store with `--no-cache-index-header` flag.
func TestStoreGatewayNoCacheFile(t *testing.T) {
	t.Parallel()

	e, err := e2e.NewDockerEnvironment("store-no-cache")
	testutil.Ok(t, err)
	t.Cleanup(e2ethanos.CleanScenario(t, e))

	const bucket = "store-no-cache-test"
	m := e2edb.NewMinio(e, "thanos-minio", bucket, e2edb.WithMinioTLS())
	testutil.Ok(t, e2e.StartAndWaitReady(m))

	s1 := e2ethanos.NewStoreGW(
		e,
		"1",
		client.BucketConfig{
			Type:   client.S3,
			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
		},
		"",
		"",
		[]string{"--no-cache-index-header"},
		relabel.Config{
			Action:       relabel.Drop,
			Regex:        relabel.MustNewRegexp("value2"),
			SourceLabels: model.LabelNames{"ext1"},
		},
	)
	testutil.Ok(t, e2e.StartAndWaitReady(s1))

	q := e2ethanos.NewQuerierBuilder(e, "1", s1.InternalEndpoint("grpc")).WithEnabledFeatures([]string{"promql-negative-offset", "promql-at-modifier"}).Init()
	testutil.Ok(t, e2e.StartAndWaitReady(q))

	dir := filepath.Join(e.SharedDir(), "tmp")
	testutil.Ok(t, os.MkdirAll(dir, os.ModePerm))

	series := []labels.Labels{labels.FromStrings("a", "1", "b", "2")}
	extLset := labels.FromStrings("ext1", "value1", "replica", "1")
	extLset2 := labels.FromStrings("ext1", "value1", "replica", "2")
	extLset3 := labels.FromStrings("ext1", "value2", "replica", "3")
	extLset4 := labels.FromStrings("ext1", "value1", "replica", "3")

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	t.Cleanup(cancel)

	now := time.Now()
	id1, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset, 0,
		metadata.NoneFunc)
	testutil.Ok(t, err)
	id2, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset2, 0, metadata.NoneFunc)
	testutil.Ok(t, err)
	id3, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset3, 0, metadata.NoneFunc)
	testutil.Ok(t, err)
	id4, err := e2eutil.CreateBlock(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), extLset, 0, metadata.NoneFunc)
	testutil.Ok(t, err)
	l := log.NewLogfmtLogger(os.Stdout)
	bkt, err := s3.NewBucketWithConfig(l,
		e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.Dir()), "test-feed")
	testutil.Ok(t, err)

	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id1.String()), id1.String()))
	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id2.String()), id2.String()))
	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id3.String()), id3.String()))
	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id4.String()), id4.String()))

	// Wait for store to sync blocks.
	// thanos_blocks_meta_synced: 2x loadedMeta 1x labelExcludedMeta 1x TooFreshMeta.
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(4), "thanos_blocks_meta_synced"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(2), "thanos_bucket_store_blocks_loaded"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_drops_total"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))

	t.Run("query works", func(t *testing.T) {
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return fmt.Sprintf("%s @ end()", testQuery) },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "1",
				},
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "2",
				},
			},
		)

		// 2 x postings, 2 x series, 2 x chunks.
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(6), "thanos_bucket_store_series_data_touched"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(6), "thanos_bucket_store_series_data_fetched"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(2), "thanos_bucket_store_series_blocks_queried"))

		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: true,
			},
			[]model.Metric{
				{
					"a":    "1",
					"b":    "2",
					"ext1": "value1",
				},
			},
		)

		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(12), "thanos_bucket_store_series_data_touched"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(8), "thanos_bucket_store_series_data_fetched"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(2+2), "thanos_bucket_store_series_blocks_queried"))
	})
	t.Run("remove meta.json from id1 block", func(t *testing.T) {
		testutil.Ok(t, bkt.Delete(ctx, filepath.Join(id1.String(), block.MetaFilename)))

		// Wait for store to sync blocks.
		// thanos_blocks_meta_synced: 1x loadedMeta 1x labelExcludedMeta 1x TooFreshMeta 1x noMeta.
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(4), "thanos_blocks_meta_synced"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_bucket_store_blocks_loaded"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_bucket_store_block_drops_total"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))

		// TODO(bwplotka): Entries are still in LRU cache.
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "2",
				},
			},
		)
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(4+1), "thanos_bucket_store_series_blocks_queried"))
	})
	t.Run("upload block id5, similar to id1", func(t *testing.T) {
		id5, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset4, 0, metadata.NoneFunc)
		testutil.Ok(t, err)
		testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id5.String()), id5.String()))

		// Wait for store to sync blocks.
		// thanos_blocks_meta_synced: 2x loadedMeta 1x labelExcludedMeta 1x TooFreshMeta 1x noMeta.
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(5), "thanos_blocks_meta_synced"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(2), "thanos_bucket_store_blocks_loaded"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_bucket_store_block_drops_total"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))

		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "2",
				},
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "3", // New block.
				},
			},
		)
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(5+2), "thanos_bucket_store_series_blocks_queried"))
	})
	t.Run("delete whole id2 block #yolo", func(t *testing.T) {
		testutil.Ok(t, block.Delete(ctx, l, bkt, id2))

		// Wait for store to sync blocks.
		// thanos_blocks_meta_synced: 1x loadedMeta 1x labelExcludedMeta 1x TooFreshMeta 1x noMeta.
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(4), "thanos_blocks_meta_synced"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_bucket_store_blocks_loaded"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1+1), "thanos_bucket_store_block_drops_total"))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))

		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "3",
				},
			},
		)
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(7+1), "thanos_bucket_store_series_blocks_queried"))
	})

	t.Run("negative offset should work", func(t *testing.T) {
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return "{a=\"1\"} offset -4h" },
			func() time.Time { return time.Now().Add(-4 * time.Hour) }, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "3",
				},
			},
		)
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(7+2), "thanos_bucket_store_series_blocks_queried"))
	})
}

func TestStoreGatewayMemcachedCache(t *testing.T) {
	t.Parallel()

	e, err := e2e.NewDockerEnvironment("store-memcached")
	testutil.Ok(t, err)
	t.Cleanup(e2ethanos.CleanScenario(t, e))

	const bucket = "store-gateway-memcached-cache-test"
	m := e2edb.NewMinio(e, "thanos-minio", bucket, e2edb.WithMinioTLS())
	testutil.Ok(t, e2e.StartAndWaitReady(m))

	memcached := e2ethanos.NewMemcached(e, "1")
	testutil.Ok(t, e2e.StartAndWaitReady(memcached))

	memcachedConfig := fmt.Sprintf(`type: MEMCACHED
config:
  addresses: [%s]
  blocks_iter_ttl: 0s`, memcached.InternalEndpoint("memcached"))

	s1 := e2ethanos.NewStoreGW(
		e,
		"1",
		client.BucketConfig{
			Type:   client.S3,
			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
		},
		memcachedConfig,
		"",
		nil,
	)
	testutil.Ok(t, e2e.StartAndWaitReady(s1))

	q := e2ethanos.NewQuerierBuilder(e, "1", s1.InternalEndpoint("grpc")).Init()
	testutil.Ok(t, e2e.StartAndWaitReady(q))

	dir := filepath.Join(e.SharedDir(), "tmp")
	testutil.Ok(t, os.MkdirAll(dir, os.ModePerm))

	series := []labels.Labels{labels.FromStrings("a", "1", "b", "2")}
	extLset := labels.FromStrings("ext1", "value1", "replica", "1")

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	t.Cleanup(cancel)

	now := time.Now()
	id, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset, 0, metadata.NoneFunc)
	testutil.Ok(t, err)

	l := log.NewLogfmtLogger(os.Stdout)
	bkt, err := s3.NewBucketWithConfig(l,
		e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.Dir()), "test-feed")
	testutil.Ok(t, err)

	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id.String()), id.String()))
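
	// Only a single block is uploaded here. The two subtests below run the same instant query
	// twice: the first pass populates the memcached-backed caching bucket and the second one is
	// expected to be served from it, which is asserted via the "chunks" cache hit counters.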

	// Wait for store to sync blocks.
	// thanos_blocks_meta_synced: 1x loadedMeta 0x labelExcludedMeta 0x TooFreshMeta.
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_blocks_meta_synced"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_bucket_store_blocks_loaded"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_drops_total"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))

	t.Run("query with cache miss", func(t *testing.T) {
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "1",
				},
			},
		)

		testutil.Ok(t, s1.WaitSumMetricsWithOptions(e2emon.Equals(0), []string{`thanos_store_bucket_cache_operation_hits_total`}, e2emon.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "config", "chunks"))))
	})

	t.Run("query with cache hit", func(t *testing.T) {
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "1",
				},
			},
		)

		testutil.Ok(t, s1.WaitSumMetricsWithOptions(e2emon.Greater(0), []string{`thanos_store_bucket_cache_operation_hits_total`}, e2emon.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "config", "chunks"))))
		testutil.Ok(t, s1.WaitSumMetrics(e2emon.Greater(0), "thanos_cache_memcached_hits_total"))
	})

}

func TestStoreGatewayGroupCache(t *testing.T) {
	t.Parallel()

	e, err := e2e.NewDockerEnvironment("store-groupcache")
	testutil.Ok(t, err)
	t.Cleanup(e2ethanos.CleanScenario(t, e))

	const bucket = "store-gateway-groupcache-test"
	m := e2edb.NewMinio(e, "thanos-minio", bucket, e2edb.WithMinioTLS())
	testutil.Ok(t, e2e.StartAndWaitReady(m))

	groupcacheConfig := `type: GROUPCACHE
config:
  self_url: http://e2e-test-store-gateway-groupcache-store-gw-%d:8080
  peers:
    - http://e2e-test-store-gateway-groupcache-store-gw-1:8080
    - http://e2e-test-store-gateway-groupcache-store-gw-2:8080
    - http://e2e-test-store-gateway-groupcache-store-gw-3:8080
  groupcache_group: groupcache_test_group
  dns_interval: 1s
  blocks_iter_ttl: 0s
  metafile_exists_ttl: 0s
  metafile_doesnt_exist_ttl: 0s
  metafile_content_ttl: 0s`

	store1 := e2ethanos.NewStoreGW(
		e,
		"1",
		client.BucketConfig{
			Type:   client.S3,
			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
		},
		fmt.Sprintf(groupcacheConfig, 1),
		"",
		nil,
	)
	store2 := e2ethanos.NewStoreGW(
		e,
		"2",
		client.BucketConfig{
			Type:   client.S3,
			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
		},
		fmt.Sprintf(groupcacheConfig, 2),
		"",
		nil,
	)
	store3 := e2ethanos.NewStoreGW(
		e,
		"3",
		client.BucketConfig{
			Type:   client.S3,
			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
		},
		fmt.Sprintf(groupcacheConfig, 3),
		"",
		nil,
	)
	testutil.Ok(t, e2e.StartAndWaitReady(store1, store2, store3))

	q := e2ethanos.NewQuerierBuilder(e, "1",
		store1.InternalEndpoint("grpc"),
store2.InternalEndpoint("grpc"), 716 store3.InternalEndpoint("grpc"), 717 ).Init() 718 testutil.Ok(t, err) 719 testutil.Ok(t, e2e.StartAndWaitReady(q)) 720 721 dir := filepath.Join(e.SharedDir(), "tmp") 722 testutil.Ok(t, os.MkdirAll(dir, os.ModePerm)) 723 724 series := []labels.Labels{labels.FromStrings("a", "1", "b", "2")} 725 extLset := labels.FromStrings("ext1", "value1", "replica", "1") 726 727 ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) 728 t.Cleanup(cancel) 729 730 now := time.Now() 731 id, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset, 0, metadata.NoneFunc) 732 testutil.Ok(t, err) 733 734 l := log.NewLogfmtLogger(os.Stdout) 735 bkt, err := s3.NewBucketWithConfig(l, e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.Dir()), "test-feed") 736 testutil.Ok(t, err) 737 738 testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id.String()), id.String())) 739 740 // Wait for store to sync blocks. 741 // thanos_blocks_meta_synced: 1x loadedMeta 0x labelExcludedMeta 0x TooFreshMeta. 742 for _, st := range []*e2emon.InstrumentedRunnable{store1, store2, store3} { 743 t.Run(st.Name(), func(t *testing.T) { 744 testutil.Ok(t, st.WaitSumMetrics(e2emon.Equals(1), "thanos_blocks_meta_synced")) 745 testutil.Ok(t, st.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total")) 746 747 testutil.Ok(t, st.WaitSumMetrics(e2emon.Equals(1), "thanos_bucket_store_blocks_loaded")) 748 testutil.Ok(t, st.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_drops_total")) 749 testutil.Ok(t, st.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total")) 750 }) 751 } 752 753 t.Run("query with groupcache loading from object storage", func(t *testing.T) { 754 queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery }, 755 time.Now, promclient.QueryOptions{ 756 Deduplicate: false, 757 }, 758 []model.Metric{ 759 { 760 "a": "1", 761 "b": "2", 762 "ext1": "value1", 763 "replica": "1", 764 }, 765 }, 766 ) 767 768 for _, st := range []*e2emon.InstrumentedRunnable{store1, store2, store3} { 769 testutil.Ok(t, st.WaitSumMetricsWithOptions(e2emon.Greater(0), []string{`thanos_cache_groupcache_loads_total`})) 770 testutil.Ok(t, st.WaitSumMetricsWithOptions(e2emon.Greater(0), []string{`thanos_store_bucket_cache_operation_hits_total`}, e2emon.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "config", "chunks")))) 771 } 772 }) 773 774 t.Run("try to load file with slashes", func(t *testing.T) { 775 resp, err := http.Get(fmt.Sprintf("http://%s/_galaxycache/groupcache_test_group/content:%s/meta.json", store1.Endpoint("http"), id.String())) 776 testutil.Ok(t, err) 777 testutil.Equals(t, 200, resp.StatusCode) 778 }) 779 } 780 781 func TestStoreGatewayBytesLimit(t *testing.T) { 782 t.Parallel() 783 784 const cacheCfg = `type: IN-MEMORY 785 config: 786 max_size: 2B 787 max_item_size: 1B` 788 789 e, err := e2e.NewDockerEnvironment("store-limit") 790 testutil.Ok(t, err) 791 t.Cleanup(e2ethanos.CleanScenario(t, e)) 792 793 const bucket = "store-gateway-test" 794 m := e2edb.NewMinio(e, "thanos-minio", bucket, e2edb.WithMinioTLS()) 795 testutil.Ok(t, e2e.StartAndWaitReady(m)) 796 797 store1 := e2ethanos.NewStoreGW( 798 e, 799 "1", 800 client.BucketConfig{ 801 Type: client.S3, 802 Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()), 803 }, 804 string(cacheCfg), 805 "", 806 
[]string{"--store.grpc.downloaded-bytes-limit=1B"}, 807 ) 808 809 store2 := e2ethanos.NewStoreGW( 810 e, 811 "2", 812 client.BucketConfig{ 813 Type: client.S3, 814 Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()), 815 }, 816 string(cacheCfg), 817 "", 818 []string{"--store.grpc.downloaded-bytes-limit=100B"}, 819 ) 820 store3 := e2ethanos.NewStoreGW( 821 e, 822 "3", 823 client.BucketConfig{ 824 Type: client.S3, 825 Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()), 826 }, 827 string(cacheCfg), 828 "", 829 []string{"--store.grpc.downloaded-bytes-limit=310176B"}, 830 ) 831 832 testutil.Ok(t, e2e.StartAndWaitReady(store1, store2, store3)) 833 834 q1 := e2ethanos.NewQuerierBuilder(e, "1", store1.InternalEndpoint("grpc")).Init() 835 q2 := e2ethanos.NewQuerierBuilder(e, "2", store2.InternalEndpoint("grpc")).Init() 836 q3 := e2ethanos.NewQuerierBuilder(e, "3", store3.InternalEndpoint("grpc")).Init() 837 testutil.Ok(t, e2e.StartAndWaitReady(q1, q2, q3)) 838 839 dir := filepath.Join(e.SharedDir(), "tmp") 840 testutil.Ok(t, os.MkdirAll(filepath.Join(e.SharedDir(), dir), os.ModePerm)) 841 842 series := []labels.Labels{labels.FromStrings("a", "1", "b", "2")} 843 extLset := labels.FromStrings("ext1", "value1", "replica", "1") 844 extLset2 := labels.FromStrings("ext1", "value1", "replica", "2") 845 extLset3 := labels.FromStrings("ext1", "value2", "replica", "3") 846 extLset4 := labels.FromStrings("ext1", "value2", "replica", "4") 847 848 ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) 849 t.Cleanup(cancel) 850 851 now := time.Now() 852 id1, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset, 0, metadata.NoneFunc) 853 testutil.Ok(t, err) 854 id2, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset2, 0, metadata.NoneFunc) 855 testutil.Ok(t, err) 856 id3, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset3, 0, metadata.NoneFunc) 857 testutil.Ok(t, err) 858 id4, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset4, 0, metadata.NoneFunc) 859 testutil.Ok(t, err) 860 l := log.NewLogfmtLogger(os.Stdout) 861 bkt, err := s3.NewBucketWithConfig(l, 862 e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.Dir()), "test-feed") 863 testutil.Ok(t, err) 864 865 testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id1.String()), id1.String())) 866 testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id2.String()), id2.String())) 867 testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id3.String()), id3.String())) 868 testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id4.String()), id4.String())) 869 870 // Wait for store to sync blocks. 

	// Wait for store to sync blocks.
	testutil.Ok(t, store1.WaitSumMetrics(e2emon.Equals(4), "thanos_blocks_meta_synced"))
	testutil.Ok(t, store2.WaitSumMetrics(e2emon.Equals(4), "thanos_blocks_meta_synced"))
	testutil.Ok(t, store3.WaitSumMetrics(e2emon.Equals(4), "thanos_blocks_meta_synced"))
	opts := promclient.QueryOptions{Deduplicate: true, PartialResponseStrategy: storepb.PartialResponseStrategy_ABORT}

	t.Run("Series() limits", func(t *testing.T) {

		testutil.Ok(t, runutil.RetryWithLog(log.NewLogfmtLogger(os.Stdout), 5*time.Second, ctx.Done(), func() error {
			if _, _, _, err := promclient.NewDefaultClient().QueryInstant(ctx, urlParse(t, "http://"+q1.Endpoint("http")), testQuery, now, opts); err != nil {
				e := err.Error()
				if strings.Contains(e, "expanded matching posting: get postings") && strings.Contains(e, "exceeded bytes limit while fetching postings: limit 1 violated") {
					return nil
				}
				return err
			}
			return fmt.Errorf("expected an error")
		}))

		testutil.Ok(t, runutil.RetryWithLog(log.NewLogfmtLogger(os.Stdout), 5*time.Second, ctx.Done(), func() error {
			if _, _, _, err := promclient.NewDefaultClient().QueryInstant(ctx, urlParse(t, "http://"+q2.Endpoint("http")), testQuery, now, opts); err != nil {
				e := err.Error()
				if strings.Contains(e, "preload series") && strings.Contains(e, "exceeded bytes limit while fetching series: limit 100 violated") {
					return nil
				}
				return err
			}
			return fmt.Errorf("expected an error")
		}))

		testutil.Ok(t, runutil.RetryWithLog(log.NewLogfmtLogger(os.Stdout), 5*time.Second, ctx.Done(), func() error {
			if _, _, _, err := promclient.NewDefaultClient().QueryInstant(ctx, urlParse(t, "http://"+q3.Endpoint("http")), testQuery, now, opts); err != nil {
				if err != nil {
					t.Logf("got error: %s", err)
				}
				e := err.Error()
				if strings.Contains(e, "load chunks") && strings.Contains(e, "exceeded bytes limit while fetching chunks: limit 310176 violated") {
					return nil
				}
				return err
			}
			return fmt.Errorf("expected an error")
		}))
	})
}

func TestRedisClient_Rueidis(t *testing.T) {
	t.Parallel()

	e, err := e2e.NewDockerEnvironment("redis-client")
	testutil.Ok(t, err)
	t.Cleanup(e2ethanos.CleanScenario(t, e))

	r := e2ethanos.NewRedis(e, "redis")
	testutil.Ok(t, r.Start())

	redisClient, err := cacheutil.NewRedisClientWithConfig(log.NewLogfmtLogger(os.Stderr), "redis", cacheutil.RedisClientConfig{
		Addr:                r.Endpoint("redis"),
		MaxAsyncBufferSize:  10,
		MaxAsyncConcurrency: 1,
	}, nil)
	testutil.Ok(t, err)

	testutil.Ok(t, redisClient.SetAsync("foo", []byte(`bar`), 1*time.Minute))
	testutil.Ok(t, runutil.Retry(1*time.Second, make(<-chan struct{}), func() error {
		returnedVals := redisClient.GetMulti(context.TODO(), []string{"foo"})
		if len(returnedVals) != 1 {
			return fmt.Errorf("got zero responses")
		}
		if !bytes.Equal(returnedVals["foo"], []byte("bar")) {
			return fmt.Errorf("got wrong response, expected bar: %v", returnedVals["foo"])
		}
		return nil
	}))
}
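
// Illustrative sketch (not part of the upstream suite): it shows how a client with the
// SetAsync/GetMulti signatures exercised in TestRedisClient_Rueidis above could back a simple
// read-through cache. The interface and helper names below are hypothetical; the Rueidis-backed
// client constructed above is expected, though not asserted here, to satisfy asyncKV.
type asyncKV interface {
	SetAsync(key string, value []byte, ttl time.Duration) error
	GetMulti(ctx context.Context, keys []string) map[string][]byte
}

// readThrough returns the cached value for key when present; otherwise it computes the value,
// queues a best-effort asynchronous write and returns the freshly computed bytes.
func readThrough(ctx context.Context, c asyncKV, key string, ttl time.Duration, compute func() []byte) []byte {
	if vals := c.GetMulti(ctx, []string{key}); len(vals) > 0 {
		if v, ok := vals[key]; ok {
			return v // Cache hit.
		}
	}
	v := compute()
	_ = c.SetAsync(key, v, ttl) // The cache is an optimization only, so the write error is ignored.
	return v
}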

func TestStoreGatewayMemcachedIndexCacheExpandedPostings(t *testing.T) {
	t.Parallel()

	e, err := e2e.NewDockerEnvironment("memcached-exp")
	testutil.Ok(t, err)
	t.Cleanup(e2ethanos.CleanScenario(t, e))

	const bucket = "store-gateway-memcached-index-cache-expanded-postings-test"
	m := e2edb.NewMinio(e, "thanos-minio", bucket, e2edb.WithMinioTLS())
	testutil.Ok(t, e2e.StartAndWaitReady(m))

	memcached := e2ethanos.NewMemcached(e, "1")
	testutil.Ok(t, e2e.StartAndWaitReady(memcached))

	indexCacheConfig := fmt.Sprintf(`type: MEMCACHED
config:
  addresses: [%s]
  max_async_concurrency: 10
  dns_provider_update_interval: 1s
  auto_discovery: false`, memcached.InternalEndpoint("memcached"))

	s1 := e2ethanos.NewStoreGW(
		e,
		"1",
		client.BucketConfig{
			Type:   client.S3,
			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
		},
		"",
		indexCacheConfig,
		nil,
	)
	testutil.Ok(t, e2e.StartAndWaitReady(s1))

	q := e2ethanos.NewQuerierBuilder(e, "1", s1.InternalEndpoint("grpc")).Init()
	testutil.Ok(t, e2e.StartAndWaitReady(q))

	dir := filepath.Join(e.SharedDir(), "tmp")
	testutil.Ok(t, os.MkdirAll(dir, os.ModePerm))

	series := []labels.Labels{labels.FromStrings("a", "1", "b", "2")}
	extLset := labels.FromStrings("ext1", "value1", "replica", "1")

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	t.Cleanup(cancel)

	now := time.Now()
	id, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset, 0, metadata.NoneFunc)
	testutil.Ok(t, err)

	l := log.NewLogfmtLogger(os.Stdout)
	bkt, err := s3.NewBucketWithConfig(l,
		e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.Dir()), "test-feed")
	testutil.Ok(t, err)

	testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id.String()), id.String()))

	// Wait for store to sync blocks.
	// thanos_blocks_meta_synced: 1x loadedMeta 0x labelExcludedMeta 0x TooFreshMeta.
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_blocks_meta_synced"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_blocks_meta_sync_failures_total"))

	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(1), "thanos_bucket_store_blocks_loaded"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_drops_total"))
	testutil.Ok(t, s1.WaitSumMetrics(e2emon.Equals(0), "thanos_bucket_store_block_load_failures_total"))

	t.Run("query with cache miss", func(t *testing.T) {
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "1",
				},
			},
		)

		testutil.Ok(t, s1.WaitSumMetricsWithOptions(e2emon.Equals(1), []string{`thanos_store_index_cache_requests_total`}, e2emon.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "item_type", "ExpandedPostings"))))
		testutil.Ok(t, s1.WaitSumMetricsWithOptions(e2emon.Equals(0), []string{`thanos_store_index_cache_hits_total`}, e2emon.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "item_type", "ExpandedPostings"))))
	})

	t.Run("query with cache hit", func(t *testing.T) {
		queryAndAssertSeries(t, ctx, q.Endpoint("http"), func() string { return testQuery },
			time.Now, promclient.QueryOptions{
				Deduplicate: false,
			},
			[]model.Metric{
				{
					"a":       "1",
					"b":       "2",
					"ext1":    "value1",
					"replica": "1",
				},
			},
		)

		testutil.Ok(t, s1.WaitSumMetricsWithOptions(e2emon.Equals(2), []string{`thanos_store_index_cache_requests_total`},
			e2emon.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "item_type", "ExpandedPostings"))))
		testutil.Ok(t, s1.WaitSumMetricsWithOptions(e2emon.Equals(1), []string{`thanos_store_index_cache_hits_total`}, e2emon.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "item_type", "ExpandedPostings"))))
	})
}