github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/storage/store_test.go

package storage

import (
	"context"
	"log"
	"math"
	"net/http"
	_ "net/http/pprof"
	"path"
	"runtime"
	"testing"
	"time"

	"github.com/cespare/xxhash/v2"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/user"

	"github.com/grafana/dskit/flagext"

	"github.com/grafana/loki/pkg/iter"
	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/querier/astmapper"
	"github.com/grafana/loki/pkg/storage/chunk"
	"github.com/grafana/loki/pkg/storage/chunk/client/local"
	"github.com/grafana/loki/pkg/storage/config"
	"github.com/grafana/loki/pkg/storage/stores/indexshipper"
	"github.com/grafana/loki/pkg/storage/stores/shipper"
	util_log "github.com/grafana/loki/pkg/util/log"
	"github.com/grafana/loki/pkg/util/marshal"
	"github.com/grafana/loki/pkg/validation"
)

var (
	start      = model.Time(1523750400000)
	m          runtime.MemStats
	ctx        = user.InjectOrgID(context.Background(), "fake")
	cm         = NewClientMetrics()
	chunkStore = getLocalStore(cm)
)
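
// Shared fixtures for the benchmarks below: chunkStore points at an on-disk
// store under /tmp/benchmark (see getLocalStore), which is expected to have
// been populated beforehand by pkg/storage/hack/main.go.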

// go test -bench=. -benchmem -memprofile memprofile.out -cpuprofile profile.out
func Benchmark_store_SelectLogsRegexBackward(b *testing.B) {
	benchmarkStoreQuery(b, &logproto.QueryRequest{
		Selector:  `{foo="bar"} |~ "fuzz"`,
		Limit:     1000,
		Start:     time.Unix(0, start.UnixNano()),
		End:       time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
		Direction: logproto.BACKWARD,
	})
}

func Benchmark_store_SelectLogsLogQLBackward(b *testing.B) {
	benchmarkStoreQuery(b, &logproto.QueryRequest{
		Selector:  `{foo="bar"} |= "test" != "toto" |= "fuzz"`,
		Limit:     1000,
		Start:     time.Unix(0, start.UnixNano()),
		End:       time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
		Direction: logproto.BACKWARD,
	})
}

func Benchmark_store_SelectLogsRegexForward(b *testing.B) {
	benchmarkStoreQuery(b, &logproto.QueryRequest{
		Selector:  `{foo="bar"} |~ "fuzz"`,
		Limit:     1000,
		Start:     time.Unix(0, start.UnixNano()),
		End:       time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
		Direction: logproto.FORWARD,
	})
}

func Benchmark_store_SelectLogsForward(b *testing.B) {
	benchmarkStoreQuery(b, &logproto.QueryRequest{
		Selector:  `{foo="bar"}`,
		Limit:     1000,
		Start:     time.Unix(0, start.UnixNano()),
		End:       time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
		Direction: logproto.FORWARD,
	})
}

func Benchmark_store_SelectLogsBackward(b *testing.B) {
	benchmarkStoreQuery(b, &logproto.QueryRequest{
		Selector:  `{foo="bar"}`,
		Limit:     1000,
		Start:     time.Unix(0, start.UnixNano()),
		End:       time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()),
		Direction: logproto.BACKWARD,
	})
}
// rm -Rf /tmp/benchmark/chunks/ /tmp/benchmark/index
// go run -mod=vendor ./pkg/storage/hack/main.go
// go test -benchmem -run=^$ -mod=vendor ./pkg/storage -bench=Benchmark_store_SelectSample -memprofile memprofile.out -cpuprofile cpuprofile.out
func Benchmark_store_SelectSample(b *testing.B) {
	var sampleRes []logproto.Sample
	for _, test := range []string{
		`count_over_time({foo="bar"}[5m])`,
		`rate({foo="bar"}[5m])`,
		`bytes_rate({foo="bar"}[5m])`,
		`bytes_over_time({foo="bar"}[5m])`,
	} {
		b.Run(test, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				it, err := chunkStore.SelectSamples(ctx, logql.SelectSampleParams{
					SampleQueryRequest: newSampleQuery(test, time.Unix(0, start.UnixNano()), time.Unix(0, (24*time.Hour.Nanoseconds())+start.UnixNano()), nil),
				})
				if err != nil {
					b.Fatal(err)
				}

				for it.Next() {
					sampleRes = append(sampleRes, it.Sample())
				}
				it.Close()
			}
		})
	}
	log.Print("sample processed ", len(sampleRes))
}

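// benchmarkStoreQuery runs the given query b.N times against chunkStore.
// While it runs, a pprof server listens on :6060 and a background goroutine
// samples the heap every millisecond so printHeap can record the peak
// in-use size.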
func benchmarkStoreQuery(b *testing.B, query *logproto.QueryRequest) {
	b.ReportAllocs()
	// Force GC to run 10x more often; this can help distinguish fast allocation from a leak.
	// debug.SetGCPercent(10)
	stop := make(chan struct{})
	go func() {
		_ = http.ListenAndServe(":6060", http.DefaultServeMux)
	}()
	go func() {
		ticker := time.NewTicker(time.Millisecond)
		for {
			select {
			case <-ticker.C:
				// sample the heap so the max in-use size is captured
				printHeap(b, false)
			case <-stop:
				ticker.Stop()
				return
			}
		}
	}()
	for i := 0; i < b.N; i++ {
		it, err := chunkStore.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: query})
		if err != nil {
			b.Fatal(err)
		}
		res := []logproto.Entry{}
		printHeap(b, true)
		j := uint32(0)
		for it.Next() {
			j++
			printHeap(b, false)
			res = append(res, it.Entry())
			// limit the result like the querier would do.
			if j == query.Limit {
				break
			}
		}
		it.Close()
		printHeap(b, true)
		log.Println("line fetched", len(res))
	}
	close(stop)
}

var maxHeapInuse uint64

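// printHeap reads the current memory stats, tracks the maximum heap-in-use
// size seen so far in maxHeapInuse, and logs both values when show is true.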
func printHeap(b *testing.B, show bool) {
	runtime.ReadMemStats(&m)
	if m.HeapInuse > maxHeapInuse {
		maxHeapInuse = m.HeapInuse
	}
	if show {
		log.Printf("Benchmark %d maxHeapInuse: %d Mbytes\n", b.N, maxHeapInuse/1024/1024)
		log.Printf("Benchmark %d currentHeapInuse: %d Mbytes\n", b.N, m.HeapInuse/1024/1024)
	}
}

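// getLocalStore builds a filesystem-backed Store with a BoltDB index under
// /tmp/benchmark/index and chunks under /tmp/benchmark/chunks, using a single
// v9 schema period starting at the package-level start time.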
func getLocalStore(cm ClientMetrics) Store {
	limits, err := validation.NewOverrides(validation.Limits{
		MaxQueryLength: model.Duration(6000 * time.Hour),
	}, nil)
	if err != nil {
		panic(err)
	}

	storeConfig := Config{
		BoltDBConfig:      local.BoltDBConfig{Directory: "/tmp/benchmark/index"},
		FSConfig:          local.FSConfig{Directory: "/tmp/benchmark/chunks"},
		MaxChunkBatchSize: 10,
	}

	schemaConfig := config.SchemaConfig{
		Configs: []config.PeriodConfig{
			{
				From:       config.DayTime{Time: start},
				IndexType:  "boltdb",
				ObjectType: "filesystem",
				Schema:     "v9",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "index_",
					Period: time.Hour * 168,
				},
			},
		},
	}

	store, err := NewStore(storeConfig, config.ChunkStoreConfig{}, schemaConfig, limits, cm, nil, util_log.Logger)
	if err != nil {
		panic(err)
	}
	return store
}

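// Test_store_SelectLogs exercises SelectLogs against the store fixture,
// covering label matchers, line filters, time-range filtering, and delete
// requests that cover all or part of the queried range.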
func Test_store_SelectLogs(t *testing.T) {
	tests := []struct {
		name     string
		req      *logproto.QueryRequest
		expected []logproto.Stream
	}{
		{
			"all",
			newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), nil, nil),
			[]logproto.Stream{
				{
					Labels: "{foo=\"bar\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from,
							Line:      "1",
						},
						{
							Timestamp: from.Add(time.Millisecond),
							Line:      "2",
						},
						{
							Timestamp: from.Add(2 * time.Millisecond),
							Line:      "3",
						},
						{
							Timestamp: from.Add(3 * time.Millisecond),
							Line:      "4",
						},
						{
							Timestamp: from.Add(4 * time.Millisecond),
							Line:      "5",
						},
						{
							Timestamp: from.Add(5 * time.Millisecond),
							Line:      "6",
						},
					},
				},
				{
					Labels: "{foo=\"bazz\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from,
							Line:      "1",
						},
						{
							Timestamp: from.Add(time.Millisecond),
							Line:      "2",
						},
						{
							Timestamp: from.Add(2 * time.Millisecond),
							Line:      "3",
						},
						{
							Timestamp: from.Add(3 * time.Millisecond),
							Line:      "4",
						},
						{
							Timestamp: from.Add(4 * time.Millisecond),
							Line:      "5",
						},
						{
							Timestamp: from.Add(5 * time.Millisecond),
							Line:      "6",
						},
					},
				},
			},
		},
		{
			"filter regex",
			newQuery("{foo=~\"ba.*\"} |~ \"1|2|3\" !~ \"2|3\"", from, from.Add(6*time.Millisecond), nil, nil),
			[]logproto.Stream{
				{
					Labels: "{foo=\"bar\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from,
							Line:      "1",
						},
					},
				},
				{
					Labels: "{foo=\"bazz\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from,
							Line:      "1",
						},
					},
				},
			},
		},
		{
			"filter matcher",
			newQuery("{foo=\"bar\"}", from, from.Add(6*time.Millisecond), nil, nil),
			[]logproto.Stream{
				{
					Labels: "{foo=\"bar\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from,
							Line:      "1",
						},
						{
							Timestamp: from.Add(time.Millisecond),
							Line:      "2",
						},
						{
							Timestamp: from.Add(2 * time.Millisecond),
							Line:      "3",
						},
						{
							Timestamp: from.Add(3 * time.Millisecond),
							Line:      "4",
						},
						{
							Timestamp: from.Add(4 * time.Millisecond),
							Line:      "5",
						},
						{
							Timestamp: from.Add(5 * time.Millisecond),
							Line:      "6",
						},
					},
				},
			},
		},
		{
			"filter time",
			newQuery("{foo=~\"ba.*\"}", from, from.Add(time.Millisecond), nil, nil),
			[]logproto.Stream{
				{
					Labels: "{foo=\"bar\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from,
							Line:      "1",
						},
					},
				},
				{
					Labels: "{foo=\"bazz\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from,
							Line:      "1",
						},
					},
				},
			},
		},
		{
			"delete covers whole time range",
			newQuery(
				"{foo=~\"ba.*\"}",
				from,
				from.Add(6*time.Millisecond),
				nil,
				[]*logproto.Delete{
					{
						Selector: `{foo="bar"}`,
						Start:    from.Add(-1 * time.Millisecond).UnixNano(),
						End:      from.Add(7 * time.Millisecond).UnixNano(),
					},
					{
						Selector: `{foo="bazz"} |= "6"`,
						Start:    from.Add(-1 * time.Millisecond).UnixNano(),
						End:      from.Add(7 * time.Millisecond).UnixNano(),
					},
				}),
			[]logproto.Stream{
				{
					Labels: "{foo=\"bazz\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from,
							Line:      "1",
						},
						{
							Timestamp: from.Add(time.Millisecond),
							Line:      "2",
						},
						{
							Timestamp: from.Add(2 * time.Millisecond),
							Line:      "3",
						},
						{
							Timestamp: from.Add(3 * time.Millisecond),
							Line:      "4",
						},
						{
							Timestamp: from.Add(4 * time.Millisecond),
							Line:      "5",
						},
					},
				},
			},
		},
		{
			"delete covers partial time range",
			newQuery(
				"{foo=~\"ba.*\"}",
				from,
				from.Add(6*time.Millisecond),
				nil,
				[]*logproto.Delete{
					{
						Selector: `{foo="bar"}`,
						Start:    from.Add(-1 * time.Millisecond).UnixNano(),
						End:      from.Add(3 * time.Millisecond).UnixNano(),
					},
					{
						Selector: `{foo="bazz"} |= "2"`,
						Start:    from.Add(-1 * time.Millisecond).UnixNano(),
						End:      from.Add(3 * time.Millisecond).UnixNano(),
					},
				}),
			[]logproto.Stream{
				{
					Labels: "{foo=\"bar\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from.Add(4 * time.Millisecond),
							Line:      "5",
						},
						{
							Timestamp: from.Add(5 * time.Millisecond),
							Line:      "6",
						},
					},
				},
				{
					Labels: "{foo=\"bazz\"}",
					Entries: []logproto.Entry{
						{
							Timestamp: from,
							Line:      "1",
						},
						{
							Timestamp: from.Add(2 * time.Millisecond),
							Line:      "3",
						},
						{
							Timestamp: from.Add(3 * time.Millisecond),
							Line:      "4",
						},
						{
							Timestamp: from.Add(4 * time.Millisecond),
							Line:      "5",
						},
						{
							Timestamp: from.Add(5 * time.Millisecond),
							Line:      "6",
						},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &store{
				Store: storeFixture,
				cfg: Config{
					MaxChunkBatchSize: 10,
				},
				chunkMetrics: NilMetrics,
			}

			ctx = user.InjectOrgID(context.Background(), "test-user")
			it, err := s.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: tt.req})
			if err != nil {
				t.Errorf("store.SelectLogs() error = %v", err)
				return
			}

			streams, _, err := iter.ReadBatch(it, tt.req.Limit)
			_ = it.Close()
			if err != nil {
				t.Fatalf("error reading batch %s", err)
			}
			assertStream(t, tt.expected, streams.Streams)
		})
	}
}

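// Test_store_SelectSample mirrors Test_store_SelectLogs for sample queries;
// each expected Sample carries the xxhash of the corresponding log line.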
func Test_store_SelectSample(t *testing.T) {
	tests := []struct {
		name     string
		req      *logproto.SampleQueryRequest
		expected []logproto.Series
	}{
		{
			"all",
			newSampleQuery("count_over_time({foo=~\"ba.*\"}[5m])", from, from.Add(6*time.Millisecond), nil),
			[]logproto.Series{
				{
					Labels: "{foo=\"bar\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.UnixNano(),
							Hash:      xxhash.Sum64String("1"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("2"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(2 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("3"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(3 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("4"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(4 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("5"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(5 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("6"),
							Value:     1.,
						},
					},
				},
				{
					Labels: "{foo=\"bazz\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.UnixNano(),
							Hash:      xxhash.Sum64String("1"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("2"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(2 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("3"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(3 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("4"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(4 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("5"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(5 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("6"),
							Value:     1.,
						},
					},
				},
			},
		},
		{
			"filter regex",
			newSampleQuery("rate({foo=~\"ba.*\"} |~ \"1|2|3\" !~ \"2|3\"[1m])", from, from.Add(6*time.Millisecond), nil),
			[]logproto.Series{
				{
					Labels: "{foo=\"bar\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.UnixNano(),
							Hash:      xxhash.Sum64String("1"),
							Value:     1.,
						},
					},
				},
				{
					Labels: "{foo=\"bazz\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.UnixNano(),
							Hash:      xxhash.Sum64String("1"),
							Value:     1.,
						},
					},
				},
			},
		},
		{
			"filter matcher",
			newSampleQuery("count_over_time({foo=\"bar\"}[10m])", from, from.Add(6*time.Millisecond), nil),
			[]logproto.Series{
				{
					Labels: "{foo=\"bar\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.UnixNano(),
							Hash:      xxhash.Sum64String("1"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("2"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(2 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("3"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(3 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("4"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(4 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("5"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(5 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("6"),
							Value:     1.,
						},
					},
				},
			},
		},
		{
			"filter time",
			newSampleQuery("count_over_time({foo=~\"ba.*\"}[1s])", from, from.Add(time.Millisecond), nil),
			[]logproto.Series{
				{
					Labels: "{foo=\"bar\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.UnixNano(),
							Hash:      xxhash.Sum64String("1"),
							Value:     1.,
						},
					},
				},
				{
					Labels: "{foo=\"bazz\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.UnixNano(),
							Hash:      xxhash.Sum64String("1"),
							Value:     1.,
						},
					},
				},
			},
		},
		{
			"delete covers whole time range",
			newSampleQuery(
				"count_over_time({foo=~\"ba.*\"}[5m])",
				from,
				from.Add(6*time.Millisecond),
				[]*logproto.Delete{
					{
						Selector: `{foo="bar"}`,
						Start:    from.Add(-1 * time.Millisecond).UnixNano(),
						End:      from.Add(7 * time.Millisecond).UnixNano(),
					},
					{
						Selector: `{foo="bazz"} |= "6"`,
						Start:    from.Add(-1 * time.Millisecond).UnixNano(),
						End:      from.Add(7 * time.Millisecond).UnixNano(),
					},
				}),
			[]logproto.Series{
				{
					Labels: "{foo=\"bazz\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.UnixNano(),
							Hash:      xxhash.Sum64String("1"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("2"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(2 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("3"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(3 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("4"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(4 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("5"),
							Value:     1.,
						},
					},
				},
			},
		},
		{
			"delete covers partial time range",
			newSampleQuery(
				"count_over_time({foo=~\"ba.*\"}[5m])",
				from,
				from.Add(6*time.Millisecond),
				[]*logproto.Delete{
					{
						Selector: `{foo="bar"}`,
						Start:    from.Add(-1 * time.Millisecond).UnixNano(),
						End:      from.Add(3 * time.Millisecond).UnixNano(),
					},
					{
						Selector: `{foo="bazz"} |= "2"`,
						Start:    from.Add(-1 * time.Millisecond).UnixNano(),
						End:      from.Add(3 * time.Millisecond).UnixNano(),
					},
				}),
			[]logproto.Series{
				{
					Labels: "{foo=\"bar\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.Add(4 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("5"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(5 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("6"),
							Value:     1.,
						},
					},
				},
				{
					Labels: "{foo=\"bazz\"}",
					Samples: []logproto.Sample{
						{
							Timestamp: from.UnixNano(),
							Hash:      xxhash.Sum64String("1"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(2 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("3"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(3 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("4"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(4 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("5"),
							Value:     1.,
						},
						{
							Timestamp: from.Add(5 * time.Millisecond).UnixNano(),
							Hash:      xxhash.Sum64String("6"),
							Value:     1.,
						},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &store{
				Store: storeFixture,
				cfg: Config{
					MaxChunkBatchSize: 10,
				},
				chunkMetrics: NilMetrics,
			}

			ctx = user.InjectOrgID(context.Background(), "test-user")
			it, err := s.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: tt.req})
			if err != nil {
				t.Errorf("store.SelectSamples() error = %v", err)
				return
			}

			series, _, err := iter.ReadSampleBatch(it, uint32(100000))
			_ = it.Close()
			if err != nil {
				t.Fatalf("error reading batch %s", err)
			}
			assertSeries(t, tt.expected, series.Series)
		})
	}
}

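// fakeChunkFilterer filters out any stream labelled foo="bazz". It is used
// below to verify that a registered chunk filterer is applied by
// SelectSamples, SelectLogs, and Series.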
type fakeChunkFilterer struct{}

func (f fakeChunkFilterer) ForRequest(ctx context.Context) chunk.Filterer {
	return f
}

func (f fakeChunkFilterer) ShouldFilter(metric labels.Labels) bool {
	return metric.Get("foo") == "bazz"
}

func Test_ChunkFilterer(t *testing.T) {
	s := &store{
		Store: storeFixture,
		cfg: Config{
			MaxChunkBatchSize: 10,
		},
		chunkMetrics: NilMetrics,
	}
	s.SetChunkFilterer(&fakeChunkFilterer{})
	ctx = user.InjectOrgID(context.Background(), "test-user")
	it, err := s.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: newSampleQuery("count_over_time({foo=~\"ba.*\"}[1s])", from, from.Add(1*time.Hour), nil)})
	if err != nil {
		t.Errorf("store.SelectSamples() error = %v", err)
		return
	}
	defer it.Close()
	for it.Next() {
		v := mustParseLabels(it.Labels())["foo"]
		require.NotEqual(t, "bazz", v)
	}

	logit, err := s.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: newQuery("{foo=~\"ba.*\"}", from, from.Add(1*time.Hour), nil, nil)})
	if err != nil {
		t.Errorf("store.SelectLogs() error = %v", err)
		return
	}
	defer logit.Close()
	for logit.Next() {
		v := mustParseLabels(logit.Labels())["foo"]
		require.NotEqual(t, "bazz", v)
	}
	ids, err := s.Series(ctx, logql.SelectLogParams{QueryRequest: newQuery("{foo=~\"ba.*\"}", from, from.Add(1*time.Hour), nil, nil)})
	require.NoError(t, err)
	for _, id := range ids {
		v := id.Labels["foo"]
		require.NotEqual(t, "bazz", v)
	}
}

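// Test_store_GetSeries checks Series with different chunk batch sizes,
// including a regexp matcher that can only be narrowed down after fetching
// chunks.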
func Test_store_GetSeries(t *testing.T) {
	tests := []struct {
		name      string
		req       *logproto.QueryRequest
		expected  []logproto.SeriesIdentifier
		batchSize int
	}{
		{
			"all",
			newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), nil, nil),
			[]logproto.SeriesIdentifier{
				{Labels: mustParseLabels("{foo=\"bar\"}")},
				{Labels: mustParseLabels("{foo=\"bazz\"}")},
			},
			1,
		},
		{
			"all-single-batch",
			newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), nil, nil),
			[]logproto.SeriesIdentifier{
				{Labels: mustParseLabels("{foo=\"bar\"}")},
				{Labels: mustParseLabels("{foo=\"bazz\"}")},
			},
			5,
		},
		{
			"regexp filter (post chunk fetching)",
			newQuery("{foo=~\"bar.*\"}", from, from.Add(6*time.Millisecond), nil, nil),
			[]logproto.SeriesIdentifier{
				{Labels: mustParseLabels("{foo=\"bar\"}")},
			},
			1,
		},
		{
			"filter matcher",
			newQuery("{foo=\"bar\"}", from, from.Add(6*time.Millisecond), nil, nil),
			[]logproto.SeriesIdentifier{
				{Labels: mustParseLabels("{foo=\"bar\"}")},
			},
			1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &store{
				Store: newMockChunkStore(streamsFixture),
				cfg: Config{
					MaxChunkBatchSize: tt.batchSize,
				},
				chunkMetrics: NilMetrics,
			}
			ctx = user.InjectOrgID(context.Background(), "test-user")
			out, err := s.Series(ctx, logql.SelectLogParams{QueryRequest: tt.req})
			if err != nil {
				t.Errorf("store.Series() error = %v", err)
				return
			}
			require.Equal(t, tt.expected, out)
		})
	}
}

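// Test_store_decodeReq_Matchers verifies that decodeReq appends the metric
// name matcher ({__name__="logs"}) and, for sharded queries, a matcher on
// astmapper.ShardLabel derived from the shard annotation.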
func Test_store_decodeReq_Matchers(t *testing.T) {
	tests := []struct {
		name     string
		req      *logproto.QueryRequest
		matchers []*labels.Matcher
	}{
		{
			"unsharded",
			newQuery("{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond), nil, nil),
			[]*labels.Matcher{
				labels.MustNewMatcher(labels.MatchRegexp, "foo", "ba.*"),
				labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "logs"),
			},
		},
		{
			"sharded",
			newQuery(
				"{foo=~\"ba.*\"}", from, from.Add(6*time.Millisecond),
				[]astmapper.ShardAnnotation{
					{Shard: 1, Of: 2},
				},
				nil,
			),
			[]*labels.Matcher{
				labels.MustNewMatcher(labels.MatchRegexp, "foo", "ba.*"),
				labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "logs"),
				labels.MustNewMatcher(
					labels.MatchEqual,
					astmapper.ShardLabel,
					astmapper.ShardAnnotation{Shard: 1, Of: 2}.String(),
				),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ms, _, _, err := decodeReq(logql.SelectLogParams{QueryRequest: tt.req})
			if err != nil {
				t.Errorf("decodeReq() error = %v", err)
				return
			}
			require.Equal(t, tt.matchers, ms)
		})
	}
}

type timeRange struct {
	from, to time.Time
}

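// TestStore_MultipleBoltDBShippersInConfig configures two consecutive
// boltdb-shipper schema periods (v9 and v11), writes a chunk into each period
// plus one chunk spanning both, and verifies GetChunkRefs returns every added
// chunk, with the overlapping chunk appearing once per store.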
func TestStore_MultipleBoltDBShippersInConfig(t *testing.T) {
	tempDir := t.TempDir()

	limits, err := validation.NewOverrides(validation.Limits{}, nil)
	require.NoError(t, err)

	// config for BoltDB Shipper
	boltdbShipperConfig := shipper.Config{}
	flagext.DefaultValues(&boltdbShipperConfig)
	boltdbShipperConfig.ActiveIndexDirectory = path.Join(tempDir, "index")
	boltdbShipperConfig.SharedStoreType = "filesystem"
	boltdbShipperConfig.CacheLocation = path.Join(tempDir, "boltdb-shipper-cache")
	boltdbShipperConfig.Mode = indexshipper.ModeReadWrite

	// activation dates for the two boltdb-shipper stores
	firstStoreDate := parseDate("2019-01-01")
	secondStoreDate := parseDate("2019-01-02")

	cfg := Config{
		FSConfig:            local.FSConfig{Directory: path.Join(tempDir, "chunks")},
		BoltDBShipperConfig: boltdbShipperConfig,
	}

	schemaConfig := config.SchemaConfig{
		Configs: []config.PeriodConfig{
			{
				From:       config.DayTime{Time: timeToModelTime(firstStoreDate)},
				IndexType:  "boltdb-shipper",
				ObjectType: "filesystem",
				Schema:     "v9",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "index_",
					Period: time.Hour * 168,
				},
			},
			{
				From:       config.DayTime{Time: timeToModelTime(secondStoreDate)},
				IndexType:  "boltdb-shipper",
				ObjectType: "filesystem",
				Schema:     "v11",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "index_",
					Period: time.Hour * 168,
				},
				RowShards: 2,
			},
		},
	}

	store, err := NewStore(cfg, config.ChunkStoreConfig{}, schemaConfig, limits, cm, nil, util_log.Logger)
	require.NoError(t, err)

	// time ranges for adding one chunk to each store, plus one chunk that overlaps both stores
	chunksToBuildForTimeRanges := []timeRange{
		{
			// chunk just for the first store
			secondStoreDate.Add(-3 * time.Hour),
			secondStoreDate.Add(-2 * time.Hour),
		},
		{
			// chunk overlapping both stores
			secondStoreDate.Add(-time.Hour),
			secondStoreDate.Add(time.Hour),
		},
		{
			// chunk just for the second store
			secondStoreDate.Add(2 * time.Hour),
			secondStoreDate.Add(3 * time.Hour),
		},
	}

	// build and add chunks to the store
	addedChunkIDs := map[string]struct{}{}
	for _, tr := range chunksToBuildForTimeRanges {
		chk := newChunk(buildTestStreams(fooLabelsWithName, tr))

		err := store.PutOne(ctx, chk.From, chk.Through, chk)
		require.NoError(t, err)

		addedChunkIDs[schemaConfig.ExternalKey(chk.ChunkRef)] = struct{}{}
	}

	// recreate the store, because boltdb-shipper runs queriers on snapshots which are created every minute and during startup
	store.Stop()

	store, err = NewStore(cfg, config.ChunkStoreConfig{}, schemaConfig, limits, cm, nil, util_log.Logger)
	require.NoError(t, err)

	defer store.Stop()

	// get all the chunks from both stores
	chunks, _, err := store.GetChunkRefs(ctx, "fake", timeToModelTime(firstStoreDate), timeToModelTime(secondStoreDate.Add(24*time.Hour)), newMatchers(fooLabelsWithName.String())...)
	require.NoError(t, err)
	var totalChunks int
	for _, chks := range chunks {
		totalChunks += len(chks)
	}
	// the overlapping chunk is counted twice because it is indexed in both stores
	require.Equal(t, totalChunks, len(addedChunkIDs)+1)

	// check whether we got back all the chunks which were added
	for i := range chunks {
		for _, c := range chunks[i] {
			_, ok := addedChunkIDs[schemaConfig.ExternalKey(c.ChunkRef)]
			require.True(t, ok)
		}
	}
}

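// mustParseLabels parses a label-set string, aborting the test binary if it
// is invalid.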
func mustParseLabels(s string) map[string]string {
	l, err := marshal.NewLabelSet(s)
	if err != nil {
		log.Fatalf("Failed to parse %s: %v", s, err)
	}

	return l
}

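// parseDate parses a YYYY-MM-DD date string, panicking on error.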
func parseDate(in string) time.Time {
	t, err := time.Parse("2006-01-02", in)
	if err != nil {
		panic(err)
	}
	return t
}

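// buildTestStreams builds a stream containing one entry per second across the
// given time range; each line is the entry's own timestamp rendered as a
// string.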
func buildTestStreams(labels labels.Labels, tr timeRange) logproto.Stream {
	stream := logproto.Stream{
		Labels:  labels.String(),
		Hash:    labels.Hash(),
		Entries: []logproto.Entry{},
	}

	for from := tr.from; from.Before(tr.to); from = from.Add(time.Second) {
		stream.Entries = append(stream.Entries, logproto.Entry{
			Timestamp: from,
			Line:      from.String(),
		})
	}

	return stream
}

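// timeToModelTime converts a time.Time to a Prometheus model.Time.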
func timeToModelTime(t time.Time) model.Time {
	return model.TimeFromUnixNano(t.UnixNano())
}

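// Test_OverlappingChunks verifies that entries from two chunks with
// interleaved timestamps are merged into a single, correctly ordered
// backward iteration.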
func Test_OverlappingChunks(t *testing.T) {
	chunks := []chunk.Chunk{
		newChunk(logproto.Stream{
			Labels: `{foo="bar"}`,
			Entries: []logproto.Entry{
				{Timestamp: time.Unix(0, 1), Line: "1"},
				{Timestamp: time.Unix(0, 4), Line: "4"},
			},
		}),
		newChunk(logproto.Stream{
			Labels: `{foo="bar"}`,
			Entries: []logproto.Entry{
				{Timestamp: time.Unix(0, 2), Line: "2"},
				{Timestamp: time.Unix(0, 3), Line: "3"},
			},
		}),
	}
	s := &store{
		Store: &mockChunkStore{chunks: chunks, client: &mockChunkStoreClient{chunks: chunks}},
		cfg: Config{
			MaxChunkBatchSize: 10,
		},
		chunkMetrics: NilMetrics,
	}

	ctx = user.InjectOrgID(context.Background(), "test-user")
	it, err := s.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &logproto.QueryRequest{
		Selector:  `{foo="bar"}`,
		Limit:     1000,
		Direction: logproto.BACKWARD,
		Start:     time.Unix(0, 0),
		End:       time.Unix(0, 10),
	}})
	if err != nil {
		t.Errorf("store.SelectLogs() error = %v", err)
		return
	}
	defer it.Close()
	require.True(t, it.Next())
	require.Equal(t, "4", it.Entry().Line)
	require.True(t, it.Next())
	require.Equal(t, "3", it.Entry().Line)
	require.True(t, it.Next())
	require.Equal(t, "2", it.Entry().Line)
	require.True(t, it.Next())
	require.Equal(t, "1", it.Entry().Line)
	require.False(t, it.Next())
}

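// Test_GetSeries checks Series against a mock chunk store, with an empty
// selector (all series), a selector matching a subset, and one matching
// nothing.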
func Test_GetSeries(t *testing.T) {
	var (
		store = &store{
			Store: newMockChunkStore([]*logproto.Stream{
				{
					Labels: `{foo="bar",buzz="boo"}`,
					Entries: []logproto.Entry{
						{Timestamp: time.Unix(0, 1), Line: "1"},
					},
				},
				{
					Labels: `{foo="buzz"}`,
					Entries: []logproto.Entry{
						{Timestamp: time.Unix(0, 1), Line: "1"},
					},
				},
				{
					Labels: `{bar="foo"}`,
					Entries: []logproto.Entry{
						{Timestamp: time.Unix(0, 1), Line: "1"},
					},
				},
			}),
			cfg: Config{
				MaxChunkBatchSize: 10,
			},
			chunkMetrics: NilMetrics,
		}
		ctx            = user.InjectOrgID(context.Background(), "test-user")
		expectedSeries = []logproto.SeriesIdentifier{
			{
				Labels: map[string]string{"bar": "foo"},
			},
			{
				Labels: map[string]string{"foo": "bar", "buzz": "boo"},
			},
			{
				Labels: map[string]string{"foo": "buzz"},
			},
		}
	)

	for _, tt := range []struct {
		name           string
		req            logql.SelectLogParams
		expectedSeries []logproto.SeriesIdentifier
	}{
		{
			"all series",
			logql.SelectLogParams{
				QueryRequest: &logproto.QueryRequest{
					Selector: ``,
					Start:    time.Unix(0, 0),
					End:      time.Unix(0, 10),
				},
			},
			expectedSeries,
		},
		{
			"selected series",
			logql.SelectLogParams{
				QueryRequest: &logproto.QueryRequest{
					Selector: `{buzz=~".oo"}`,
					Start:    time.Unix(0, 0),
					End:      time.Unix(0, 10),
				},
			},
			[]logproto.SeriesIdentifier{
				{
					Labels: map[string]string{"foo": "bar", "buzz": "boo"},
				},
			},
		},
		{
			"no match",
			logql.SelectLogParams{
				QueryRequest: &logproto.QueryRequest{
					Selector: `{buzz=~"foo"}`,
					Start:    time.Unix(0, 0),
					End:      time.Unix(0, 10),
				},
			},
			[]logproto.SeriesIdentifier{},
		},
	} {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			series, err := store.Series(ctx, tt.req)
			require.NoError(t, err)
			require.Equal(t, tt.expectedSeries, series)
		})
	}
}

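// TestGetIndexStoreTableRanges builds a schema with interleaved index types
// and verifies that getIndexStoreTableRanges returns one table range per
// contiguous run of periods using the requested index type, with the last
// period's range extending to the maximum representable time.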
func TestGetIndexStoreTableRanges(t *testing.T) {
	now := model.Now()
	schemaConfig := config.SchemaConfig{
		Configs: []config.PeriodConfig{
			{
				From:       config.DayTime{Time: now.Add(30 * 24 * time.Hour)},
				IndexType:  config.BoltDBShipperType,
				ObjectType: "filesystem",
				Schema:     "v9",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "index_",
					Period: time.Hour * 24,
				},
			},
			{
				From:       config.DayTime{Time: now.Add(20 * 24 * time.Hour)},
				IndexType:  config.BoltDBShipperType,
				ObjectType: "filesystem",
				Schema:     "v11",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "index_",
					Period: time.Hour * 24,
				},
				RowShards: 2,
			},
			{
				From:       config.DayTime{Time: now.Add(15 * 24 * time.Hour)},
				IndexType:  config.TSDBType,
				ObjectType: "filesystem",
				Schema:     "v11",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "index_",
					Period: time.Hour * 24,
				},
				RowShards: 2,
			},
			{
				From:       config.DayTime{Time: now.Add(10 * 24 * time.Hour)},
				IndexType:  config.StorageTypeBigTable,
				ObjectType: "filesystem",
				Schema:     "v11",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "index_",
					Period: time.Hour * 24,
				},
				RowShards: 2,
			},
			{
				From:       config.DayTime{Time: now.Add(5 * 24 * time.Hour)},
				IndexType:  config.TSDBType,
				ObjectType: "filesystem",
				Schema:     "v11",
				IndexTables: config.PeriodicTableConfig{
					Prefix: "index_",
					Period: time.Hour * 24,
				},
				RowShards: 2,
			},
		},
	}

	require.Equal(t, config.TableRanges{
		{
			Start:        schemaConfig.Configs[0].From.Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			End:          schemaConfig.Configs[1].From.Add(-time.Millisecond).Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			PeriodConfig: &schemaConfig.Configs[0],
		},
		{
			Start:        schemaConfig.Configs[1].From.Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			End:          schemaConfig.Configs[2].From.Add(-time.Millisecond).Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			PeriodConfig: &schemaConfig.Configs[1],
		},
	}, getIndexStoreTableRanges(config.BoltDBShipperType, schemaConfig.Configs))

	require.Equal(t, config.TableRanges{
		{
			Start:        schemaConfig.Configs[3].From.Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			End:          schemaConfig.Configs[4].From.Add(-time.Millisecond).Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			PeriodConfig: &schemaConfig.Configs[3],
		},
	}, getIndexStoreTableRanges(config.StorageTypeBigTable, schemaConfig.Configs))

	require.Equal(t, config.TableRanges{
		{
			Start:        schemaConfig.Configs[2].From.Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			End:          schemaConfig.Configs[3].From.Add(-time.Millisecond).Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			PeriodConfig: &schemaConfig.Configs[2],
		},
		{
			Start:        schemaConfig.Configs[4].From.Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			End:          model.Time(math.MaxInt64).Unix() / int64(schemaConfig.Configs[0].IndexTables.Period/time.Second),
			PeriodConfig: &schemaConfig.Configs[4],
		},
	}, getIndexStoreTableRanges(config.TSDBType, schemaConfig.Configs))
}