github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/ingester/checkpoint_test.go

package ingester

import (
	"context"
	fmt "fmt"
	"io/ioutil"
	"sort"
	"testing"
	"time"

	"github.com/grafana/dskit/services"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/user"

	"github.com/grafana/loki/pkg/chunkenc"
	"github.com/grafana/loki/pkg/ingester/client"
	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql/log"
	"github.com/grafana/loki/pkg/runtime"
	"github.com/grafana/loki/pkg/storage/chunk"
	"github.com/grafana/loki/pkg/validation"
)

// small util for ensuring data exists as we expect
func ensureIngesterData(ctx context.Context, t *testing.T, start, end time.Time, i Interface) {
	result := mockQuerierServer{
		ctx: ctx,
	}
	err := i.Query(&logproto.QueryRequest{
		Selector: `{foo="bar"}`,
		Limit:    100,
		Start:    start,
		End:      end,
	}, &result)

	ln := int(end.Sub(start) / time.Second)
	require.NoError(t, err)
	require.Len(t, result.resps, 1)
	require.Len(t, result.resps[0].Streams, 2)
	require.Len(t, result.resps[0].Streams[0].Entries, ln)
	require.Len(t, result.resps[0].Streams[1].Entries, ln)
}

func defaultIngesterTestConfigWithWAL(t *testing.T, walDir string) Config {
	ingesterConfig := defaultIngesterTestConfig(t)
	ingesterConfig.MaxTransferRetries = 0
	ingesterConfig.WAL.Enabled = true
	ingesterConfig.WAL.Dir = walDir
	ingesterConfig.WAL.CheckpointDuration = time.Second

	return ingesterConfig
}

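// TestIngesterWAL pushes data to a WAL-enabled ingester, then restarts it twice:
// first before any checkpoint exists to confirm recovery from WAL segments alone,
// then again after a checkpoint has been cut to confirm recovery from checkpoint+segments.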
func TestIngesterWAL(t *testing.T) {
	walDir := t.TempDir()

	ingesterConfig := defaultIngesterTestConfigWithWAL(t, walDir)

	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(t, err)

	newStore := func() *mockStore {
		return &mockStore{
			chunks: map[string][]chunk.Chunk{},
		}
	}

	i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
	require.NoError(t, err)
	require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

	req := logproto.PushRequest{
		Streams: []logproto.Stream{
			{
				Labels: `{foo="bar",bar="baz1"}`,
			},
			{
				Labels: `{foo="bar",bar="baz2"}`,
			},
		},
	}

	start := time.Now()
	steps := 10
	end := start.Add(time.Second * time.Duration(steps))

	for i := 0; i < steps; i++ {
		req.Streams[0].Entries = append(req.Streams[0].Entries, logproto.Entry{
			Timestamp: start.Add(time.Duration(i) * time.Second),
			Line:      fmt.Sprintf("line %d", i),
		})
		req.Streams[1].Entries = append(req.Streams[1].Entries, logproto.Entry{
			Timestamp: start.Add(time.Duration(i) * time.Second),
			Line:      fmt.Sprintf("line %d", i),
		})
	}

	ctx := user.InjectOrgID(context.Background(), "test")
	_, err = i.Push(ctx, &req)
	require.NoError(t, err)

	ensureIngesterData(ctx, t, start, end, i)

	require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))

	// ensure we haven't checkpointed yet
	expectCheckpoint(t, walDir, false, time.Second)

	// restart the ingester
	i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
	require.NoError(t, err)
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))

	// ensure we've recovered data from wal segments
	ensureIngesterData(ctx, t, start, end, i)

	// ensure we have checkpointed now
	expectCheckpoint(t, walDir, true, ingesterConfig.WAL.CheckpointDuration*5) // give a bit of buffer

	require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))

	// restart the ingester
	i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
	require.NoError(t, err)
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))

	// ensure we've recovered data from checkpoint+wal segments
	ensureIngesterData(ctx, t, start, end, i)
}

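// TestIngesterWALIgnoresStreamLimits verifies that WAL replay is exempt from per-tenant
// stream limits: after restarting with MaxLocalStreamsPerUser set to -1, previously written
// streams are still recovered, while a regular push creating a new stream is rejected.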
func TestIngesterWALIgnoresStreamLimits(t *testing.T) {
	walDir := t.TempDir()

	ingesterConfig := defaultIngesterTestConfigWithWAL(t, walDir)

	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(t, err)

	newStore := func() *mockStore {
		return &mockStore{
			chunks: map[string][]chunk.Chunk{},
		}
	}

	i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
	require.NoError(t, err)
	require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

	req := logproto.PushRequest{
		Streams: []logproto.Stream{
			{
				Labels: `{foo="bar",bar="baz1"}`,
			},
			{
				Labels: `{foo="bar",bar="baz2"}`,
			},
		},
	}

	start := time.Now()
	steps := 10
	end := start.Add(time.Second * time.Duration(steps))

	for i := 0; i < steps; i++ {
		req.Streams[0].Entries = append(req.Streams[0].Entries, logproto.Entry{
			Timestamp: start.Add(time.Duration(i) * time.Second),
			Line:      fmt.Sprintf("line %d", i),
		})
		req.Streams[1].Entries = append(req.Streams[1].Entries, logproto.Entry{
			Timestamp: start.Add(time.Duration(i) * time.Second),
			Line:      fmt.Sprintf("line %d", i),
		})
	}

	ctx := user.InjectOrgID(context.Background(), "test")
	_, err = i.Push(ctx, &req)
	require.NoError(t, err)

	ensureIngesterData(ctx, t, start, end, i)

	require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))

	// Limit all streams except those written during WAL recovery.
	limitCfg := defaultLimitsTestConfig()
	limitCfg.MaxLocalStreamsPerUser = -1
	limits, err = validation.NewOverrides(limitCfg, nil)
	require.NoError(t, err)

	// restart the ingester
	i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
	require.NoError(t, err)
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))

	// ensure we've recovered data from wal segments
	ensureIngesterData(ctx, t, start, end, i)

	req = logproto.PushRequest{
		Streams: []logproto.Stream{
			{
				Labels: `{foo="new"}`,
				Entries: []logproto.Entry{
					{
						Timestamp: start,
						Line:      "hi",
					},
				},
			},
		},
	}

	ctx = user.InjectOrgID(context.Background(), "test")
	_, err = i.Push(ctx, &req)
	// Ensure regular pushes error due to stream limits.
	require.Error(t, err)
}

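// TestUnflushedChunks checks that unflushedChunks returns only the descriptors whose
// flushed timestamp is still unset.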
func TestUnflushedChunks(t *testing.T) {
	chks := []chunkDesc{
		{
			flushed: time.Now(),
		},
		{},
		{
			flushed: time.Now(),
		},
	}

	require.Equal(t, 1, len(unflushedChunks(chks)))
}

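// TestIngesterWALBackpressureSegments writes roughly 5x the WAL replay memory ceiling and
// restarts the ingester before any checkpoint exists, so replay of raw WAL segments must
// succeed under the configured memory ceiling.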
func TestIngesterWALBackpressureSegments(t *testing.T) {
	walDir := t.TempDir()

	ingesterConfig := defaultIngesterTestConfigWithWAL(t, walDir)
	ingesterConfig.WAL.ReplayMemoryCeiling = 1000

	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(t, err)

	newStore := func() *mockStore {
		return &mockStore{
			chunks: map[string][]chunk.Chunk{},
		}
	}

	i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
	require.NoError(t, err)
	require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

	start := time.Now()
	// Replay data 5x larger than the ceiling.
	totalSize := int(5 * i.cfg.WAL.ReplayMemoryCeiling)
	req, written := mkPush(start, totalSize)
	require.Equal(t, totalSize, written)

	ctx := user.InjectOrgID(context.Background(), "test")
	_, err = i.Push(ctx, req)
	require.NoError(t, err)

	require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))

	// ensure we haven't checkpointed yet
	expectCheckpoint(t, walDir, false, time.Second)

	// restart the ingester, ensuring we replayed from WAL.
	i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
	require.NoError(t, err)
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
}

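// TestIngesterWALBackpressureCheckpoint is the checkpoint counterpart of the segment test:
// it writes roughly 5x the replay memory ceiling, waits for a checkpoint to be cut, and then
// restarts the ingester to ensure replay from the checkpoint also succeeds.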
func TestIngesterWALBackpressureCheckpoint(t *testing.T) {
	walDir := t.TempDir()

	ingesterConfig := defaultIngesterTestConfigWithWAL(t, walDir)
	ingesterConfig.WAL.ReplayMemoryCeiling = 1000

	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(t, err)

	newStore := func() *mockStore {
		return &mockStore{
			chunks: map[string][]chunk.Chunk{},
		}
	}

	i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
	require.NoError(t, err)
	require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

	start := time.Now()
	// Replay data 5x larger than the ceiling.
	totalSize := int(5 * i.cfg.WAL.ReplayMemoryCeiling)
	req, written := mkPush(start, totalSize)
	require.Equal(t, totalSize, written)

	ctx := user.InjectOrgID(context.Background(), "test")
	_, err = i.Push(ctx, req)
	require.NoError(t, err)

	// ensure we have checkpointed now
	expectCheckpoint(t, walDir, true, ingesterConfig.WAL.CheckpointDuration*5) // give a bit of buffer

	require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))

	// restart the ingester, ensuring we can replay from the checkpoint as well.
	i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
	require.NoError(t, err)
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
	require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
}

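// expectCheckpoint polls walDir (checking roughly 10 times within max) until the presence
// of a checkpoint matches shouldExist, failing the test if the deadline is reached first.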
func expectCheckpoint(t *testing.T, walDir string, shouldExist bool, max time.Duration) {
	once := make(chan struct{}, 1)
	once <- struct{}{}

	deadline := time.After(max)
	for {
		select {
		case <-deadline:
			require.Fail(t, "timeout while waiting for checkpoint existence:", shouldExist)
		case <-once: // Trick to ensure we check immediately before deferring to ticker.
		default:
			<-time.After(max / 10) // check 10x over the duration
		}

		fs, err := ioutil.ReadDir(walDir)
		require.Nil(t, err)
		var found bool
		for _, f := range fs {
			if _, err := checkpointIndex(f.Name(), false); err == nil {
				found = true
			}
		}
		if found == shouldExist {
			return
		}
	}
}

// mkPush makes approximately totalSize bytes of log lines across min(500, totalSize) streams
func mkPush(start time.Time, totalSize int) (*logproto.PushRequest, int) {
	var written int
	req := &logproto.PushRequest{
		Streams: []logproto.Stream{
			{
				Labels: `{foo="bar",bar="baz1"}`,
			},
		},
	}
	totalStreams := 500
	if totalStreams > totalSize {
		totalStreams = totalSize
	}

	for i := 0; i < totalStreams; i++ {
		req.Streams = append(req.Streams, logproto.Stream{
			Labels: fmt.Sprintf(`{foo="bar",i="%d"}`, i),
		})

		for j := 0; j < totalSize/totalStreams; j++ {
			req.Streams[i].Entries = append(req.Streams[i].Entries, logproto.Entry{
				Timestamp: start.Add(time.Duration(j) * time.Nanosecond),
				Line:      string([]byte{1}),
			})
			written++
		}
	}
	return req, written
}

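// ingesterInstancesFunc lets a plain function satisfy the getInstances method expected by
// the series/streams iterators exercised in these tests.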
type ingesterInstancesFunc func() []*instance

func (i ingesterInstancesFunc) getInstances() []*instance {
	return i()
}

var currentSeries *Series

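// buildStreams creates 10 streams with random labels and 15k entries each, used as
// benchmark input.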
func buildStreams() []logproto.Stream {
	streams := make([]logproto.Stream, 10)
	for i := range streams {
		labels := makeRandomLabels().String()
		entries := make([]logproto.Entry, 15*1e3)
		for j := range entries {
			entries[j] = logproto.Entry{
				Timestamp: time.Unix(0, int64(j)),
				Line:      fmt.Sprintf("entry for line %d", j),
			}
		}
		streams[i] = logproto.Stream{
			Labels:  labels,
			Entries: entries,
		}
	}
	return streams
}

var (
	stream1 = logproto.Stream{
		Labels: labels.Labels{labels.Label{Name: "stream", Value: "1"}}.String(),
		Entries: []logproto.Entry{
			{
				Timestamp: time.Unix(0, 1),
				Line:      "1",
			},
			{
				Timestamp: time.Unix(0, 2),
				Line:      "2",
			},
		},
	}
	stream2 = logproto.Stream{
		Labels: labels.Labels{labels.Label{Name: "stream", Value: "2"}}.String(),
		Entries: []logproto.Entry{
			{
				Timestamp: time.Unix(0, 1),
				Line:      "3",
			},
			{
				Timestamp: time.Unix(0, 2),
				Line:      "4",
			},
		},
	}
)

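// Test_SeriesIterator pushes two known streams into three instances, then walks the
// streams iterator, rebuilding each stream from its checkpointed chunk data and asserting
// the recovered entries match what was pushed.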
func Test_SeriesIterator(t *testing.T) {
	var instances []*instance

	// NB (owen-d): Not sure why we have these overrides
	l := defaultLimitsTestConfig()
	l.MaxLocalStreamsPerUser = 1000
	l.IngestionRateMB = 1e4
	l.IngestionBurstSizeMB = 1e4

	limits, err := validation.NewOverrides(l, nil)
	require.NoError(t, err)
	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)

	for i := 0; i < 3; i++ {
		inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil)
		require.Nil(t, err)
		require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream1}}))
		require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream2}}))
		instances = append(instances, inst)
	}

	iter := newStreamsIterator(ingesterInstancesFunc(func() []*instance {
		return instances
	}))

	for i := 0; i < 3; i++ {
		var streams []logproto.Stream
		for j := 0; j < 2; j++ {
			iter.Next()
			assert.Equal(t, fmt.Sprintf("%d", i), iter.Stream().UserID)
			memchunk, err := chunkenc.MemchunkFromCheckpoint(iter.Stream().Chunks[0].Data, iter.Stream().Chunks[0].Head, chunkenc.UnorderedHeadBlockFmt, 0, 0)
			require.NoError(t, err)
			it, err := memchunk.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, log.NewNoopPipeline().ForStream(nil))
			require.NoError(t, err)
			stream := logproto.Stream{
				Labels: logproto.FromLabelAdaptersToLabels(iter.Stream().Labels).String(),
			}
			for it.Next() {
				stream.Entries = append(stream.Entries, it.Entry())
			}
			require.NoError(t, it.Close())
			streams = append(streams, stream)
		}
		sort.Slice(streams, func(i, j int) bool { return streams[i].Labels < streams[j].Labels })
		require.Equal(t, stream1, streams[0])
		require.Equal(t, stream2, streams[1])
	}

	require.False(t, iter.Next())
	require.Nil(t, iter.Error())
}

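// Benchmark_SeriesIterator measures iterating every stream across 10 instances via the
// ingester series iterator, assigning each result to a package-level sink (currentSeries)
// so the work is not optimized away.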
func Benchmark_SeriesIterator(b *testing.B) {
	streams := buildStreams()
	instances := make([]*instance, 10)

	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(b, err)
	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)

	for i := range instances {
		inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil)

		require.NoError(b,
			inst.Push(context.Background(), &logproto.PushRequest{
				Streams: streams,
			}),
		)
		instances[i] = inst
	}
	it := newIngesterSeriesIter(ingesterInstancesFunc(func() []*instance {
		return instances
	}))
	defer it.Stop()

	b.ResetTimer()
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		iter := it.Iter()
		for iter.Next() {
			currentSeries = iter.Stream()
		}
		require.NoError(b, iter.Error())
	}
}

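// noOpWalLogger satisfies the checkpoint WAL dependency of WALCheckpointWriter while
// discarding all records, so the benchmark below measures serialization cost rather than
// disk writes.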
type noOpWalLogger struct{}

func (noOpWalLogger) Log(recs ...[]byte) error { return nil }
func (noOpWalLogger) Close() error             { return nil }
func (noOpWalLogger) Dir() string              { return "" }

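// Benchmark_CheckpointWrite measures writing a single Series containing 10 filled chunks
// through WALCheckpointWriter, with the underlying WAL stubbed out by noOpWalLogger.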
func Benchmark_CheckpointWrite(b *testing.B) {
	writer := WALCheckpointWriter{
		metrics:       NilMetrics,
		checkpointWAL: noOpWalLogger{},
	}
	lbs := labels.Labels{labels.Label{Name: "foo", Value: "bar"}}
	chunks := buildChunks(b, 10)
	b.ReportAllocs()
	b.ResetTimer()

	for n := 0; n < b.N; n++ {
		require.NoError(b, writer.Write(&Series{
			UserID:      "foo",
			Fingerprint: lbs.Hash(),
			Labels:      logproto.FromLabelsToLabelAdapters(lbs),
			Chunks:      chunks,
		}))
	}
}

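// buildChunks fills `size` memchunks with test data and converts them to their wire
// representation as written to checkpoints.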
func buildChunks(t testing.TB, size int) []Chunk {
	descs := make([]chunkDesc, 0, size)
	chks := make([]Chunk, size)

	for i := 0; i < size; i++ {
		// build chunks of 256k blocks, 1.5MB target size. Same as default config.
		c := chunkenc.NewMemChunk(chunkenc.EncGZIP, chunkenc.UnorderedHeadBlockFmt, 256*1024, 1500*1024)
		fillChunk(t, c)
		descs = append(descs, chunkDesc{
			chunk: c,
		})
	}

	there, err := toWireChunks(descs, nil)
	require.NoError(t, err)
	for i := range there {
		chks[i] = there[i].Chunk
	}
	return chks
}

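// TestIngesterWALReplaysUnorderedToOrdered writes out-of-order data with unordered writes
// enabled (optionally also after a checkpoint), then restarts the ingester with unordered
// writes disabled and verifies replay still recovers all entries.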
func TestIngesterWALReplaysUnorderedToOrdered(t *testing.T) {
	for _, waitForCheckpoint := range []bool{false, true} {
		t.Run(fmt.Sprintf("checkpoint-%v", waitForCheckpoint), func(t *testing.T) {
			walDir := t.TempDir()

			ingesterConfig := defaultIngesterTestConfigWithWAL(t, walDir)

			// First launch the ingester with unordered writes enabled
			dft := defaultLimitsTestConfig()
			dft.UnorderedWrites = true
			limits, err := validation.NewOverrides(dft, nil)
			require.NoError(t, err)

			newStore := func() *mockStore {
				return &mockStore{
					chunks: map[string][]chunk.Chunk{},
				}
			}

			i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
			require.NoError(t, err)
			require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
			defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

			req := logproto.PushRequest{
				Streams: []logproto.Stream{
					{
						Labels: `{foo="bar",bar="baz1"}`,
					},
					{
						Labels: `{foo="bar",bar="baz2"}`,
					},
				},
			}

			start := time.Now()
			steps := 10
			end := start.Add(time.Second * time.Duration(steps))

			// Write data out of order
			for i := steps - 1; i >= 0; i-- {
				req.Streams[0].Entries = append(req.Streams[0].Entries, logproto.Entry{
					Timestamp: start.Add(time.Duration(i) * time.Second),
					Line:      fmt.Sprintf("line %d", i),
				})
				req.Streams[1].Entries = append(req.Streams[1].Entries, logproto.Entry{
					Timestamp: start.Add(time.Duration(i) * time.Second),
					Line:      fmt.Sprintf("line %d", i),
				})
			}

			ctx := user.InjectOrgID(context.Background(), "test")
			_, err = i.Push(ctx, &req)
			require.NoError(t, err)

			if waitForCheckpoint {
				// Ensure we have checkpointed now
				expectCheckpoint(t, walDir, true, ingesterConfig.WAL.CheckpointDuration*10) // give a bit of buffer

				// Add some more data after the checkpoint
				tmp := end
				end = end.Add(time.Second * time.Duration(steps))
				req.Streams[0].Entries = nil
				req.Streams[1].Entries = nil
				// Write data out of order again
				for i := steps - 1; i >= 0; i-- {
					req.Streams[0].Entries = append(req.Streams[0].Entries, logproto.Entry{
						Timestamp: tmp.Add(time.Duration(i) * time.Second),
						Line:      fmt.Sprintf("line %d", steps+i),
					})
					req.Streams[1].Entries = append(req.Streams[1].Entries, logproto.Entry{
						Timestamp: tmp.Add(time.Duration(i) * time.Second),
						Line:      fmt.Sprintf("line %d", steps+i),
					})
				}

				_, err = i.Push(ctx, &req)
				require.NoError(t, err)
			}

			ensureIngesterData(ctx, t, start, end, i)

			require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))

			// Now disable unordered writes
			limitCfg := defaultLimitsTestConfig()
			limitCfg.UnorderedWrites = false
			limits, err = validation.NewOverrides(limitCfg, nil)
			require.NoError(t, err)

			// restart the ingester
			i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil)
			require.NoError(t, err)
			defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
			require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))

			// ensure we've recovered data from wal segments
			ensureIngesterData(ctx, t, start, end, i)
		})
	}
}