github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/ingester/stream_test.go

package ingester

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"net/http"
	"testing"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/httpgrpc"

	"github.com/grafana/loki/pkg/chunkenc"
	"github.com/grafana/loki/pkg/iter"
	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql/log"
	"github.com/grafana/loki/pkg/util/flagext"
	"github.com/grafana/loki/pkg/validation"
)

var (
	// countExtractor returns a sample extractor that emits a value of 1 per log line,
	// turning log entries into count samples.
	countExtractor = func() log.StreamSampleExtractor {
		ex, err := log.NewLineSampleExtractor(log.CountExtractor, nil, nil, false, false)
		if err != nil {
			panic(err)
		}
		return ex.ForStream(labels.Labels{})
	}
)

// TestMaxReturnedStreamsErrors verifies that the per-push error report is capped at
// MaxReturnedErrors entries (or unlimited when the limit is 0).
func TestMaxReturnedStreamsErrors(t *testing.T) {
	numLogs := 100

	tt := []struct {
		name       string
		limit      int
		expectErrs int
	}{
		{"10", 10, 10},
		{"unlimited", 0, numLogs},
	}

	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(t, err)
	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			cfg := defaultConfig()
			cfg.MaxReturnedErrors = tc.limit
			s := newStream(
				cfg,
				limiter,
				"fake",
				model.Fingerprint(0),
				labels.Labels{
					{Name: "foo", Value: "bar"},
				},
				true,
				NilMetrics,
			)

			_, err := s.Push(context.Background(), []logproto.Entry{
				{Timestamp: time.Unix(int64(numLogs), 0), Line: "log"},
			}, recordPool.GetRecord(), 0, true)
			require.NoError(t, err)

			newLines := make([]logproto.Entry, numLogs)
			for i := 0; i < numLogs; i++ {
				newLines[i] = logproto.Entry{Timestamp: time.Unix(int64(i), 0), Line: "log"}
			}

			var expected bytes.Buffer
			for i := 0; i < tc.expectErrs; i++ {
				fmt.Fprintf(&expected,
					"entry with timestamp %s ignored, reason: 'entry too far behind, oldest acceptable timestamp is: %s' for stream: {foo=\"bar\"},\n",
					time.Unix(int64(i), 0).String(),
					time.Unix(int64(numLogs), 0).Format(time.RFC3339),
				)
			}

			fmt.Fprintf(&expected, "total ignored: %d out of %d", numLogs, numLogs)
			expectErr := httpgrpc.Errorf(http.StatusBadRequest, expected.String())

			_, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true)
			require.Error(t, err)
			require.Equal(t, expectErr.Error(), err.Error())
		})
	}
}

func TestPushDeduplication(t *testing.T) {
	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(t, err)
	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)

	s := newStream(
		defaultConfig(),
		limiter,
		"fake",
		model.Fingerprint(0),
		labels.Labels{
			{Name: "foo", Value: "bar"},
		},
		true,
		NilMetrics,
	)

	written, err := s.Push(context.Background(), []logproto.Entry{
		{Timestamp: time.Unix(1, 0), Line: "test"},
		{Timestamp: time.Unix(1, 0), Line: "test"},
		{Timestamp: time.Unix(1, 0), Line: "newer, better test"},
	}, recordPool.GetRecord(), 0, true)
	require.NoError(t, err)
	require.Len(t, s.chunks, 1)
	require.Equal(t, s.chunks[0].chunk.Size(), 2,
		"expected exact duplicate to be dropped and newer content with same timestamp to be appended")
	require.Equal(t, len("test"+"newer, better test"), written)
}

// TestPushRejectOldCounter verifies that a push whose counter is not greater than the
// stream's internal entry counter is rejected with ErrEntriesExist.
func TestPushRejectOldCounter(t *testing.T) {
	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(t, err)
	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)

	s := newStream(
		defaultConfig(),
		limiter,
		"fake",
		model.Fingerprint(0),
		labels.Labels{
			{Name: "foo", Value: "bar"},
		},
		true,
		NilMetrics,
	)

	// counter should be 2 after this push since one of the duplicate lines is deduped
	_, err = s.Push(context.Background(), []logproto.Entry{
		{Timestamp: time.Unix(1, 0), Line: "test"},
		{Timestamp: time.Unix(1, 0), Line: "test"},
		{Timestamp: time.Unix(1, 0), Line: "newer, better test"},
	}, recordPool.GetRecord(), 0, true)
	require.NoError(t, err)
	require.Len(t, s.chunks, 1)
	require.Equal(t, s.chunks[0].chunk.Size(), 2,
		"expected exact duplicate to be dropped and newer content with same timestamp to be appended")

	// fail to push with a counter <= the stream's internal counter
	_, err = s.Push(context.Background(), []logproto.Entry{
		{Timestamp: time.Unix(1, 0), Line: "test"},
	}, recordPool.GetRecord(), 2, true)
	require.Equal(t, ErrEntriesExist, err)

	// succeed with a greater counter
	_, err = s.Push(context.Background(), []logproto.Entry{
		{Timestamp: time.Unix(1, 0), Line: "test"},
	}, recordPool.GetRecord(), 3, true)
	require.Nil(t, err)
}

// TestStreamIterator verifies forward and backward iteration over randomly chosen
// time ranges spanning multiple chunks.
func TestStreamIterator(t *testing.T) {
	const chunks = 3
	const entries = 100

	for _, chk := range []struct {
		name string
		new  func() *chunkenc.MemChunk
	}{
		{"gzipChunk", func() *chunkenc.MemChunk {
			return chunkenc.NewMemChunk(chunkenc.EncGZIP, chunkenc.UnorderedHeadBlockFmt, 256*1024, 0)
		}},
	} {
		t.Run(chk.name, func(t *testing.T) {
			var s stream
			for i := int64(0); i < chunks; i++ {
				chunk := chk.new()
				for j := int64(0); j < entries; j++ {
					k := i*entries + j
					err := chunk.Append(&logproto.Entry{
						Timestamp: time.Unix(k, 0),
						Line:      fmt.Sprintf("line %d", k),
					})
					require.NoError(t, err)
				}
				s.chunks = append(s.chunks, chunkDesc{chunk: chunk})
			}

			for i := 0; i < 100; i++ {
				from := rand.Intn(chunks*entries - 1)
				len := rand.Intn(chunks*entries-from) + 1
				iter, err := s.Iterator(context.TODO(), nil, time.Unix(int64(from), 0), time.Unix(int64(from+len), 0), logproto.FORWARD, log.NewNoopPipeline().ForStream(s.labels))
				require.NotNil(t, iter)
				require.NoError(t, err)
				testIteratorForward(t, iter, int64(from), int64(from+len))
				_ = iter.Close()
			}

			for i := 0; i < 100; i++ {
				from := rand.Intn(entries - 1)
				len := rand.Intn(chunks*entries-from) + 1
				iter, err := s.Iterator(context.TODO(), nil, time.Unix(int64(from), 0), time.Unix(int64(from+len), 0), logproto.BACKWARD, log.NewNoopPipeline().ForStream(s.labels))
				require.NotNil(t, iter)
				require.NoError(t, err)
				testIteratorBackward(t, iter, int64(from), int64(from+len))
				_ = iter.Close()
			}
		})
	}
}

func TestUnorderedPush(t *testing.T) {
	cfg := defaultIngesterTestConfig(t)
	cfg.MaxChunkAge = 10 * time.Second
	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(t, err)
	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)

	s := newStream(
		&cfg,
		limiter,
		"fake",
		model.Fingerprint(0),
		labels.Labels{
			{Name: "foo", Value: "bar"},
		},
		true,
		NilMetrics,
	)
	for _, x := range []struct {
		cutBefore bool
		entries   []logproto.Entry
		err       bool
		written   int
	}{
		{
			entries: []logproto.Entry{
				{Timestamp: time.Unix(2, 0), Line: "x"},
				{Timestamp: time.Unix(1, 0), Line: "x"},
				{Timestamp: time.Unix(2, 0), Line: "x"}, // duplicate ts/line is ignored
				{Timestamp: time.Unix(2, 0), Line: "x"}, // duplicate ts/line is ignored
				{Timestamp: time.Unix(10, 0), Line: "x"},
			},
			written: 4, // 1 ignored
		},
		// highest ts is now 10, validity bound is (10-10/2) = 5
		{
			entries: []logproto.Entry{
				{Timestamp: time.Unix(4, 0), Line: "x"}, // ordering err, too far
				{Timestamp: time.Unix(8, 0), Line: "x"},
				{Timestamp: time.Unix(9, 0), Line: "x"},
			},
			err:     true,
			written: 2, // 1 ignored
		},
		// force a chunk cut and then push data overlapping with the previous chunk.
		// This ultimately ensures the iterator implementation respects unordered chunks.
		{
			cutBefore: true,
			entries: []logproto.Entry{
				{Timestamp: time.Unix(11, 0), Line: "x"},
				{Timestamp: time.Unix(7, 0), Line: "x"},
			},
			written: 2,
		},
	} {
		if x.cutBefore {
			_ = s.cutChunk(context.Background())
		}
		written, err := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true)
		if x.err {
			require.NotNil(t, err)
		} else {
			require.Nil(t, err)
		}
		require.Equal(t, x.written, written)
	}

	require.Equal(t, 2, len(s.chunks))

	exp := []logproto.Entry{
		{Timestamp: time.Unix(1, 0), Line: "x"},
		{Timestamp: time.Unix(2, 0), Line: "x"},
		{Timestamp: time.Unix(7, 0), Line: "x"},
		{Timestamp: time.Unix(8, 0), Line: "x"},
		{Timestamp: time.Unix(9, 0), Line: "x"},
		{Timestamp: time.Unix(10, 0), Line: "x"},
		{Timestamp: time.Unix(11, 0), Line: "x"},
	}

	itr, err := s.Iterator(context.Background(), nil, time.Unix(int64(0), 0), time.Unix(12, 0), logproto.FORWARD, log.NewNoopPipeline().ForStream(s.labels))
	require.Nil(t, err)
	iterEq(t, exp, itr)

	sItr, err := s.SampleIterator(context.Background(), nil, time.Unix(int64(0), 0), time.Unix(12, 0), countExtractor())
	require.Nil(t, err)
	for _, x := range exp {
		require.Equal(t, true, sItr.Next())
		require.Equal(t, x.Timestamp, time.Unix(0, sItr.Sample().Timestamp))
		require.Equal(t, float64(1), sItr.Sample().Value)
	}
	require.Equal(t, false, sItr.Next())
}

func TestPushRateLimit(t *testing.T) {
	l := validation.Limits{
		PerStreamRateLimit:      10,
		PerStreamRateLimitBurst: 10,
	}
	limits, err := validation.NewOverrides(l, nil)
	require.NoError(t, err)
	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)

	s := newStream(
		defaultConfig(),
		limiter,
		"fake",
		model.Fingerprint(0),
		labels.Labels{
			{Name: "foo", Value: "bar"},
		},
		true,
		NilMetrics,
	)

	entries := []logproto.Entry{
		{Timestamp: time.Unix(1, 0), Line: "aaaaaaaaaa"},
		{Timestamp: time.Unix(1, 0), Line: "aaaaaaaaab"},
	}
	// The first 10-byte line consumes the burst, so the second line should be rejected
	// by the per-stream rate limit.
	_, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true)
	require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error())
}

func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(t, err)
	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)

	cfg := defaultConfig()
	cfg.MaxChunkAge = time.Minute

	s := newStream(
		cfg,
		limiter,
		"fake",
		model.Fingerprint(0),
		labels.Labels{
			{Name: "foo", Value: "bar"},
		},
		true,
		NilMetrics,
	)

	base := time.Now()

	entries := []logproto.Entry{
		{Timestamp: base, Line: "1"},
	}

	// Push a first entry (it doesn't matter if we look like we're replaying or not)
	_, err = s.Push(context.Background(), entries, nil, 1, true)
	require.Nil(t, err)

	// Create a sample outside the validity window
	entries = []logproto.Entry{
		{Timestamp: base.Add(-time.Hour), Line: "2"},
	}

	// Pretend it's not a replay, ensure we error
	_, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true)
	require.NotNil(t, err)

	// Now pretend it's a replay. The same write should succeed.
	_, err = s.Push(context.Background(), entries, nil, 2, true)
	require.Nil(t, err)
}

func iterEq(t *testing.T, exp []logproto.Entry, got iter.EntryIterator) {
	var i int
	for got.Next() {
		require.Equal(t, exp[i].Timestamp, got.Entry().Timestamp, "failed on the (%d) ts", i)
		require.Equal(t, exp[i].Line, got.Entry().Line)
		i++
	}
	require.Equal(t, i, len(exp), "incorrect number of entries expected")
}

func Benchmark_PushStream(b *testing.B) {
	ls := labels.Labels{
		labels.Label{Name: "namespace", Value: "loki-dev"},
		labels.Label{Name: "cluster", Value: "dev-us-central1"},
		labels.Label{Name: "job", Value: "loki-dev/ingester"},
		labels.Label{Name: "container", Value: "ingester"},
	}

	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
	require.NoError(b, err)
	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)

	s := newStream(&Config{MaxChunkAge: 24 * time.Hour}, limiter, "fake", model.Fingerprint(0), ls, true, NilMetrics)
	t, err := newTailer("foo", `{namespace="loki-dev"}`, &fakeTailServer{}, 10)
	require.NoError(b, err)

	go t.loop()
	defer t.close()

	s.tailers[1] = t
	ctx := context.Background()
	e := entries(100, time.Now())
	b.ResetTimer()
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		rec := recordPool.GetRecord()
		_, err := s.Push(ctx, e, rec, 0, true)
		require.NoError(b, err)
		recordPool.PutRecord(rec)
	}
}