github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/ingester/encoding_test.go

package ingester

import (
	"fmt"
	"testing"
	"time"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/stretchr/testify/require"

	"github.com/grafana/loki/pkg/chunkenc"
	"github.com/grafana/loki/pkg/logproto"
)

func Test_Encoding_Series(t *testing.T) {
	record := &WALRecord{
		entryIndexMap: make(map[uint64]int),
		UserID:        "123",
		Series: []record.RefSeries{
			{
				Ref: 456,
				Labels: labels.FromMap(map[string]string{
					"foo":  "bar",
					"bazz": "buzz",
				}),
			},
			{
				Ref: 789,
				Labels: labels.FromMap(map[string]string{
					"abc": "123",
					"def": "456",
				}),
			},
		},
	}

	buf := record.encodeSeries(nil)

	decoded := recordPool.GetRecord()

	err := decodeWALRecord(buf, decoded)
	require.Nil(t, err)

	// Since we use a pool, there can be subtle differences between nil
	// slices and zero-length slices. Both are valid, so check the length
	// instead.
	require.Equal(t, 0, len(decoded.RefEntries))
	decoded.RefEntries = nil
	require.Equal(t, record, decoded)
}
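// The normalization in Test_Encoding_Series above exists because testify's
// require.Equal uses deep-equality semantics, under which a nil slice and an
// empty non-nil slice are not equal. This is a minimal sketch, not part of
// the original file, illustrating that distinction; the test name is
// hypothetical.
func Test_NilVsEmptySlice_Sketch(t *testing.T) {
	var nilSlice []int
	emptySlice := make([]int, 0)

	// Both have length zero...
	require.Equal(t, 0, len(nilSlice))
	require.Equal(t, 0, len(emptySlice))

	// ...but they differ under deep equality, which is why the test above
	// checks the length and then nils out the pooled slice before comparing.
	require.NotEqual(t, nilSlice, emptySlice)
}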
func Test_Encoding_Entries(t *testing.T) {
	for _, tc := range []struct {
		desc    string
		rec     *WALRecord
		version RecordType
	}{
		{
			desc: "v1",
			rec: &WALRecord{
				entryIndexMap: make(map[uint64]int),
				UserID:        "123",
				RefEntries: []RefEntries{
					{
						Ref: 456,
						Entries: []logproto.Entry{
							{
								Timestamp: time.Unix(1000, 0),
								Line:      "first",
							},
							{
								Timestamp: time.Unix(2000, 0),
								Line:      "second",
							},
						},
					},
					{
						Ref: 789,
						Entries: []logproto.Entry{
							{
								Timestamp: time.Unix(3000, 0),
								Line:      "third",
							},
							{
								Timestamp: time.Unix(4000, 0),
								Line:      "fourth",
							},
						},
					},
				},
			},
			version: WALRecordEntriesV1,
		},
		{
			desc: "v2",
			rec: &WALRecord{
				entryIndexMap: make(map[uint64]int),
				UserID:        "123",
				RefEntries: []RefEntries{
					{
						Ref:     456,
						Counter: 1, // v2 uses the counter for WAL replay
						Entries: []logproto.Entry{
							{
								Timestamp: time.Unix(1000, 0),
								Line:      "first",
							},
							{
								Timestamp: time.Unix(2000, 0),
								Line:      "second",
							},
						},
					},
					{
						Ref:     789,
						Counter: 2, // v2 uses the counter for WAL replay
						Entries: []logproto.Entry{
							{
								Timestamp: time.Unix(3000, 0),
								Line:      "third",
							},
							{
								Timestamp: time.Unix(4000, 0),
								Line:      "fourth",
							},
						},
					},
				},
			},
			version: WALRecordEntriesV2,
		},
	} {
		decoded := recordPool.GetRecord()
		buf := tc.rec.encodeEntries(tc.version, nil)
		err := decodeWALRecord(buf, decoded)
		require.Nil(t, err)
		require.Equal(t, tc.rec, decoded)
	}
}

func Benchmark_EncodeEntries(b *testing.B) {
	var entries []logproto.Entry
	for i := int64(0); i < 10000; i++ {
		entries = append(entries, logproto.Entry{
			Timestamp: time.Unix(0, i),
			Line:      fmt.Sprintf("long line with a lot of data like a log %d", i),
		})
	}
	record := &WALRecord{
		entryIndexMap: make(map[uint64]int),
		UserID:        "123",
		RefEntries: []RefEntries{
			{
				Ref:     456,
				Entries: entries,
			},
			{
				Ref:     789,
				Entries: entries,
			},
		},
	}
	b.ReportAllocs()
	b.ResetTimer()
	buf := recordPool.GetBytes()[:0]
	defer recordPool.PutBytes(buf)

	for n := 0; n < b.N; n++ {
		record.encodeEntries(CurrentEntriesRec, buf)
	}
}

func Benchmark_DecodeWAL(b *testing.B) {
	var entries []logproto.Entry
	for i := int64(0); i < 10000; i++ {
		entries = append(entries, logproto.Entry{
			Timestamp: time.Unix(0, i),
			Line:      fmt.Sprintf("long line with a lot of data like a log %d", i),
		})
	}
	record := &WALRecord{
		entryIndexMap: make(map[uint64]int),
		UserID:        "123",
		RefEntries: []RefEntries{
			{
				Ref:     456,
				Entries: entries,
			},
			{
				Ref:     789,
				Entries: entries,
			},
		},
	}

	buf := record.encodeEntries(CurrentEntriesRec, nil)
	rec := recordPool.GetRecord()
	b.ReportAllocs()
	b.ResetTimer()

	for n := 0; n < b.N; n++ {
		require.NoError(b, decodeWALRecord(buf, rec))
	}
}

// fillChunk appends entries until the chunk reports it has no more space.
func fillChunk(t testing.TB, c chunkenc.Chunk) {
	t.Helper()
	var i int64
	entry := &logproto.Entry{
		Timestamp: time.Unix(0, 0),
		Line:      "entry for line 0",
	}

	for c.SpaceFor(entry) {
		require.NoError(t, c.Append(entry))
		i++
		entry.Timestamp = time.Unix(0, i)
		entry.Line = fmt.Sprintf("entry for line %d", i)
	}
}

func dummyConf() *Config {
	var conf Config
	conf.BlockSize = 256 * 1024
	conf.TargetChunkSize = 1500 * 1024

	return &conf
}

func Test_EncodingChunks(t *testing.T) {
	for _, f := range chunkenc.HeadBlockFmts {
		for _, close := range []bool{true, false} {
			for _, tc := range []struct {
				desc string
				conf Config
			}{
				{
					// mostly for historical parity
					desc: "dummyConf",
					conf: *dummyConf(),
				},
				{
					desc: "default",
					conf: defaultIngesterTestConfig(t),
				},
			} {
				t.Run(fmt.Sprintf("%v-%v-%s", f, close, tc.desc), func(t *testing.T) {
					conf := tc.conf
					c := chunkenc.NewMemChunk(chunkenc.EncGZIP, f, conf.BlockSize, conf.TargetChunkSize)
					fillChunk(t, c)
					if close {
						require.Nil(t, c.Close())
					}

					from := []chunkDesc{
						{
							chunk: c,
						},
						// test non-zero values
						{
							chunk:       c,
							closed:      true,
							synced:      true,
							flushed:     time.Unix(1, 0),
							lastUpdated: time.Unix(0, 1),
						},
					}
					there, err := toWireChunks(from, nil)
					require.Nil(t, err)
					chunks := make([]Chunk, 0, len(there))
					for _, c := range there {
						chunks = append(chunks, c.Chunk)

						// Ensure closed head chunks are empty
						if close {
							require.Equal(t, 0, len(c.Head))
						} else {
							require.Greater(t, len(c.Head), 0)
						}
					}

					backAgain, err := fromWireChunks(&conf, chunks)
					require.Nil(t, err)

					for i, to := range backAgain {
						// Test the encoding directly, as the substructure may change;
						// for instance, the uncompressed size for each block is not
						// included in the encoded version.
						enc, err := to.chunk.Bytes()
						require.Nil(t, err)
						to.chunk = nil

						matched := from[i]
						exp, err := matched.chunk.Bytes()
						require.Nil(t, err)
						matched.chunk = nil

						require.Equal(t, exp, enc)
						require.Equal(t, matched, to)
					}
				})
			}
		}
	}
}
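// A minimal sketch, not part of the original file, of the wire round trip
// exercised by Test_EncodingChunks above, reduced to a single chunkDesc. It
// asserts only that the encoded chunk bytes survive toWireChunks and
// fromWireChunks; the test name is hypothetical, while the helpers and types
// are the ones used above.
func Test_WireChunks_RoundTrip_Sketch(t *testing.T) {
	conf := dummyConf()
	c := chunkenc.NewMemChunk(chunkenc.EncGZIP, chunkenc.UnorderedHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
	fillChunk(t, c)

	there, err := toWireChunks([]chunkDesc{{chunk: c}}, nil)
	require.NoError(t, err)
	require.Len(t, there, 1)

	back, err := fromWireChunks(conf, []Chunk{there[0].Chunk})
	require.NoError(t, err)
	require.Len(t, back, 1)

	// As in the test above, compare the encoded bytes rather than the
	// in-memory substructure.
	exp, err := c.Bytes()
	require.NoError(t, err)
	got, err := back[0].chunk.Bytes()
	require.NoError(t, err)
	require.Equal(t, exp, got)
}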
func Test_EncodingCheckpoint(t *testing.T) {
	conf := dummyConf()
	c := chunkenc.NewMemChunk(chunkenc.EncGZIP, chunkenc.UnorderedHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
	require.Nil(t, c.Append(&logproto.Entry{
		Timestamp: time.Unix(1, 0),
		Line:      "hi there",
	}))
	data, err := c.Bytes()
	require.Nil(t, err)
	from, to := c.Bounds()

	ls := labels.FromMap(map[string]string{"foo": "bar"})
	s := &Series{
		UserID:      "fake",
		Fingerprint: 123,
		Labels:      logproto.FromLabelsToLabelAdapters(ls),
		To:          time.Unix(10, 0),
		LastLine:    "lastLine",
		Chunks: []Chunk{
			{
				From:        from,
				To:          to,
				Closed:      true,
				Synced:      true,
				FlushedAt:   time.Unix(1, 0),
				LastUpdated: time.Unix(0, 1),
				Data:        data,
			},
		},
	}

	b, err := encodeWithTypeHeader(s, CheckpointRecord, nil)
	require.Nil(t, err)

	out := &Series{}
	err = decodeCheckpointRecord(b, out)
	require.Nil(t, err)

	// Zero out the passed []byte to ensure that the resulting *Series
	// doesn't retain any references into it.
	for i := range b {
		b[i] = 0
	}

	// test chunk bytes separately
	sChunks := s.Chunks
	s.Chunks = nil
	outChunks := out.Chunks
	out.Chunks = nil

	zero := time.Unix(0, 0)

	require.Equal(t, true, s.To.Equal(out.To))
	s.To = zero
	out.To = zero

	require.Equal(t, s, out)
	require.Equal(t, len(sChunks), len(outChunks))
	for i, exp := range sChunks {
		got := outChunks[i]
		// There are issues diffing zero-value time.Locations against nil
		// ones. Check/override the time fields individually so that the
		// remaining fields are still compared in an extensible manner.
		require.Equal(t, true, exp.From.Equal(got.From))
		exp.From = zero
		got.From = zero

		require.Equal(t, true, exp.To.Equal(got.To))
		exp.To = zero
		got.To = zero

		require.Equal(t, true, exp.FlushedAt.Equal(got.FlushedAt))
		exp.FlushedAt = zero
		got.FlushedAt = zero

		require.Equal(t, true, exp.LastUpdated.Equal(got.LastUpdated))
		exp.LastUpdated = zero
		got.LastUpdated = zero

		require.Equal(t, exp, got)
	}
}
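// A minimal sketch, not part of the original file, of the checkpoint round
// trip tested above, reduced to a Series without chunks; the test name is
// hypothetical, while encodeWithTypeHeader and decodeCheckpointRecord are
// the helpers used above. As in Test_EncodingCheckpoint, To is compared via
// time.Time.Equal and then zeroed to sidestep time.Location differences, and
// the decoded Chunks slice is normalized for the nil-vs-empty distinction.
func Test_EncodingCheckpoint_EmptySeries_Sketch(t *testing.T) {
	s := &Series{
		UserID:      "fake",
		Fingerprint: 123,
		Labels:      logproto.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{"foo": "bar"})),
		To:          time.Unix(10, 0),
	}

	b, err := encodeWithTypeHeader(s, CheckpointRecord, nil)
	require.NoError(t, err)

	out := &Series{}
	require.NoError(t, decodeCheckpointRecord(b, out))

	// Decoding may yield a nil or an empty Chunks slice; normalize before
	// the deep comparison.
	require.Equal(t, 0, len(out.Chunks))
	out.Chunks = nil

	require.True(t, s.To.Equal(out.To))
	zero := time.Unix(0, 0)
	s.To, out.To = zero, zero

	require.Equal(t, s, out)
}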