github.com/zuoyebang/bitalosdb@v1.1.1-0.20240516111551-79a8c4d8ce20/internal/record/record_test.go

     1  // Copyright 2021 The Bitalosdb author(hustxrb@163.com) and other contributors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package record
    16  
    17  import (
    18  	"bytes"
    19  	"encoding/binary"
    20  	"fmt"
    21  	"io"
    22  	"io/ioutil"
    23  	"math"
    24  	"os"
    25  	"strings"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/zuoyebang/bitalosdb/internal/base"
    30  
    31  	"github.com/cockroachdb/errors"
    32  	"github.com/stretchr/testify/require"
    33  	"golang.org/x/exp/rand"
    34  )
    35  
    36  func short(s string) string {
    37  	if len(s) < 64 {
    38  		return s
    39  	}
    40  	return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:])
    41  }
    42  
    43  // big returns a string of length n, composed of repetitions of partial.
    44  func big(partial string, n int) string {
    45  	return strings.Repeat(partial, n/len(partial)+1)[:n]
    46  }
    47  
    48  type recordWriter interface {
    49  	WriteRecord([]byte) (int64, error)
    50  	Close() error
    51  }
    52  
    53  func testGeneratorWriter(
    54  	t *testing.T,
    55  	reset func(),
    56  	gen func() (string, bool),
    57  	newWriter func(io.Writer) recordWriter,
    58  ) {
    59  	buf := new(bytes.Buffer)
    60  
    61  	reset()
    62  	w := newWriter(buf)
    63  	for {
    64  		s, ok := gen()
    65  		if !ok {
    66  			break
    67  		}
    68  		if _, err := w.WriteRecord([]byte(s)); err != nil {
    69  			t.Fatalf("Write: %v", err)
    70  		}
    71  	}
    72  	if err := w.Close(); err != nil {
    73  		t.Fatalf("Close: %v", err)
    74  	}
    75  	reset()
    76  	r := NewReader(buf, 0 /* logNum */)
    77  	for {
    78  		s, ok := gen()
    79  		if !ok {
    80  			break
    81  		}
    82  		rr, err := r.Next()
    83  		if err != nil {
    84  			t.Fatalf("reader.Next: %v", err)
    85  		}
    86  		x, err := ioutil.ReadAll(rr)
    87  		if err != nil {
    88  			t.Fatalf("ReadAll: %v", err)
    89  		}
    90  		if string(x) != s {
    91  			t.Fatalf("got %q, want %q", short(string(x)), short(s))
    92  		}
    93  	}
    94  	if _, err := r.Next(); err != io.EOF {
    95  		t.Fatalf("got %v, want %v", err, io.EOF)
    96  	}
    97  }
    98  
    99  func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) {
   100  	t.Run("Writer", func(t *testing.T) {
   101  		testGeneratorWriter(t, reset, gen, func(w io.Writer) recordWriter {
   102  			return NewWriter(w)
   103  		})
   104  	})
   105  
   106  	t.Run("LogWriter", func(t *testing.T) {
   107  		testGeneratorWriter(t, reset, gen, func(w io.Writer) recordWriter {
   108  			return NewLogWriter(w, 0 /* logNum */)
   109  		})
   110  	})
   111  }
   112  
   113  func testLiterals(t *testing.T, s []string) {
   114  	var i int
   115  	reset := func() {
   116  		i = 0
   117  	}
   118  	gen := func() (string, bool) {
   119  		if i == len(s) {
   120  			return "", false
   121  		}
   122  		i++
   123  		return s[i-1], true
   124  	}
   125  	testGenerator(t, reset, gen)
   126  }
   127  
   128  func TestMany(t *testing.T) {
   129  	const n = 1e5
   130  	var i int
   131  	reset := func() {
   132  		i = 0
   133  	}
   134  	gen := func() (string, bool) {
   135  		if i == n {
   136  			return "", false
   137  		}
   138  		i++
   139  		return fmt.Sprintf("%d.", i-1), true
   140  	}
   141  	testGenerator(t, reset, gen)
   142  }
   143  
   144  func TestRandom(t *testing.T) {
   145  	const n = 1e2
   146  	var (
   147  		i int
   148  		r *rand.Rand
   149  	)
   150  	reset := func() {
   151  		i, r = 0, rand.New(rand.NewSource(0))
   152  	}
   153  	gen := func() (string, bool) {
   154  		if i == n {
   155  			return "", false
   156  		}
   157  		i++
   158  		return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true
   159  	}
   160  	testGenerator(t, reset, gen)
   161  }
   162  
   163  func TestBasic(t *testing.T) {
   164  	testLiterals(t, []string{
   165  		strings.Repeat("a", 1000),
   166  		strings.Repeat("b", 97270),
   167  		strings.Repeat("c", 8000),
   168  	})
   169  }
   170  
   171  func TestBoundary(t *testing.T) {
   172  	for i := blockSize - 16; i < blockSize+16; i++ {
   173  		s0 := big("abcd", i)
   174  		for j := blockSize - 16; j < blockSize+16; j++ {
   175  			s1 := big("ABCDE", j)
   176  			testLiterals(t, []string{s0, s1})
   177  			testLiterals(t, []string{s0, "", s1})
   178  			testLiterals(t, []string{s0, "x", s1})
   179  		}
   180  	}
   181  }
   182  
   183  func TestFlush(t *testing.T) {
   184  	buf := new(bytes.Buffer)
   185  	w := NewWriter(buf)
   186  	// Write a couple of records. Everything should still be held
   187  	// in the record.Writer buffer, so that buf.Len should be 0.
   188  	w0, _ := w.Next()
   189  	w0.Write([]byte("0"))
   190  	w1, _ := w.Next()
   191  	w1.Write([]byte("11"))
   192  	if got, want := buf.Len(), 0; got != want {
   193  		t.Fatalf("buffer length #0: got %d want %d", got, want)
   194  	}
   195  	// Flush the record.Writer buffer, which should yield 17 bytes.
   196  	// 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes.
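        	// (Assuming the legacy LevelDB-style chunk format used here: each legacy
        	// chunk header is legacyHeaderSize = 7 bytes, a 4-byte checksum followed
        	// by a 2-byte length and a 1-byte chunk type.)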
   197  	require.NoError(t, w.Flush())
   198  	if got, want := buf.Len(), 17; got != want {
   199  		t.Fatalf("buffer length #1: got %d want %d", got, want)
   200  	}
   201  	// Do another write, one that isn't large enough to complete the block.
   202  	// The write should not have flowed through to buf.
   203  	w2, _ := w.Next()
   204  	w2.Write(bytes.Repeat([]byte("2"), 10000))
   205  	if got, want := buf.Len(), 17; got != want {
   206  		t.Fatalf("buffer length #2: got %d want %d", got, want)
   207  	}
   208  	// Flushing should get us up to 10024 bytes written.
   209  	// 10024 = 17 + 7 + 10000.
   210  	require.NoError(t, w.Flush())
   211  	if got, want := buf.Len(), 10024; got != want {
   212  		t.Fatalf("buffer length #3: got %d want %d", got, want)
   213  	}
   214  	// Do a bigger write, one that completes the current block.
   215  	// We should now have 32768 bytes (a complete block), without
   216  	// an explicit flush.
   217  	w3, _ := w.Next()
   218  	w3.Write(bytes.Repeat([]byte("3"), 40000))
   219  	if got, want := buf.Len(), 32768; got != want {
   220  		t.Fatalf("buffer length #4: got %d want %d", got, want)
   221  	}
   222  	// Flushing should get us up to 50038 bytes written.
   223  	// 50038 = 10024 + 2*7 + 40000. There are two headers because
   224  	// the one record was split into two chunks.
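        	// More precisely, the first chunk tops off the current block with 22737
        	// payload bytes (10024 + 7 + 22737 = 32768), and the second chunk carries
        	// the remaining 17263 bytes into the next block (32768 + 7 + 17263 = 50038).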
   225  	require.NoError(t, w.Flush())
   226  	if got, want := buf.Len(), 50038; got != want {
   227  		t.Fatalf("buffer length #5: got %d want %d", got, want)
   228  	}
   229  	// Check that reading those records gives the right lengths.
   230  	r := NewReader(buf, 0 /* logNum */)
   231  	wants := []int64{1, 2, 10000, 40000}
   232  	for i, want := range wants {
   233  		rr, _ := r.Next()
   234  		n, err := io.Copy(ioutil.Discard, rr)
   235  		if err != nil {
   236  			t.Fatalf("read #%d: %v", i, err)
   237  		}
   238  		if n != want {
   239  			t.Fatalf("read #%d: got %d bytes want %d", i, n, want)
   240  		}
   241  	}
   242  }
   243  
   244  func TestNonExhaustiveRead(t *testing.T) {
   245  	const n = 100
   246  	buf := new(bytes.Buffer)
   247  	p := make([]byte, 10)
   248  	rnd := rand.New(rand.NewSource(1))
   249  
   250  	w := NewWriter(buf)
   251  	for i := 0; i < n; i++ {
   252  		length := len(p) + rnd.Intn(3*blockSize)
   253  		s := string(uint8(i)) + "123456789abcdefgh"
   254  		_, _ = w.WriteRecord([]byte(big(s, length)))
   255  	}
   256  	if err := w.Close(); err != nil {
   257  		t.Fatalf("Close: %v", err)
   258  	}
   259  
   260  	r := NewReader(buf, 0 /* logNum */)
   261  	for i := 0; i < n; i++ {
   262  		rr, _ := r.Next()
   263  		_, err := io.ReadFull(rr, p)
   264  		if err != nil {
   265  			t.Fatalf("ReadFull: %v", err)
   266  		}
   267  		want := string(uint8(i)) + "123456789"
   268  		if got := string(p); got != want {
   269  			t.Fatalf("read #%d: got %q want %q", i, got, want)
   270  		}
   271  	}
   272  }
   273  
   274  func TestStaleReader(t *testing.T) {
   275  	buf := new(bytes.Buffer)
   276  
   277  	w := NewWriter(buf)
   278  	_, err := w.WriteRecord([]byte("0"))
   279  	require.NoError(t, err)
   280  
   281  	_, err = w.WriteRecord([]byte("11"))
   282  	require.NoError(t, err)
   283  
   284  	require.NoError(t, w.Close())
   285  
   286  	r := NewReader(buf, 0 /* logNum */)
   287  	r0, err := r.Next()
   288  	require.NoError(t, err)
   289  
   290  	r1, err := r.Next()
   291  	require.NoError(t, err)
   292  
   293  	p := make([]byte, 1)
   294  	if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") {
   295  		t.Fatalf("stale read #0: unexpected error: %v", err)
   296  	}
   297  	if _, err := r1.Read(p); err != nil {
   298  		t.Fatalf("fresh read #1: got %v want nil error", err)
   299  	}
   300  	if p[0] != '1' {
   301  		t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0])
   302  	}
   303  }
   304  
   305  type testRecords struct {
   306  	records [][]byte // The raw value of each record.
   307  	offsets []int64  // The offset of each record within buf, derived from writer.LastRecordOffset.
   308  	buf     []byte   // The serialized form of all records.
   309  }
   310  
   311  // makeTestRecords generates test records of specified lengths.
   312  // The first record will consist of repeating 0x00 bytes, the next record of
   313  // 0x01 bytes, and so forth. The values will loop back to 0x00 after 0xff.
   314  func makeTestRecords(recordLengths ...int) (*testRecords, error) {
   315  	ret := &testRecords{}
   316  	ret.records = make([][]byte, len(recordLengths))
   317  	ret.offsets = make([]int64, len(recordLengths))
   318  	for i, n := range recordLengths {
   319  		ret.records[i] = bytes.Repeat([]byte{byte(i)}, n)
   320  	}
   321  
   322  	buf := new(bytes.Buffer)
   323  	w := NewWriter(buf)
   324  	for i, rec := range ret.records {
   325  		wRec, err := w.Next()
   326  		if err != nil {
   327  			return nil, err
   328  		}
   329  
   330  		// Alternate between one big write and many small writes.
   331  		cSize := 8
   332  		if i&1 == 0 {
   333  			cSize = len(rec)
   334  		}
   335  		for ; len(rec) > cSize; rec = rec[cSize:] {
   336  			if _, err := wRec.Write(rec[:cSize]); err != nil {
   337  				return nil, err
   338  			}
   339  		}
   340  		if _, err := wRec.Write(rec); err != nil {
   341  			return nil, err
   342  		}
   343  
   344  		ret.offsets[i], err = w.LastRecordOffset()
   345  		if err != nil {
   346  			return nil, err
   347  		}
   348  	}
   349  
   350  	if err := w.Close(); err != nil {
   351  		return nil, err
   352  	}
   353  
   354  	ret.buf = buf.Bytes()
   355  	return ret, nil
   356  }
   357  
   358  // corruptBlock corrupts the checksum of the chunk that starts at the
   359  // specified block offset. blockNum is 0-based.
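        // The first four bytes of a block hold the checksum field of the chunk that
        // begins there, so permuting byte 0 and zeroing bytes 1-3 guarantees a
        // checksum mismatch when that chunk is read back.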
   360  func corruptBlock(buf []byte, blockNum int) {
   361  	// Ensure we always permute at least 1 byte of the checksum.
   362  	if buf[blockSize*blockNum] == 0x00 {
   363  		buf[blockSize*blockNum] = 0xff
   364  	} else {
   365  		buf[blockSize*blockNum] = 0x00
   366  	}
   367  
   368  	buf[blockSize*blockNum+1] = 0x00
   369  	buf[blockSize*blockNum+2] = 0x00
   370  	buf[blockSize*blockNum+3] = 0x00
   371  }
   372  
   373  func TestRecoverNoOp(t *testing.T) {
   374  	recs, err := makeTestRecords(
   375  		blockSize-legacyHeaderSize,
   376  		blockSize-legacyHeaderSize,
   377  		blockSize-legacyHeaderSize,
   378  	)
   379  	if err != nil {
   380  		t.Fatalf("makeTestRecords: %v", err)
   381  	}
   382  
   383  	r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */)
   384  	_, err = r.Next()
   385  	if err != nil || r.err != nil {
   386  		t.Fatalf("reader.Next: %v reader.err: %v", err, r.err)
   387  	}
   388  
   389  	seq, begin, end, n := r.seq, r.begin, r.end, r.n
   390  
   391  	// Should be a no-op since r.err == nil.
   392  	r.recover()
   393  
   394  	// r.err was nil, nothing should have changed.
   395  	if seq != r.seq || begin != r.begin || end != r.end || n != r.n {
   396  		t.Fatal("reader.Recover when no error existed, was not a no-op")
   397  	}
   398  }
   399  
   400  func TestBasicRecover(t *testing.T) {
   401  	recs, err := makeTestRecords(
   402  		blockSize-legacyHeaderSize,
   403  		blockSize-legacyHeaderSize,
   404  		blockSize-legacyHeaderSize,
   405  	)
   406  	if err != nil {
   407  		t.Fatalf("makeTestRecords: %v", err)
   408  	}
   409  
   410  	// Corrupt the checksum of the second record r1 in our file.
   411  	corruptBlock(recs.buf, 1)
   412  
   413  	underlyingReader := bytes.NewReader(recs.buf)
   414  	r := NewReader(underlyingReader, 0 /* logNum */)
   415  
   416  	// The first record r0 should be read just fine.
   417  	r0, err := r.Next()
   418  	if err != nil {
   419  		t.Fatalf("Next: %v", err)
   420  	}
   421  	r0Data, err := ioutil.ReadAll(r0)
   422  	if err != nil {
   423  		t.Fatalf("ReadAll: %v", err)
   424  	}
   425  	if !bytes.Equal(r0Data, recs.records[0]) {
   426  		t.Fatal("Unexpected output in r0's data")
   427  	}
   428  
   429  	// The next record should have a checksum mismatch.
   430  	_, err = r.Next()
   431  	if err == nil {
   432  		t.Fatal("Expected an error while reading a corrupted record")
   433  	}
   434  	if err != ErrInvalidChunk {
   435  		t.Fatalf("Unexpected error returned: %v", err)
   436  	}
   437  
   438  	// Recover from that checksum mismatch.
   439  	r.recover()
   440  	currentOffset, err := underlyingReader.Seek(0, os.SEEK_CUR)
   441  	if err != nil {
   442  		t.Fatalf("current offset: %v", err)
   443  	}
   444  	if currentOffset != blockSize*2 {
   445  		t.Fatalf("current offset: got %d, want %d", currentOffset, blockSize*2)
   446  	}
   447  
   448  	// The third record r2 should be read just fine.
   449  	r2, err := r.Next()
   450  	if err != nil {
   451  		t.Fatalf("Next: %v", err)
   452  	}
   453  	r2Data, err := ioutil.ReadAll(r2)
   454  	if err != nil {
   455  		t.Fatalf("ReadAll: %v", err)
   456  	}
   457  	if !bytes.Equal(r2Data, recs.records[2]) {
   458  		t.Fatal("Unexpected output in r2's data")
   459  	}
   460  }
   461  
   462  func TestRecoverSingleBlock(t *testing.T) {
   463  	// The first record will be blockSize * 3 bytes long. Since each block has
   464  	// a 7 byte header, the first record will roll over into 4 blocks.
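        	// (3*blockSize bytes of payload, less the 3*(blockSize-7) that fit in the
        	// first three blocks, leaves a 21-byte tail, which lands in the 4th block
        	// as a 28-byte chunk once its 7-byte header is added.)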
   465  	recs, err := makeTestRecords(
   466  		blockSize*3,
   467  		blockSize-legacyHeaderSize,
   468  		blockSize/2,
   469  	)
   470  	if err != nil {
   471  		t.Fatalf("makeTestRecords: %v", err)
   472  	}
   473  
   474  	// Corrupt the checksum for the portion of the first record that exists in
   475  	// the 4th block.
   476  	corruptBlock(recs.buf, 3)
   477  
   478  	// The first record should fail, but only when we read deeper beyond the
   479  	// first block.
   480  	r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */)
   481  	r0, err := r.Next()
   482  	if err != nil {
   483  		t.Fatalf("Next: %v", err)
   484  	}
   485  
   486  	// Reading deeper should yield a checksum mismatch.
   487  	_, err = ioutil.ReadAll(r0)
   488  	if err == nil {
   489  		t.Fatal("Expected a checksum mismatch error, got nil")
   490  	}
   491  	if err != ErrInvalidChunk {
   492  		t.Fatalf("Unexpected error returned: %v", err)
   493  	}
   494  
   495  	// Recover from that checksum mismatch.
   496  	r.recover()
   497  
   498  	// All of the data in the second record r1 is lost because the first record
   499  	// r0 shared a partial block with it. The second record also overlapped
   500  	// into the block with the third record r2. Recovery should jump to that
   501  	// block, skip over the end of the second record, and start parsing the
   502  	// third record.
   503  	r2, err := r.Next()
   504  	if err != nil {
   505  		t.Fatalf("Next: %v", err)
   506  	}
   507  	r2Data, _ := ioutil.ReadAll(r2)
   508  	if !bytes.Equal(r2Data, recs.records[2]) {
   509  		t.Fatal("Unexpected output in r2's data")
   510  	}
   511  }
   512  
   513  func TestRecoverMultipleBlocks(t *testing.T) {
   514  	recs, err := makeTestRecords(
   515  		// The first record will consume 3 entire blocks but a fraction of the 4th.
   516  		blockSize*3,
   517  		// The second record will completely fill the remainder of the 4th block.
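        		// (Its size works out to blockSize-35: the first record leaves a 28-byte
        		// tail chunk in the 4th block, and blockSize-35 payload bytes plus a
        		// 7-byte header exactly fill the remaining blockSize-28 bytes.)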
   518  		3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize,
   519  		// Consume the entirety of the 5th block.
   520  		blockSize-legacyHeaderSize,
   521  		// Consume the entirety of the 6th block.
   522  		blockSize-legacyHeaderSize,
   523  		// Consume roughly half of the 7th block.
   524  		blockSize/2,
   525  	)
   526  	if err != nil {
   527  		t.Fatalf("makeTestRecords: %v", err)
   528  	}
   529  
   530  	// Corrupt the checksum for the portion of the first record that exists in the 4th block.
   531  	corruptBlock(recs.buf, 3)
   532  
   533  	// Now corrupt the two blocks in a row that correspond to recs.records[2:4].
   534  	corruptBlock(recs.buf, 4)
   535  	corruptBlock(recs.buf, 5)
   536  
   537  	// The first record should fail, but only when we read deeper beyond the first block.
   538  	r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */)
   539  	r0, err := r.Next()
   540  	if err != nil {
   541  		t.Fatalf("Next: %v", err)
   542  	}
   543  
   544  	// Reading deeper should yield a checksum mismatch.
   545  	_, err = ioutil.ReadAll(r0)
   546  	if err == nil {
   547  		t.Fatal("Exptected a checksum mismatch error, got nil")
   548  	}
   549  	if err != ErrInvalidChunk {
   550  		t.Fatalf("Unexpected error returned: %v", err)
   551  	}
   552  
   553  	// Recover from that checksum mismatch.
   554  	r.recover()
   555  
   556  	// All of the data in the second record is lost because the first
   557  	// record shared a partial block with it. The following two records
   558  	// have corrupted checksums as well, so the r.recover call above should
   559  	// leave the reader positioned so that r.Next returns the 5th record.
   560  	r4, err := r.Next()
   561  	if err != nil {
   562  		t.Fatalf("Next: %v", err)
   563  	}
   564  
   565  	r4Data, _ := ioutil.ReadAll(r4)
   566  	if !bytes.Equal(r4Data, recs.records[4]) {
   567  		t.Fatal("Unexpected output in r4's data")
   568  	}
   569  }
   570  
   571  // verifyLastBlockRecover reads each record from recs expecting that the
   572  // last record will be corrupted. It then calls recover and verifies that EOF
   573  // is returned.
   574  func verifyLastBlockRecover(recs *testRecords) error {
   575  	r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */)
   576  	// Loop to one element larger than the number of records to verify EOF.
   577  	for i := 0; i < len(recs.records)+1; i++ {
   578  		_, err := r.Next()
   579  		switch i {
   580  		case len(recs.records) - 1:
   581  			if err == nil {
   582  				return errors.New("Expected a checksum mismatch error, got nil")
   583  			}
   584  			r.recover()
   585  		case len(recs.records):
   586  			if err != io.EOF {
   587  				return errors.Errorf("Expected io.EOF, got %v", err)
   588  			}
   589  		default:
   590  			if err != nil {
   591  				return errors.Errorf("Next: %v", err)
   592  			}
   593  		}
   594  	}
   595  	return nil
   596  }
   597  
   598  func TestRecoverLastPartialBlock(t *testing.T) {
   599  	recs, err := makeTestRecords(
   600  		// The first record will consume 3 entire blocks but a fraction of the 4th.
   601  		blockSize*3,
   602  		// The second record will completely fill the remainder of the 4th block.
   603  		3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize,
   604  		// Consume roughly half of the 5th block.
   605  		blockSize/2,
   606  	)
   607  	if err != nil {
   608  		t.Fatalf("makeTestRecords: %v", err)
   609  	}
   610  
   611  	// Corrupt the 5th block.
   612  	corruptBlock(recs.buf, 4)
   613  
   614  	// Verify Recover works when the last block is corrupted.
   615  	if err := verifyLastBlockRecover(recs); err != nil {
   616  		t.Fatalf("verifyLastBlockRecover: %v", err)
   617  	}
   618  }
   619  
   620  func TestRecoverLastCompleteBlock(t *testing.T) {
   621  	recs, err := makeTestRecords(
   622  		// The first record will consume 3 entire blocks but a fraction of the 4th.
   623  		blockSize*3,
   624  		// The second record will completely fill the remainder of the 4th block.
   625  		3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize,
   626  		// Consume the entire 5th block.
   627  		blockSize-legacyHeaderSize,
   628  	)
   629  	if err != nil {
   630  		t.Fatalf("makeTestRecords: %v", err)
   631  	}
   632  
   633  	// Corrupt the 5th block.
   634  	corruptBlock(recs.buf, 4)
   635  
   636  	// Verify Recover works when the last block is corrupted.
   637  	if err := verifyLastBlockRecover(recs); err != nil {
   638  		t.Fatalf("verifyLastBlockRecover: %v", err)
   639  	}
   640  }
   641  
   642  func TestReaderOffset(t *testing.T) {
   643  	recs, err := makeTestRecords(
   644  		blockSize*2,
   645  		400,
   646  		500,
   647  		600,
   648  		700,
   649  		800,
   650  		9000,
   651  		1000,
   652  	)
   653  	if err != nil {
   654  		t.Fatalf("makeTestRecords: %v", err)
   655  	}
   656  
   657  	// Read the records back, checking that Offset reports each record's starting offset before it is read.
   658  	r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */)
   659  	for i, offset := range recs.offsets {
   660  		if offset != r.Offset() {
   661  			t.Fatalf("%d: expected offset %d, but found %d", i, offset, r.Offset())
   662  		}
   663  		rec, err := r.Next()
   664  		if err != nil {
   665  			t.Fatalf("Next: %v", err)
   666  		}
   667  		if _, err = ioutil.ReadAll(rec); err != nil {
   668  			t.Fatalf("ReadAll: %v", err)
   669  		}
   670  	}
   671  }
   672  
   673  func TestSeekRecord(t *testing.T) {
   674  	recs, err := makeTestRecords(
   675  		// The first record will consume 3 entire blocks but a fraction of the 4th.
   676  		blockSize*3,
   677  		// The second record will completely fill the remainder of the 4th block.
   678  		3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize,
   679  		// Consume the entirety of the 5th block.
   680  		blockSize-legacyHeaderSize,
   681  		// Consume the entirety of the 6th block.
   682  		blockSize-legacyHeaderSize,
   683  		// Consume roughly half of the 7th block.
   684  		blockSize/2,
   685  	)
   686  	if err != nil {
   687  		t.Fatalf("makeTestRecords: %v", err)
   688  	}
   689  
   690  	r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */)
   691  	// Seek to a valid block offset, but within a multiblock record. This should cause the next call to
   692  	// Next after SeekRecord to return the next valid FIRST/FULL chunk of the subsequent record.
   693  	err = r.seekRecord(blockSize)
   694  	if err != nil {
   695  		t.Fatalf("SeekRecord: %v", err)
   696  	}
   697  	rec, err := r.Next()
   698  	if err != nil {
   699  		t.Fatalf("Next: %v", err)
   700  	}
   701  	rData, _ := ioutil.ReadAll(rec)
   702  	if !bytes.Equal(rData, recs.records[1]) {
   703  		t.Fatalf("Unexpected output in record 1's data, got %v want %v", rData, recs.records[1])
   704  	}
   705  
   706  	// Seek 3 bytes into the second block, which is still in the middle of the first record, but not
   707  	// at a valid chunk boundary. Should result in an error upon calling r.Next.
   708  	err = r.seekRecord(blockSize + 3)
   709  	if err != nil {
   710  		t.Fatalf("SeekRecord: %v", err)
   711  	}
   712  	if _, err = r.Next(); err == nil {
   713  		t.Fatalf("Expected an error seeking to an invalid chunk boundary")
   714  	}
   715  	r.recover()
   716  
   717  	// Seek to the fifth block and verify all records can be read as appropriate.
   718  	err = r.seekRecord(blockSize * 4)
   719  	if err != nil {
   720  		t.Fatalf("SeekRecord: %v", err)
   721  	}
   722  
   723  	check := func(i int) {
   724  		for ; i < len(recs.records); i++ {
   725  			rec, err := r.Next()
   726  			if err != nil {
   727  				t.Fatalf("Next: %v", err)
   728  			}
   729  
   730  			rData, _ := ioutil.ReadAll(rec)
   731  			if !bytes.Equal(rData, recs.records[i]) {
   732  				t.Fatalf("Unexpected output in record #%d's data, got %v want %v", i, rData, recs.records[i])
   733  			}
   734  		}
   735  	}
   736  	check(2)
   737  
   738  	// Seek back to the fourth block, and read all subsequent records and verify them.
   739  	err = r.seekRecord(blockSize * 3)
   740  	if err != nil {
   741  		t.Fatalf("SeekRecord: %v", err)
   742  	}
   743  	check(1)
   744  
   745  	// Now seek past the end of the file and verify it causes an error.
   746  	err = r.seekRecord(1 << 20)
   747  	if err == nil {
   748  		t.Fatalf("Seek past the end of a file didn't cause an error")
   749  	}
   750  	if err != io.ErrUnexpectedEOF {
   751  		t.Fatalf("Seeking past EOF raised unexpected error: %v", err)
   752  	}
   753  	r.recover() // Verify recovery works.
   754  
   755  	// Validate the current records are returned after seeking to a valid offset.
   756  	err = r.seekRecord(blockSize * 4)
   757  	if err != nil {
   758  		t.Fatalf("SeekRecord: %v", err)
   759  	}
   760  	check(2)
   761  }
   762  
   763  func TestLastRecordOffset(t *testing.T) {
   764  	recs, err := makeTestRecords(
   765  		// The first record will consume 3 entire blocks but a fraction of the 4th.
   766  		blockSize*3,
   767  		// The second record will completely fill the remainder of the 4th block.
   768  		3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize,
   769  		// Consume the entirety of the 5th block.
   770  		blockSize-legacyHeaderSize,
   771  		// Consume the entirety of the 6th block.
   772  		blockSize-legacyHeaderSize,
   773  		// Consume roughly half of the 7th block.
   774  		blockSize/2,
   775  	)
   776  	if err != nil {
   777  		t.Fatalf("makeTestRecords: %v", err)
   778  	}
   779  
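        	// Record 0 starts at offset 0 and spills a 28-byte tail chunk into the
        	// 4th block, so record 1 starts at 3*blockSize + 28 = 98332 and fills out
        	// that block. Records 2-4 then start on block boundaries: 4*blockSize =
        	// 131072, 5*blockSize = 163840 and 6*blockSize = 196608.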
   780  	wants := []int64{0, 98332, 131072, 163840, 196608}
   781  	for i, got := range recs.offsets {
   782  		if want := wants[i]; got != want {
   783  			t.Errorf("record #%d: got %d, want %d", i, got, want)
   784  		}
   785  	}
   786  }
   787  
   788  func TestNoLastRecordOffset(t *testing.T) {
   789  	buf := new(bytes.Buffer)
   790  	w := NewWriter(buf)
   791  	defer w.Close()
   792  
   793  	if _, err := w.LastRecordOffset(); err != ErrNoLastRecord {
   794  		t.Fatalf("Expected ErrNoLastRecord, got: %v", err)
   795  	}
   796  
   797  	require.NoError(t, w.Flush())
   798  
   799  	if _, err := w.LastRecordOffset(); err != ErrNoLastRecord {
   800  		t.Fatalf("LastRecordOffset: got: %v, want ErrNoLastRecord", err)
   801  	}
   802  
   803  	_, err := w.WriteRecord([]byte("testrecord"))
   804  	require.NoError(t, err)
   805  
   806  	if off, err := w.LastRecordOffset(); err != nil {
   807  		t.Fatalf("LastRecordOffset: %v", err)
   808  	} else if off != 0 {
   809  		t.Fatalf("LastRecordOffset: got %d, want 0", off)
   810  	}
   811  }
   812  
   813  func TestInvalidLogNum(t *testing.T) {
   814  	var buf bytes.Buffer
   815  	w := NewLogWriter(&buf, 1)
   816  	for i := 0; i < 10; i++ {
   817  		s := fmt.Sprintf("%04d\n", i)
   818  		_, err := w.WriteRecord([]byte(s))
   819  		require.NoError(t, err)
   820  	}
   821  	require.NoError(t, w.Close())
   822  
   823  	{
   824  		r := NewReader(bytes.NewReader(buf.Bytes()), 1)
   825  		for i := 0; i < 10; i++ {
   826  			rr, err := r.Next()
   827  			require.NoError(t, err)
   828  
   829  			x, err := ioutil.ReadAll(rr)
   830  			require.NoError(t, err)
   831  
   832  			s := fmt.Sprintf("%04d\n", i)
   833  			if s != string(x) {
   834  				t.Fatalf("expected %s, but found %s", s, x)
   835  			}
   836  		}
   837  		if _, err := r.Next(); err != io.EOF {
   838  			t.Fatalf("expected EOF, but found %s", err)
   839  		}
   840  	}
   841  
   842  	{
   843  		r := NewReader(bytes.NewReader(buf.Bytes()), 2)
   844  		if _, err := r.Next(); err != io.EOF {
   845  			t.Fatalf("expected %s, but found %s\n", io.EOF, err)
   846  		}
   847  	}
   848  }
   849  
   850  func TestSize(t *testing.T) {
   851  	var buf bytes.Buffer
   852  	zeroes := make([]byte, 8<<10)
   853  	w := NewWriter(&buf)
   854  	for i := 0; i < 100; i++ {
   855  		n := rand.Intn(len(zeroes))
   856  		_, err := w.WriteRecord(zeroes[:n])
   857  		require.NoError(t, err)
   858  		require.NoError(t, w.Flush())
   859  		if buf.Len() != int(w.Size()) {
   860  			t.Fatalf("expected %d, but found %d", buf.Len(), w.Size())
   861  		}
   862  	}
   863  	require.NoError(t, w.Close())
   864  }
   865  
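        // limitedWriter passes through the first limit calls to Write and silently
        // drops the rest, simulating a WAL whose tail was never persisted.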
   866  type limitedWriter struct {
   867  	io.Writer
   868  	limit int
   869  }
   870  
   871  func (w *limitedWriter) Write(p []byte) (n int, err error) {
   872  	w.limit--
   873  	if w.limit < 0 {
   874  		return len(p), nil
   875  	}
   876  	return w.Writer.Write(p)
   877  }
   878  
   879  func TestRecycleLog(t *testing.T) {
   880  	const min = 16
   881  	const max = 4096
   882  
   883  	rnd := rand.New(rand.NewSource(uint64(time.Now().UnixNano())))
   884  	randBlock := func() []byte {
   885  		data := make([]byte, rand.Intn(max-min)+min)
   886  		tmp := data
   887  		for len(tmp) >= 8 {
   888  			binary.LittleEndian.PutUint64(tmp, rand.Uint64())
   889  			tmp = tmp[8:]
   890  		}
   891  		r := rand.Uint64()
   892  		for i := 0; i < len(tmp); i++ {
   893  			tmp[i] = byte(r)
   894  			r >>= 8
   895  		}
   896  		return data
   897  	}
   898  
   899  	// Recycle a log file 100 times, writing a random number of records filled
   900  	// with random data.
   901  	backing := make([]byte, 1<<20)
   902  	for i := 1; i <= 100; i++ {
   903  		blocks := rnd.Intn(100)
   904  		limitedBuf := &limitedWriter{
   905  			Writer: bytes.NewBuffer(backing[:0]),
   906  			limit:  blocks,
   907  		}
   908  
   909  		w := NewLogWriter(limitedBuf, base.FileNum(i))
   910  		sizes := make([]int, 10+rnd.Intn(100))
   911  		for j := range sizes {
   912  			data := randBlock()
   913  			if _, err := w.WriteRecord(data); err != nil {
   914  				t.Fatalf("%d/%d: %v", i, j, err)
   915  			}
   916  			sizes[j] = len(data)
   917  		}
   918  		if err := w.Close(); err != nil {
   919  			t.Fatalf("%d: %v", i, err)
   920  		}
   921  
   922  		r := NewReader(bytes.NewReader(backing), base.FileNum(i))
   923  		for j := range sizes {
   924  			rr, err := r.Next()
   925  			if err != nil {
   926  				// If we limited output then an EOF, zeroed, or invalid chunk is expected.
   927  				if limitedBuf.limit < 0 && (err == io.EOF || err == ErrZeroedChunk || err == ErrInvalidChunk) {
   928  					break
   929  				}
   930  				t.Fatalf("%d/%d: %v", i, j, err)
   931  			}
   932  			x, err := ioutil.ReadAll(rr)
   933  			if err != nil {
   934  				// If we limited output then an EOF, zeroed, or invalid chunk is expected.
   935  				if limitedBuf.limit < 0 && (err == io.EOF || err == ErrZeroedChunk || err == ErrInvalidChunk) {
   936  					break
   937  				}
   938  				t.Fatalf("%d/%d: %v", i, j, err)
   939  			}
   940  			if sizes[j] != len(x) {
   941  				t.Fatalf("%d/%d: expected record %d, but found %d", i, j, sizes[j], len(x))
   942  			}
   943  		}
   944  		if _, err := r.Next(); err != io.EOF && err != ErrZeroedChunk && err != ErrInvalidChunk {
   945  			t.Fatalf("%d: expected EOF, but found %v", i, err)
   946  		}
   947  	}
   948  }
   949  
   950  func TestTruncatedLog(t *testing.T) {
   951  	backing := make([]byte, 2*blockSize)
   952  	w := NewLogWriter(bytes.NewBuffer(backing[:0]), base.FileNum(1))
   953  	// Write a record that spans 2 blocks.
   954  	_, err := w.WriteRecord(bytes.Repeat([]byte("s"), blockSize+100))
   955  	require.NoError(t, err)
   956  	require.NoError(t, w.Close())
   957  	// Create a reader only for the first block.
   958  	r := NewReader(bytes.NewReader(backing[:blockSize]), base.FileNum(1))
   959  	rr, err := r.Next()
   960  	require.NoError(t, err)
   961  	_, err = ioutil.ReadAll(rr)
   962  	require.EqualValues(t, err, io.ErrUnexpectedEOF)
   963  }
   964  
   965  func TestRecycleLogWithPartialBlock(t *testing.T) {
   966  	backing := make([]byte, 27)
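        	// 27 = an 11-byte header + 5-byte payload for the record, plus the
        	// 11-byte EOF chunk written by Close.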
   967  	w := NewLogWriter(bytes.NewBuffer(backing[:0]), base.FileNum(1))
   968  	// Will write a chunk with an 11-byte header + a 5-byte payload.
   969  	_, err := w.WriteRecord([]byte("aaaaa"))
   970  	require.NoError(t, err)
   971  	// Close will write an 11-byte EOF chunk.
   972  	require.NoError(t, w.Close())
   973  
   974  	w = NewLogWriter(bytes.NewBuffer(backing[:0]), base.FileNum(2))
   975  	// Will write a chunk with an 11-byte header + a 1-byte payload.
   976  	_, err = w.WriteRecord([]byte("a"))
   977  	require.NoError(t, err)
   978  	// Close will write an 11-byte EOF chunk.
   979  	require.NoError(t, w.Close())
   980  
   981  	r := NewReader(bytes.NewReader(backing), base.FileNum(2))
   982  	_, err = r.Next()
   983  	require.NoError(t, err)
   984  	// 4 bytes left, which are not enough for even the legacy header.
   985  	if _, err = r.Next(); err != io.EOF {
   986  		t.Fatalf("unexpected error: %v", err)
   987  	}
   988  }
   989  
   990  func TestRecycleLogNumberOverflow(t *testing.T) {
   991  	// We truncate log numbers to 32-bits when writing to the WAL. Test log
   992  	// recycling at the wraparound point, ensuring that EOF chunks are
   993  	// interpreted correctly.
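        	// The on-disk log number field is 4 bytes, so base.FileNum(math.MaxUint32+1)
        	// is written as 0; the reader is expected to truncate its own log number the
        	// same way before comparing.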
   994  
   995  	backing := make([]byte, 27)
   996  	w := NewLogWriter(bytes.NewBuffer(backing[:0]), base.FileNum(math.MaxUint32))
   997  	// Will write a chunk with an 11-byte header + a 5-byte payload.
   998  	_, err := w.WriteRecord([]byte("aaaaa"))
   999  	require.NoError(t, err)
  1000  	// Close will write an 11-byte EOF chunk.
  1001  	require.NoError(t, w.Close())
  1002  
  1003  	w = NewLogWriter(bytes.NewBuffer(backing[:0]), base.FileNum(math.MaxUint32+1))
  1004  	// Will write a chunk with an 11-byte header + a 1-byte payload.
  1005  	_, err = w.WriteRecord([]byte("a"))
  1006  	require.NoError(t, err)
  1007  	// Close will write an 11-byte EOF chunk.
  1008  	require.NoError(t, w.Close())
  1009  
  1010  	r := NewReader(bytes.NewReader(backing), base.FileNum(math.MaxUint32+1))
  1011  	_, err = r.Next()
  1012  	require.NoError(t, err)
  1013  	// 4 bytes left, which are not enough for even the legacy header.
  1014  	if _, err = r.Next(); err != io.EOF {
  1015  		t.Fatalf("unexpected error: %v", err)
  1016  	}
  1017  }
  1018  
  1019  func TestRecycleLogWithPartialRecord(t *testing.T) {
  1020  	const recordSize = (blockSize * 3) / 2
  1021  
  1022  	// Write a record that is larger than the log block size.
  1023  	backing1 := make([]byte, 2*blockSize)
  1024  	w := NewLogWriter(bytes.NewBuffer(backing1[:0]), base.FileNum(1))
  1025  	_, err := w.WriteRecord(bytes.Repeat([]byte("a"), recordSize))
  1026  	require.NoError(t, err)
  1027  	require.NoError(t, w.Close())
  1028  
  1029  	// Write another record to a new incarnation of the WAL that is larger than
  1030  	// the block size.
  1031  	backing2 := make([]byte, 2*blockSize)
  1032  	w = NewLogWriter(bytes.NewBuffer(backing2[:0]), base.FileNum(2))
  1033  	_, err = w.WriteRecord(bytes.Repeat([]byte("b"), recordSize))
  1034  	require.NoError(t, err)
  1035  	require.NoError(t, w.Close())
  1036  
  1037  	// Copy the second block from the first WAL to the second block of the second
  1038  	// WAL. This produces a scenario where it appears we crashed after writing
  1039  	// the first block of the second WAL, but before writing the second block.
  1040  	copy(backing2[blockSize:], backing1[blockSize:])
  1041  
  1042  	// Verify that we can't read a partial record from the second WAL.
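        	// The chunk in backing2's second block still carries log number 1, so the
        	// reader for log 2 should refuse to treat it as a continuation of the
        	// record begun in the first block.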
  1043  	r := NewReader(bytes.NewReader(backing2), base.FileNum(2))
  1044  	rr, err := r.Next()
  1045  	require.NoError(t, err)
  1046  
  1047  	_, err = ioutil.ReadAll(rr)
  1048  	require.Equal(t, err, ErrInvalidChunk)
  1049  }
  1050  
  1051  func BenchmarkRecordWrite(b *testing.B) {
  1052  	for _, size := range []int{8, 16, 32, 64, 256, 1028, 4096, 65_536} {
  1053  		b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
  1054  			w := NewLogWriter(ioutil.Discard, 0 /* logNum */)
  1055  			defer w.Close()
  1056  			buf := make([]byte, size)
  1057  
  1058  			b.SetBytes(int64(len(buf)))
  1059  			b.ResetTimer()
  1060  			for i := 0; i < b.N; i++ {
  1061  				if _, err := w.WriteRecord(buf); err != nil {
  1062  					b.Fatal(err)
  1063  				}
  1064  			}
  1065  			b.StopTimer()
  1066  		})
  1067  	}
  1068  }