github.com/ethereum/go-ethereum@v1.16.1/core/rawdb/freezer_table_test.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package rawdb
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"fmt"
    23  	"math/rand"
    24  	"os"
    25  	"path/filepath"
    26  	"reflect"
    27  	"testing"
    28  	"testing/quick"
    29  
    30  	"github.com/davecgh/go-spew/spew"
    31  	"github.com/ethereum/go-ethereum/metrics"
    32  	"github.com/stretchr/testify/require"
    33  )
    34  
    35  // TestFreezerBasics tests initializing a freezer table from scratch, writing to
    36  // the table, and reading it back.
    37  func TestFreezerBasics(t *testing.T) {
    38  	t.Parallel()
    39  	// set cutoff at 50 bytes
    40  	f, err := newTable(os.TempDir(),
    41  		fmt.Sprintf("unittest-%d", rand.Uint64()),
    42  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, freezerTableConfig{noSnappy: true}, false)
    43  	if err != nil {
    44  		t.Fatal(err)
    45  	}
    46  	defer f.Close()
    47  
    48  	// Write 15 bytes 255 times; at 3 items per 50-byte file this results in 85 files
    49  	writeChunks(t, f, 255, 15)
    50  
    51  	//print(t, f, 0)
    52  	//print(t, f, 1)
    53  	//print(t, f, 2)
    54  	//
    55  	//db[0] =  000000000000000000000000000000
    56  	//db[1] =  010101010101010101010101010101
    57  	//db[2] =  020202020202020202020202020202
    58  
    59  	for y := 0; y < 255; y++ {
    60  		exp := getChunk(15, y)
    61  		got, err := f.Retrieve(uint64(y))
    62  		if err != nil {
    63  			t.Fatalf("reading item %d: %v", y, err)
    64  		}
    65  		if !bytes.Equal(got, exp) {
    66  			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
    67  		}
    68  	}
    69  	// Check that we cannot read too far
    70  	_, err = f.Retrieve(uint64(255))
    71  	if err != errOutOfBounds {
    72  		t.Fatal(err)
    73  	}
    74  }
    75  
    76  // TestFreezerBasicsClosing tests the same as TestFreezerBasics, but also closes
    77  // and reopens the freezer between every operation.
    78  func TestFreezerBasicsClosing(t *testing.T) {
    79  	t.Parallel()
    80  	// set cutoff at 50 bytes
    81  	var (
    82  		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
    83  		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
    84  		f          *freezerTable
    85  		err        error
    86  	)
    87  	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
    88  	if err != nil {
    89  		t.Fatal(err)
    90  	}
    91  
    92  	// Write 15 bytes 255 times; at 3 items per 50-byte file this results in 85 files.
    93  	// In-between writes, the table is closed and re-opened.
    94  	for x := 0; x < 255; x++ {
    95  		data := getChunk(15, x)
    96  		batch := f.newBatch()
    97  		require.NoError(t, batch.AppendRaw(uint64(x), data))
    98  		require.NoError(t, batch.commit())
    99  		f.Close()
   100  
   101  		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   102  		if err != nil {
   103  			t.Fatal(err)
   104  		}
   105  	}
   106  	defer f.Close()
   107  
   108  	for y := 0; y < 255; y++ {
   109  		exp := getChunk(15, y)
   110  		got, err := f.Retrieve(uint64(y))
   111  		if err != nil {
   112  			t.Fatal(err)
   113  		}
   114  		if !bytes.Equal(got, exp) {
   115  			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
   116  		}
   117  		f.Close()
   118  		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   119  		if err != nil {
   120  			t.Fatal(err)
   121  		}
   122  	}
   123  }
   124  
   125  // TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
   126  func TestFreezerRepairDanglingHead(t *testing.T) {
   127  	t.Parallel()
   128  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   129  	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
   130  
   131  	// Fill table
   132  	{
   133  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   134  		if err != nil {
   135  			t.Fatal(err)
   136  		}
   137  		// Write 15 bytes 255 times
   138  		writeChunks(t, f, 255, 15)
   139  
   140  		// The last item should be there
   141  		if _, err = f.Retrieve(0xfe); err != nil {
   142  			t.Fatal(err)
   143  		}
   144  		f.Close()
   145  	}
   146  
   147  	// open the index
   148  	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
   149  	if err != nil {
   150  		t.Fatalf("Failed to open index file: %v", err)
   151  	}
   152  	// Remove 4 bytes from the end, leaving a partial (corrupt) final index entry
   153  	stat, err := idxFile.Stat()
   154  	if err != nil {
   155  		t.Fatalf("Failed to stat index file: %v", err)
   156  	}
   157  	idxFile.Truncate(stat.Size() - 4)
   158  	idxFile.Close()
   159  
   160  	// Now open it again
   161  	{
   162  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   163  		if err != nil {
   164  			t.Fatal(err)
   165  		}
   166  		// The last item (0xfe) should be missing
   167  		if _, err = f.Retrieve(0xfe); err == nil {
   168  			t.Errorf("Expected error for missing index entry")
   169  		}
   170  		// The one before should still be there
   171  		if _, err = f.Retrieve(0xfd); err != nil {
   172  			t.Fatalf("Expected no error, got %v", err)
   173  		}
   174  	}
   175  }
   176  
   177  // TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
   178  func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
   179  	t.Parallel()
   180  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   181  	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
   182  
   183  	// Fill a table and close it
   184  	{
   185  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   186  		if err != nil {
   187  			t.Fatal(err)
   188  		}
   189  		// Write 15 bytes 255 times
   190  		writeChunks(t, f, 255, 15)
   191  
   192  		// The last item should be there
   193  		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
   194  			t.Fatal(err)
   195  		}
   196  		f.Close()
   197  	}
   198  
   199  	// open the index
   200  	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
   201  	if err != nil {
   202  		t.Fatalf("Failed to open index file: %v", err)
   203  	}
   204  	// Remove everything but the first item, and leave data unaligned
   205  	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
   206  	idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
   207  	idxFile.Close()
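        	// For reference: each index entry is indexEntrySize (6) bytes, encoded as a
        	// big-endian uint16 file number followed by a big-endian uint32 data offset
        	// (see indexEntry.append in freezer_table.go). A minimal sketch of that
        	// encoding, assuming those field widths (encodeIndexEntry is hypothetical):
        	//
        	//	func encodeIndexEntry(filenum uint16, offset uint32) []byte {
        	//		b := make([]byte, indexEntrySize)
        	//		binary.BigEndian.PutUint16(b[0:2], filenum)
        	//		binary.BigEndian.PutUint32(b[2:6], offset)
        	//		return b
        	//	}
        	//
        	// Truncating to 2.5 entries therefore leaves the third entry corrupt.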
   208  
   209  	// Now open it again
   210  	{
   211  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   212  		if err != nil {
   213  			t.Fatal(err)
   214  		}
   215  		// The first item should be there
   216  		if _, err = f.Retrieve(0); err != nil {
   217  			t.Fatal(err)
   218  		}
   219  		// The second item should be missing
   220  		if _, err = f.Retrieve(1); err == nil {
   221  			t.Errorf("Expected error for missing index entry")
   222  		}
   223  		// We should now be able to store items again, from item = 1
   224  		batch := f.newBatch()
   225  		for x := 1; x < 0xff; x++ {
   226  			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
   227  		}
   228  		require.NoError(t, batch.commit())
   229  		f.Close()
   230  	}
   231  
   232  	// And if we open it, we should now be able to read all of them (new values)
   233  	{
   234  		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   235  		for y := 1; y < 255; y++ {
   236  			exp := getChunk(15, ^y)
   237  			got, err := f.Retrieve(uint64(y))
   238  			if err != nil {
   239  				t.Fatal(err)
   240  			}
   241  			if !bytes.Equal(got, exp) {
   242  				t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
   243  			}
   244  		}
   245  	}
   246  }
   247  
   248  // TestSnappyDetection tests that a table written in one compression mode cannot be read when opened in the other, since the two modes use different file extensions
   249  func TestSnappyDetection(t *testing.T) {
   250  	t.Parallel()
   251  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   252  	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())
   253  
   254  	// Open without snappy
   255  	{
   256  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   257  		if err != nil {
   258  			t.Fatal(err)
   259  		}
   260  		// Write 15 bytes 255 times
   261  		writeChunks(t, f, 255, 15)
   262  		f.Close()
   263  	}
   264  
   265  	// Open again without snappy
   266  	{
   267  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   268  		if err != nil {
   269  			t.Fatal(err)
   270  		}
   271  		// There should be 255 items
   272  		if _, err = f.Retrieve(0xfe); err != nil {
   273  			f.Close()
   274  			t.Fatalf("expected no error, got %v", err)
   275  		}
   276  	}
   277  
   278  	// Open with snappy; it uses different files, so the table appears empty
   279  	{
   280  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: false}, false)
   281  		if err != nil {
   282  			t.Fatal(err)
   283  		}
   284  		if _, err = f.Retrieve(0); err == nil {
   285  			f.Close()
   286  			t.Fatalf("expected empty table")
   287  		}
   288  	}
   289  }
   290  
   291  func assertFileSize(f string, size int64) error {
   292  	stat, err := os.Stat(f)
   293  	if err != nil {
   294  		return err
   295  	}
   296  	if stat.Size() != size {
   297  		return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
   298  	}
   299  	return nil
   300  }
   301  
   302  // TestFreezerRepairDanglingIndex checks that if the index has more entries than there is data,
   303  // the index is repaired
   304  func TestFreezerRepairDanglingIndex(t *testing.T) {
   305  	t.Parallel()
   306  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   307  	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())
   308  
   309  	// Fill a table and close it
   310  	{
   311  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   312  		if err != nil {
   313  			t.Fatal(err)
   314  		}
   315  	// Write 15 bytes 9 times : 135 bytes
   316  		writeChunks(t, f, 9, 15)
   317  
   318  		// The last item should be there
   319  		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
   320  			f.Close()
   321  			t.Fatal(err)
   322  		}
   323  		f.Close()
   324  		// File sizes should be 45, 45, 45 : 3, 3, 3 items per file
   325  	}
   326  
   327  	// Crop third file
   328  	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
   329  	// Truncate third file: 45, 45, 20
   330  	{
   331  		if err := assertFileSize(fileToCrop, 45); err != nil {
   332  			t.Fatal(err)
   333  		}
   334  		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
   335  		if err != nil {
   336  			t.Fatal(err)
   337  		}
   338  		file.Truncate(20)
   339  		file.Close()
   340  	}
   341  
   342  	// Open the db again
   343  	// It should restore the file(s) to
   344  	// 45, 45, 15
   345  	// with 3+3+1 items
   346  	{
   347  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   348  		if err != nil {
   349  			t.Fatal(err)
   350  		}
   351  		defer f.Close()
   352  		if f.items.Load() != 7 {
   353  			t.Fatalf("expected %d items, got %d", 7, f.items.Load())
   354  		}
   355  		if err := assertFileSize(fileToCrop, 15); err != nil {
   356  			t.Fatal(err)
   357  		}
   358  	}
   359  }
   360  
   361  func TestFreezerTruncate(t *testing.T) {
   362  	t.Parallel()
   363  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   364  	fname := fmt.Sprintf("truncation-%d", rand.Uint64())
   365  
   366  	// Fill table
   367  	{
   368  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   369  		if err != nil {
   370  			t.Fatal(err)
   371  		}
   372  		// Write 15 bytes 30 times
   373  		writeChunks(t, f, 30, 15)
   374  
   375  		// The last item should be there
   376  		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
   377  			t.Fatal(err)
   378  		}
   379  		f.Close()
   380  	}
   381  
   382  	// Reopen, truncate
   383  	{
   384  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   385  		if err != nil {
   386  			t.Fatal(err)
   387  		}
   388  		defer f.Close()
   389  		f.truncateHead(10) // 150 bytes
   390  		if f.items.Load() != 10 {
   391  			t.Fatalf("expected %d items, got %d", 10, f.items.Load())
   392  		}
   393  		// 45, 45, 45, 15 -- bytes should be 15
   394  		if f.headBytes != 15 {
   395  			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
   396  		}
   397  	}
   398  }
   399  
   400  // TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
   401  // That will rewind the index, and _should_ truncate the head file
   402  func TestFreezerRepairFirstFile(t *testing.T) {
   403  	t.Parallel()
   404  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   405  	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
   406  
   407  	// Fill table
   408  	{
   409  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   410  		if err != nil {
   411  			t.Fatal(err)
   412  		}
   413  		// Write 80 bytes, splitting out into two files
   414  		batch := f.newBatch()
   415  		require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
   416  		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
   417  		require.NoError(t, batch.commit())
   418  
   419  		// The last item should be there
   420  		if _, err = f.Retrieve(1); err != nil {
   421  			t.Fatal(err)
   422  		}
   423  		f.Close()
   424  	}
   425  
   426  	// Truncate the file in half
   427  	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
   428  	{
   429  		if err := assertFileSize(fileToCrop, 40); err != nil {
   430  			t.Fatal(err)
   431  		}
   432  		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
   433  		if err != nil {
   434  			t.Fatal(err)
   435  		}
   436  		file.Truncate(20)
   437  		file.Close()
   438  	}
   439  
   440  	// Reopen
   441  	{
   442  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   443  		if err != nil {
   444  			t.Fatal(err)
   445  		}
   446  		if f.items.Load() != 1 {
   447  			f.Close()
   448  			t.Fatalf("expected %d items, got %d", 1, f.items.Load())
   449  		}
   450  
   451  		// Write 40 bytes
   452  		batch := f.newBatch()
   453  		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
   454  		require.NoError(t, batch.commit())
   455  
   456  		f.Close()
   457  
   458  		// Should have been truncated down to zero and then 40 written
   459  		if err := assertFileSize(fileToCrop, 40); err != nil {
   460  			t.Fatal(err)
   461  		}
   462  	}
   463  }
   464  
   465  // TestFreezerReadAndTruncate tests:
   466  // - we have a table open
   467  // - do some reads, so files are open in readonly
   468  // - truncate so those files are 'removed'
   469  // - check that we did not keep the rdonly file descriptors
   470  func TestFreezerReadAndTruncate(t *testing.T) {
   471  	t.Parallel()
   472  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   473  	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
   474  
   475  	// Fill table
   476  	{
   477  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   478  		if err != nil {
   479  			t.Fatal(err)
   480  		}
   481  		// Write 15 bytes 30 times
   482  		writeChunks(t, f, 30, 15)
   483  
   484  		// The last item should be there
   485  		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
   486  			t.Fatal(err)
   487  		}
   488  		f.Close()
   489  	}
   490  
   491  	// Reopen and read all files
   492  	{
   493  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   494  		if err != nil {
   495  			t.Fatal(err)
   496  		}
   497  		if f.items.Load() != 30 {
   498  			f.Close()
   499  			t.Fatalf("expected %d items, got %d", 30, f.items.Load())
   500  		}
   501  		for y := byte(0); y < 30; y++ {
   502  			f.Retrieve(uint64(y))
   503  		}
   504  
   505  		// Now, truncate back to zero
   506  		f.truncateHead(0)
   507  
   508  		// Write the data again
   509  		batch := f.newBatch()
   510  		for x := 0; x < 30; x++ {
   511  			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
   512  		}
   513  		require.NoError(t, batch.commit())
   514  		f.Close()
   515  	}
   516  }
   517  
   518  func TestFreezerOffset(t *testing.T) {
   519  	t.Parallel()
   520  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   521  	fname := fmt.Sprintf("offset-%d", rand.Uint64())
   522  
   523  	// Fill table
   524  	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, freezerTableConfig{noSnappy: true}, false)
   525  	if err != nil {
   526  		t.Fatal(err)
   527  	}
   528  
   529  	// Write 6 x 20 bytes, splitting out into three files
   530  	batch := f.newBatch()
   531  	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
   532  	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
   533  
   534  	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
   535  	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
   536  
   537  	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
   538  	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
   539  	require.NoError(t, batch.commit())
   540  
   541  	t.Log(f.dumpIndexString(0, 100))
   542  
   543  	// Now crop it.
   544  	f.truncateTail(4)
   545  	f.Close()
   546  
   547  	// Now open again
   548  	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, freezerTableConfig{noSnappy: true}, false)
   549  	if err != nil {
   550  		t.Fatal(err)
   551  	}
   552  	t.Log(f.dumpIndexString(0, 100))
   553  
   554  	// It should allow writing item 6.
   555  	batch = f.newBatch()
   556  	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
   557  	require.NoError(t, batch.commit())
   558  
   559  	checkRetrieveError(t, f, map[uint64]error{
   560  		0: errOutOfBounds,
   561  		1: errOutOfBounds,
   562  		2: errOutOfBounds,
   563  		3: errOutOfBounds,
   564  	})
   565  	checkRetrieve(t, f, map[uint64][]byte{
   566  		4: getChunk(20, 0xbb),
   567  		5: getChunk(20, 0xaa),
   568  		6: getChunk(20, 0x99),
   569  	})
   570  	f.Close()
   571  
   572  	// Edit the index again, with a much larger initial offset of 1M.
   573  	{
   574  		// Read the index file
   575  		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
   576  		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
   577  		if err != nil {
   578  			t.Fatal(err)
   579  		}
   580  		indexBuf := make([]byte, 3*indexEntrySize)
   581  		indexFile.Read(indexBuf)
   582  
   583  		// Update the index file, so that we store
   584  		// [ file = 2, offset = 1M ] at index zero
   585  
   586  		zeroIndex := indexEntry{
   587  			offset:  uint32(1000000), // We have removed 1M items
   588  			filenum: uint32(2),       // First file is 2
   589  		}
   590  		buf := zeroIndex.append(nil)
   591  
   592  		// Overwrite index zero
   593  		copy(indexBuf, buf)
   594  		indexFile.WriteAt(indexBuf, 0)
   595  		indexFile.Close()
   596  	}
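        	// (On reopen, the repair logic reads index entry zero to recover the tail
        	// file number and the count of already-deleted items, which is what shifts
        	// all remaining items up to index 1M.)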
   597  
   598  	// Check that existing items have been moved to index 1M.
   599  	{
   600  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, freezerTableConfig{noSnappy: true}, false)
   601  		if err != nil {
   602  			t.Fatal(err)
   603  		}
   604  		defer f.Close()
   605  		t.Log(f.dumpIndexString(0, 100))
   606  
   607  		checkRetrieveError(t, f, map[uint64]error{
   608  			0:      errOutOfBounds,
   609  			1:      errOutOfBounds,
   610  			2:      errOutOfBounds,
   611  			3:      errOutOfBounds,
   612  			999999: errOutOfBounds,
   613  		})
   614  		checkRetrieve(t, f, map[uint64][]byte{
   615  			1000000: getChunk(20, 0xbb),
   616  			1000001: getChunk(20, 0xaa),
   617  		})
   618  	}
   619  }
   620  
   621  func assertTableSize(t *testing.T, f *freezerTable, size int) {
   622  	t.Helper()
   623  	if got, err := f.size(); got != uint64(size) {
   624  		t.Fatalf("expected size of %d bytes, got %d, err: %v", size, got, err)
   625  	}
   626  }
   627  
   628  func TestTruncateTail(t *testing.T) {
   629  	t.Parallel()
   630  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   631  	fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())
   632  
   633  	// Fill table
   634  	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, freezerTableConfig{noSnappy: true}, false)
   635  	if err != nil {
   636  		t.Fatal(err)
   637  	}
   638  
   639  	// Write 7 x 20 bytes, splitting out into four files
   640  	batch := f.newBatch()
   641  	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
   642  	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
   643  	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
   644  	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
   645  	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
   646  	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
   647  	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
   648  	require.NoError(t, batch.commit())
   649  
   650  	// nothing to do, all the items should still be there.
   651  	f.truncateTail(0)
   652  	fmt.Println(f.dumpIndexString(0, 1000))
   653  	checkRetrieve(t, f, map[uint64][]byte{
   654  		0: getChunk(20, 0xFF),
   655  		1: getChunk(20, 0xEE),
   656  		2: getChunk(20, 0xdd),
   657  		3: getChunk(20, 0xcc),
   658  		4: getChunk(20, 0xbb),
   659  		5: getChunk(20, 0xaa),
   660  		6: getChunk(20, 0x11),
   661  	})
   662  	// maxFileSize*fileCount + headBytes + indexFileSize - hiddenBytes
   663  	expected := 20*7 + 48 - 0
   664  	assertTableSize(t, f, expected)
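        	// Worked example: data = 3 full files * 40 bytes + 20 head bytes = 140
        	// (= 20*7); the index holds 1 header + 7 item entries = 8 * 6 bytes = 48;
        	// nothing is hidden yet, so the total is 188 bytes.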
   665  
   666  	// truncate a single element (item 0); deletion is only supported at file level
   667  	f.truncateTail(1)
   668  	fmt.Println(f.dumpIndexString(0, 1000))
   669  	checkRetrieveError(t, f, map[uint64]error{
   670  		0: errOutOfBounds,
   671  	})
   672  	checkRetrieve(t, f, map[uint64][]byte{
   673  		1: getChunk(20, 0xEE),
   674  		2: getChunk(20, 0xdd),
   675  		3: getChunk(20, 0xcc),
   676  		4: getChunk(20, 0xbb),
   677  		5: getChunk(20, 0xaa),
   678  		6: getChunk(20, 0x11),
   679  	})
   680  	expected = 20*7 + 48 - 20
   681  	assertTableSize(t, f, expected)
   682  
   683  	// Reopen the table, the deletion information should be persisted as well
   684  	f.Close()
   685  	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, freezerTableConfig{noSnappy: true}, false)
   686  	if err != nil {
   687  		t.Fatal(err)
   688  	}
   689  	checkRetrieveError(t, f, map[uint64]error{
   690  		0: errOutOfBounds,
   691  	})
   692  	checkRetrieve(t, f, map[uint64][]byte{
   693  		1: getChunk(20, 0xEE),
   694  		2: getChunk(20, 0xdd),
   695  		3: getChunk(20, 0xcc),
   696  		4: getChunk(20, 0xbb),
   697  		5: getChunk(20, 0xaa),
   698  		6: getChunk(20, 0x11),
   699  	})
   700  
   701  	// truncate two elements (items 0 and 1); file 0 should be deleted
   702  	f.truncateTail(2)
   703  	checkRetrieveError(t, f, map[uint64]error{
   704  		0: errOutOfBounds,
   705  		1: errOutOfBounds,
   706  	})
   707  	checkRetrieve(t, f, map[uint64][]byte{
   708  		2: getChunk(20, 0xdd),
   709  		3: getChunk(20, 0xcc),
   710  		4: getChunk(20, 0xbb),
   711  		5: getChunk(20, 0xaa),
   712  		6: getChunk(20, 0x11),
   713  	})
   714  	expected = 20*5 + 36 - 0
   715  	assertTableSize(t, f, expected)
   716  
   717  	// Reopen the table, the above testing should still pass
   718  	f.Close()
   719  	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, freezerTableConfig{noSnappy: true}, false)
   720  	if err != nil {
   721  		t.Fatal(err)
   722  	}
   723  	defer f.Close()
   724  
   725  	checkRetrieveError(t, f, map[uint64]error{
   726  		0: errOutOfBounds,
   727  		1: errOutOfBounds,
   728  	})
   729  	checkRetrieve(t, f, map[uint64][]byte{
   730  		2: getChunk(20, 0xdd),
   731  		3: getChunk(20, 0xcc),
   732  		4: getChunk(20, 0xbb),
   733  		5: getChunk(20, 0xaa),
   734  		6: getChunk(20, 0x11),
   735  	})
   736  
   737  	// truncate 3 more elements (items 2, 3, 4); file 1 should be deleted and
   738  	// file 2 should only contain item 5
   739  	f.truncateTail(5)
   740  	checkRetrieveError(t, f, map[uint64]error{
   741  		0: errOutOfBounds,
   742  		1: errOutOfBounds,
   743  		2: errOutOfBounds,
   744  		3: errOutOfBounds,
   745  		4: errOutOfBounds,
   746  	})
   747  	checkRetrieve(t, f, map[uint64][]byte{
   748  		5: getChunk(20, 0xaa),
   749  		6: getChunk(20, 0x11),
   750  	})
   751  	expected = 20*3 + 24 - 20
   752  	assertTableSize(t, f, expected)
   753  
   754  	// truncate all, the entire freezer should be deleted
   755  	f.truncateTail(7)
   756  	checkRetrieveError(t, f, map[uint64]error{
   757  		0: errOutOfBounds,
   758  		1: errOutOfBounds,
   759  		2: errOutOfBounds,
   760  		3: errOutOfBounds,
   761  		4: errOutOfBounds,
   762  		5: errOutOfBounds,
   763  		6: errOutOfBounds,
   764  	})
   765  	expected = 12 // only the index remains: 2 entries * indexEntrySize
   766  	assertTableSize(t, f, expected)
   767  }
   768  
   769  func TestTruncateHead(t *testing.T) {
   770  	t.Parallel()
   771  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   772  	fname := fmt.Sprintf("truncate-head-below-tail-%d", rand.Uint64())
   773  
   774  	// Fill table
   775  	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, freezerTableConfig{noSnappy: true}, false)
   776  	if err != nil {
   777  		t.Fatal(err)
   778  	}
   779  
   780  	// Write 7 x 20 bytes, splitting out into four files
   781  	batch := f.newBatch()
   782  	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
   783  	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
   784  	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
   785  	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
   786  	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
   787  	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
   788  	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
   789  	require.NoError(t, batch.commit())
   790  
   791  	f.truncateTail(4) // Tail = 4
   792  
   793  	// Truncate the head down to the tail (4); the entire table should be emptied
   794  	f.truncateHead(4)
   795  	checkRetrieveError(t, f, map[uint64]error{
   796  		0: errOutOfBounds, // Deleted by tail
   797  		1: errOutOfBounds, // Deleted by tail
   798  		2: errOutOfBounds, // Deleted by tail
   799  		3: errOutOfBounds, // Deleted by tail
   800  		4: errOutOfBounds, // Deleted by Head
   801  		5: errOutOfBounds, // Deleted by Head
   802  		6: errOutOfBounds, // Deleted by Head
   803  	})
   804  
   805  	// Append new items
   806  	batch = f.newBatch()
   807  	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
   808  	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
   809  	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
   810  	require.NoError(t, batch.commit())
   811  
   812  	checkRetrieve(t, f, map[uint64][]byte{
   813  		4: getChunk(20, 0xbb),
   814  		5: getChunk(20, 0xaa),
   815  		6: getChunk(20, 0x11),
   816  	})
   817  }
   818  
   819  func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
   820  	t.Helper()
   821  
   822  	for item, wantBytes := range items {
   823  		value, err := f.Retrieve(item)
   824  		if err != nil {
   825  			t.Fatalf("can't get expected item %d: %v", item, err)
   826  		}
   827  		if !bytes.Equal(value, wantBytes) {
   828  			t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
   829  		}
   830  	}
   831  }
   832  
   833  func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
   834  	t.Helper()
   835  
   836  	for item, wantError := range items {
   837  		value, err := f.Retrieve(item)
   838  		if err == nil {
   839  			t.Fatalf("unexpected value %x for item %d, want error %v", value, item, wantError)
   840  		}
   841  		if err != wantError {
   842  			t.Fatalf("wrong error for item %d: %v", item, err)
   843  		}
   844  	}
   845  }
   846  
   847  // getChunk returns a chunk of data of the given size, with every byte set to 'b'
   848  func getChunk(size int, b int) []byte {
   849  	data := make([]byte, size)
   850  	for i := range data {
   851  		data[i] = byte(b)
   852  	}
   853  	return data
   854  }
   855  
   856  // TODO (?)
   857  // - test that if we remove several head-files, as well as the last data-file,
   858  //   the index is truncated accordingly
   859  // Right now, the freezer would fail on these conditions:
   860  // 1. have data files d0, d1, d2, d3
   861  // 2. remove d2,d3
   862  //
   863  // However, all 'normal' failure modes arising due to failing to sync() or save a file
   864  // should be handled already, and the case described above can only (?) happen if an
   865  // external process/user deletes files from the filesystem.
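        //
        // A hypothetical regression test for that scenario could look roughly like the
        // sketch below (fname, rm, wm, sg as in the tests above; the repair logic would
        // currently fail on the reopen):
        //
        //	f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
        //	writeChunks(t, f, 12, 15) // 3 items per file -> d0, d1, d2, d3
        //	f.Close()
        //	os.Remove(filepath.Join(os.TempDir(), fname+".0002.rdat"))
        //	os.Remove(filepath.Join(os.TempDir(), fname+".0003.rdat"))
        //	// Reopening should ideally truncate the index back to the end of d1.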
   866  
   867  func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
   868  	t.Helper()
   869  
   870  	batch := ft.newBatch()
   871  	for i := 0; i < n; i++ {
   872  		if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
   873  			t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
   874  		}
   875  	}
   876  	if err := batch.commit(); err != nil {
   877  		t.Fatalf("Commit returned error: %v", err)
   878  	}
   879  }
   880  
   881  // TestSequentialRead does some basic tests on RetrieveItems.
   882  func TestSequentialRead(t *testing.T) {
   883  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   884  	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
   885  	{ // Fill table
   886  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   887  		if err != nil {
   888  			t.Fatal(err)
   889  		}
   890  		// Write 15 bytes 30 times
   891  		writeChunks(t, f, 30, 15)
   892  		f.dumpIndexStdout(0, 30)
   893  		f.Close()
   894  	}
   895  	{ // Open it, iterate, verify iteration
   896  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, freezerTableConfig{noSnappy: true}, false)
   897  		if err != nil {
   898  			t.Fatal(err)
   899  		}
   900  		items, err := f.RetrieveItems(0, 10000, 100000)
   901  		if err != nil {
   902  			t.Fatal(err)
   903  		}
   904  		if have, want := len(items), 30; have != want {
   905  			t.Fatalf("want %d items, have %d ", want, have)
   906  		}
   907  		for i, have := range items {
   908  			want := getChunk(15, i)
   909  			if !bytes.Equal(want, have) {
   910  				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
   911  			}
   912  		}
   913  		f.Close()
   914  	}
   915  	{ // Open it, iterate, verify byte limit. The byte limit is less than item
   916  		// size, so each lookup should only return one item
   917  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, freezerTableConfig{noSnappy: true}, false)
   918  		if err != nil {
   919  			t.Fatal(err)
   920  		}
   921  		items, err := f.RetrieveItems(0, 10000, 10)
   922  		if err != nil {
   923  			t.Fatal(err)
   924  		}
   925  		if have, want := len(items), 1; have != want {
   926  			t.Fatalf("want %d items, have %d ", want, have)
   927  		}
   928  		for i, have := range items {
   929  			want := getChunk(15, i)
   930  			if !bytes.Equal(want, have) {
   931  				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
   932  			}
   933  		}
   934  		f.Close()
   935  	}
   936  }
   937  
   938  // TestSequentialReadByteLimit does some more advanced tests on batch reads.
   939  // These tests check that when the byte limit hits, we correctly abort in time,
   940  // but also properly do all the deferred reads for the previous data, regardless
   941  // of whether the data crosses a file boundary or not.
   942  func TestSequentialReadByteLimit(t *testing.T) {
   943  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   944  	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
   945  	{ // Fill table
   946  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, freezerTableConfig{noSnappy: true}, false)
   947  		if err != nil {
   948  			t.Fatal(err)
   949  		}
   950  		// Write 10 bytes 30 times,
   951  		// Splitting it at every 100 bytes (10 items)
   952  		writeChunks(t, f, 30, 10)
   953  		f.Close()
   954  	}
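        	// Each item is 10 bytes, so a byte limit of 89 admits 8 items (a 9th would
        	// end at byte 90), a limit of 99 admits 9, and 109 admits 10, regardless of
        	// how many items were requested.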
   955  	for i, tc := range []struct {
   956  		items uint64
   957  		limit uint64
   958  		want  int
   959  	}{
   960  		{9, 89, 8},
   961  		{10, 99, 9},
   962  		{11, 109, 10},
   963  		{100, 89, 8},
   964  		{100, 99, 9},
   965  		{100, 109, 10},
   966  	} {
   967  		{
   968  			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, freezerTableConfig{noSnappy: true}, false)
   969  			if err != nil {
   970  				t.Fatal(err)
   971  			}
   972  			items, err := f.RetrieveItems(0, tc.items, tc.limit)
   973  			if err != nil {
   974  				t.Fatal(err)
   975  			}
   976  			if have, want := len(items), tc.want; have != want {
   977  				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
   978  			}
   979  			for ii, have := range items {
   980  				want := getChunk(10, ii)
   981  				if !bytes.Equal(want, have) {
   982  					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
   983  				}
   984  			}
   985  			f.Close()
   986  		}
   987  	}
   988  }
   989  
   990  // TestSequentialReadNoByteLimit tests the batch-read if maxBytes is not specified.
   991  // The freezer should return the requested items regardless of the size limitation.
   992  func TestSequentialReadNoByteLimit(t *testing.T) {
   993  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   994  	fname := fmt.Sprintf("batchread-3-%d", rand.Uint64())
   995  	{ // Fill table
   996  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, freezerTableConfig{noSnappy: true}, false)
   997  		if err != nil {
   998  			t.Fatal(err)
   999  		}
  1000  		// Write 10 bytes 30 times,
  1001  		// Splitting it at every 100 bytes (10 items)
  1002  		writeChunks(t, f, 30, 10)
  1003  		f.Close()
  1004  	}
  1005  	for i, tc := range []struct {
  1006  		items uint64
  1007  		want  int
  1008  	}{
  1009  		{1, 1},
  1010  		{30, 30},
  1011  		{31, 30},
  1012  	} {
  1013  		{
  1014  			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, freezerTableConfig{noSnappy: true}, false)
  1015  			if err != nil {
  1016  				t.Fatal(err)
  1017  			}
  1018  			items, err := f.RetrieveItems(0, tc.items, 0)
  1019  			if err != nil {
  1020  				t.Fatal(err)
  1021  			}
  1022  			if have, want := len(items), tc.want; have != want {
  1023  				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
  1024  			}
  1025  			for ii, have := range items {
  1026  				want := getChunk(10, ii)
  1027  				if !bytes.Equal(want, have) {
  1028  					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
  1029  				}
  1030  			}
  1031  			f.Close()
  1032  		}
  1033  	}
  1034  }
  1035  
  1036  func TestFreezerReadonly(t *testing.T) {
  1037  	tmpdir := os.TempDir()
  1038  	// Case 1: Check it fails on non-existent file.
  1039  	_, err := newTable(tmpdir,
  1040  		fmt.Sprintf("readonlytest-%d", rand.Uint64()),
  1041  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, freezerTableConfig{noSnappy: true}, true)
  1042  	if err == nil {
  1043  		t.Fatal("readonly table instantiation should fail for non-existent table")
  1044  	}
  1045  
  1046  	// Case 2: Check that it fails on invalid index length.
  1047  	fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
  1048  	idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
  1049  	if err != nil {
  1050  		t.Fatalf("Failed to open index file: %v", err)
  1051  	}
  1052  	// size should not be a multiple of indexEntrySize.
  1053  	idxFile.Write(make([]byte, 17))
  1054  	idxFile.Close()
  1055  	_, err = newTable(tmpdir, fname,
  1056  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, freezerTableConfig{noSnappy: true}, true)
  1057  	if err == nil {
  1058  		t.Errorf("readonly table instantiation should fail for invalid index size")
  1059  	}
  1060  
  1061  	// Case 3: Open a non-readonly table to write some data.
  1062  	// Then corrupt the head file and make sure opening the table
  1063  	// again in readonly triggers an error.
  1064  	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
  1065  	f, err := newTable(tmpdir, fname,
  1066  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, freezerTableConfig{noSnappy: true}, false)
  1067  	if err != nil {
  1068  		t.Fatalf("failed to instantiate table: %v", err)
  1069  	}
  1070  	writeChunks(t, f, 8, 32)
  1071  	// Corrupt table file
  1072  	if _, err := f.head.Write([]byte{1, 1}); err != nil {
  1073  		t.Fatal(err)
  1074  	}
  1075  	if err := f.Close(); err != nil {
  1076  		t.Fatal(err)
  1077  	}
  1078  	_, err = newTable(tmpdir, fname,
  1079  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, freezerTableConfig{noSnappy: true}, true)
  1080  	if err == nil {
  1081  		t.Errorf("readonly table instantiation should fail for corrupt table file")
  1082  	}
  1083  
  1084  	// Case 4: Write some data to a table and later re-open it as readonly.
  1085  	// Should be successful.
  1086  	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
  1087  	f, err = newTable(tmpdir, fname,
  1088  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, freezerTableConfig{noSnappy: true}, false)
  1089  	if err != nil {
  1090  		t.Fatalf("failed to instantiate table: %v\n", err)
  1091  	}
  1092  	writeChunks(t, f, 32, 128)
  1093  	if err := f.Close(); err != nil {
  1094  		t.Fatal(err)
  1095  	}
  1096  	f, err = newTable(tmpdir, fname,
  1097  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, freezerTableConfig{noSnappy: true}, true)
  1098  	if err != nil {
  1099  		t.Fatal(err)
  1100  	}
  1101  	v, err := f.Retrieve(10)
  1102  	if err != nil {
  1103  		t.Fatal(err)
  1104  	}
  1105  	exp := getChunk(128, 10)
  1106  	if !bytes.Equal(v, exp) {
  1107  		t.Errorf("retrieved value is incorrect")
  1108  	}
  1109  
  1110  	// Case 5: Now write some data via a batch.
  1111  	// This should fail either during AppendRaw or commit
  1112  	batch := f.newBatch()
  1113  	writeErr := batch.AppendRaw(32, make([]byte, 1))
  1114  	if writeErr == nil {
  1115  		writeErr = batch.commit()
  1116  	}
  1117  	if writeErr == nil {
  1118  		t.Fatalf("Writing to readonly table should fail")
  1119  	}
  1120  }
  1121  
  1122  // randTest performs random freezer table operations.
  1123  // Instances of this test are created by Generate.
  1124  type randTest []randTestStep
  1125  
  1126  type randTestStep struct {
  1127  	op     int
  1128  	items  []uint64 // for append and retrieve
  1129  	blobs  [][]byte // for append
  1130  	target uint64   // for truncate(head/tail)
  1131  	err    error    // for debugging
  1132  }
  1133  
  1134  const (
  1135  	opReload = iota
  1136  	opAppend
  1137  	opRetrieve
  1138  	opTruncateHead
  1139  	opTruncateHeadAll
  1140  	opTruncateTail
  1141  	opTruncateTailAll
  1142  	opCheckAll
  1143  	opMax // boundary value, not an actual op
  1144  )
  1145  
  1146  func getVals(first uint64, n int) [][]byte {
  1147  	var ret [][]byte
  1148  	for i := 0; i < n; i++ {
  1149  		val := make([]byte, 8)
  1150  		binary.BigEndian.PutUint64(val, first+uint64(i))
  1151  		ret = append(ret, val)
  1152  	}
  1153  	return ret
  1154  }
  1155  
  1156  func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
  1157  	var (
  1158  		deleted uint64   // The number of deleted items from tail
  1159  		items   []uint64 // The indexes of the entries in the table
  1160  
  1161  		// getItems returns up to n indexes of items in the table, starting at a random position.
  1162  		getItems = func(n int) []uint64 {
  1163  			length := len(items)
  1164  			if length == 0 {
  1165  				return nil
  1166  			}
  1167  			var ret []uint64
  1168  			index := rand.Intn(length)
  1169  			for i := index; len(ret) < n && i < length; i++ {
  1170  				ret = append(ret, items[i])
  1171  			}
  1172  			return ret
  1173  		}
  1174  
  1175  		// addItems appends n new items to the table and returns their indexes.
  1176  		addItems = func(n int) []uint64 {
  1177  			var first = deleted
  1178  			if len(items) != 0 {
  1179  				first = items[len(items)-1] + 1
  1180  			}
  1181  			var ret []uint64
  1182  			for i := 0; i < n; i++ {
  1183  				ret = append(ret, first+uint64(i))
  1184  			}
  1185  			items = append(items, ret...)
  1186  			return ret
  1187  		}
  1188  	)
  1189  
  1190  	var steps randTest
  1191  	for i := 0; i < size; i++ {
  1192  		step := randTestStep{op: r.Intn(opMax)}
  1193  		switch step.op {
  1194  		case opReload, opCheckAll:
  1195  		case opAppend:
  1196  			num := r.Intn(3)
  1197  			step.items = addItems(num)
  1198  			if len(step.items) == 0 {
  1199  				step.blobs = nil
  1200  			} else {
  1201  				step.blobs = getVals(step.items[0], num)
  1202  			}
  1203  		case opRetrieve:
  1204  			step.items = getItems(r.Intn(3))
  1205  		case opTruncateHead:
  1206  			if len(items) == 0 {
  1207  				step.target = deleted
  1208  			} else {
  1209  				index := r.Intn(len(items))
  1210  				items = items[:index]
  1211  				step.target = deleted + uint64(index)
  1212  			}
  1213  		case opTruncateHeadAll:
  1214  			step.target = deleted
  1215  			items = items[:0]
  1216  		case opTruncateTail:
  1217  			if len(items) == 0 {
  1218  				step.target = deleted
  1219  			} else {
  1220  				index := r.Intn(len(items))
  1221  				items = items[index:]
  1222  				deleted += uint64(index)
  1223  				step.target = deleted
  1224  			}
  1225  		case opTruncateTailAll:
  1226  			step.target = deleted + uint64(len(items))
  1227  			items = items[:0]
  1228  			deleted = step.target
  1229  		}
  1230  		steps = append(steps, step)
  1231  	}
  1232  	return reflect.ValueOf(steps)
  1233  }
  1234  
  1235  func runRandTest(rt randTest) bool {
  1236  	fname := fmt.Sprintf("randtest-%d", rand.Uint64())
  1237  	f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, freezerTableConfig{noSnappy: true}, false)
  1238  	if err != nil {
  1239  		panic("failed to initialize table")
  1240  	}
  1241  	var values [][]byte
  1242  	for i, step := range rt {
  1243  		switch step.op {
  1244  		case opReload:
  1245  			f.Close()
  1246  			f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, freezerTableConfig{noSnappy: true}, false)
  1247  			if err != nil {
  1248  				rt[i].err = fmt.Errorf("failed to reload table %v", err)
  1249  			}
  1250  		case opCheckAll:
  1251  			tail := f.itemHidden.Load()
  1252  			head := f.items.Load()
  1253  
  1254  			if tail == head {
  1255  				continue
  1256  			}
  1257  			got, err := f.RetrieveItems(f.itemHidden.Load(), head-tail, 100000)
  1258  			if err != nil {
  1259  				rt[i].err = err
  1260  			} else {
  1261  				if !reflect.DeepEqual(got, values) {
  1262  					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
  1263  				}
  1264  			}
  1265  
  1266  		case opAppend:
  1267  			batch := f.newBatch()
  1268  			for i := 0; i < len(step.items); i++ {
  1269  				batch.AppendRaw(step.items[i], step.blobs[i])
  1270  			}
  1271  			batch.commit()
  1272  			values = append(values, step.blobs...)
  1273  
  1274  		case opRetrieve:
  1275  			var blobs [][]byte
  1276  			if len(step.items) == 0 {
  1277  				continue
  1278  			}
  1279  			tail := f.itemHidden.Load()
  1280  			for i := 0; i < len(step.items); i++ {
  1281  				blobs = append(blobs, values[step.items[i]-tail])
  1282  			}
  1283  			got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
  1284  			if err != nil {
  1285  				rt[i].err = err
  1286  			} else {
  1287  				if !reflect.DeepEqual(got, blobs) {
  1288  					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
  1289  				}
  1290  			}
  1291  
  1292  		case opTruncateHead:
  1293  			f.truncateHead(step.target)
  1294  
  1295  			length := f.items.Load() - f.itemHidden.Load()
  1296  			values = values[:length]
  1297  
  1298  		case opTruncateHeadAll:
  1299  			f.truncateHead(step.target)
  1300  			values = nil
  1301  
  1302  		case opTruncateTail:
  1303  			prev := f.itemHidden.Load()
  1304  			f.truncateTail(step.target)
  1305  
  1306  			truncated := f.itemHidden.Load() - prev
  1307  			values = values[truncated:]
  1308  
  1309  		case opTruncateTailAll:
  1310  			f.truncateTail(step.target)
  1311  			values = nil
  1312  		}
  1313  		// Abort the test on error.
  1314  		if rt[i].err != nil {
  1315  			return false
  1316  		}
  1317  	}
  1318  	f.Close()
  1319  	return true
  1320  }
  1321  
  1322  func TestRandom(t *testing.T) {
  1323  	if err := quick.Check(runRandTest, nil); err != nil {
  1324  		if cerr, ok := err.(*quick.CheckError); ok {
  1325  			t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
  1326  		}
  1327  		t.Fatal(err)
  1328  	}
  1329  }
  1330  
  1331  func TestIndexValidation(t *testing.T) {
  1332  	const dataSize = 10
  1333  
  1334  	garbage := indexEntry{
  1335  		filenum: 100,
  1336  		offset:  200,
  1337  	}
  1338  	var cases = []struct {
  1339  		write         int
  1340  		offset        int64
  1341  		data          []byte
  1342  		expItems      int
  1343  		hasCorruption bool
  1344  	}{
  1345  		// extend index file with zero bytes at the end
  1346  		{
  1347  			write:    5,
  1348  			offset:   (5 + 1) * indexEntrySize,
  1349  			data:     make([]byte, indexEntrySize),
  1350  			expItems: 5,
  1351  		},
  1352  		// extend index file with unaligned zero bytes at the end
  1353  		{
  1354  			write:    5,
  1355  			offset:   (5 + 1) * indexEntrySize,
  1356  			data:     make([]byte, indexEntrySize*1.5),
  1357  			expItems: 5,
  1358  		},
  1359  		// write garbage in the first non-head item
  1360  		{
  1361  			write:    5,
  1362  			offset:   indexEntrySize,
  1363  			data:     garbage.append(nil),
  1364  			expItems: 0,
  1365  		},
  1366  		// write garbage in the middle
  1367  		{
  1368  			write:    5,
  1369  			offset:   3 * indexEntrySize,
  1370  			data:     garbage.append(nil),
  1371  			expItems: 2,
  1372  		},
  1373  		// fill the first data file completely (without advancing to a new one);
  1374  		// the dangling garbage entry at the tail should be truncated.
  1375  		{
  1376  			write:    10,
  1377  			offset:   11 * indexEntrySize,
  1378  			data:     garbage.append(nil),
  1379  			expItems: 10,
  1380  		},
  1381  	}
  1382  	for _, c := range cases {
  1383  		fn := fmt.Sprintf("t-%d", rand.Uint64())
  1384  		f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 10*dataSize, freezerTableConfig{noSnappy: true}, false)
  1385  		if err != nil {
  1386  			t.Fatal(err)
  1387  		}
  1388  		writeChunks(t, f, c.write, dataSize)
  1389  
  1390  		// write corrupted data
  1391  		f.index.WriteAt(c.data, c.offset)
  1392  		f.Close()
  1393  
  1394  		// reopen the table, corruption should be truncated
  1395  		f, err = newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 100, freezerTableConfig{noSnappy: true}, false)
  1396  		if err != nil {
  1397  			t.Fatal(err)
  1398  		}
  1399  		for i := 0; i < c.expItems; i++ {
  1400  			exp := getChunk(10, i)
  1401  			got, err := f.Retrieve(uint64(i))
  1402  			if err != nil && !c.hasCorruption {
  1403  				t.Fatalf("Failed to read from table, %v", err)
  1404  			}
  1405  			if !bytes.Equal(exp, got) && !c.hasCorruption {
  1406  				t.Fatalf("Unexpected item data, want: %v, got: %v", exp, got)
  1407  			}
  1408  		}
  1409  		if f.items.Load() != uint64(c.expItems) {
  1410  			t.Fatalf("Unexpected item number, want: %d, got: %d", c.expItems, f.items.Load())
  1411  		}
  1412  	}
  1413  }
  1414  
  1415  // TestFlushOffsetTracking tests the flush offset tracking. The offset movement
  1416  // in the test is mostly triggered by advanceHead (new data file) and
  1417  // head/tail truncation.
  1418  func TestFlushOffsetTracking(t *testing.T) {
  1419  	const (
  1420  		items    = 35
  1421  		dataSize = 10
  1422  		fileSize = 100
  1423  	)
  1424  	fn := fmt.Sprintf("t-%d", rand.Uint64())
  1425  	f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), fileSize, freezerTableConfig{noSnappy: true}, false)
  1426  	if err != nil {
  1427  		t.Fatal(err)
  1428  	}
  1429  	// Data files:
  1430  	//   F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(5 items, non-full)
  1431  	writeChunks(t, f, items, dataSize)
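        	// 35 items mean 36 index entries (1 header + 35). The flush offset only
        	// advances when a new data file is opened, so it initially covers 1 header
        	// + 30 items (the three completed files): 31*indexEntrySize.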
  1432  
  1433  	var cases = []struct {
  1434  		op     func(*freezerTable)
  1435  		offset int64
  1436  	}{
  1437  		{
  1438  			// Data files:
  1439  			//   F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(5 items, non-full)
  1440  			func(f *freezerTable) {}, // no-op
  1441  			31 * indexEntrySize,
  1442  		},
  1443  		{
  1444  			// Write more items to fill the newest data file, but the file advance
  1445  			// is not triggered.
  1446  
  1447  			// Data files:
  1448  			//   F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(10 items, full)
  1449  			func(f *freezerTable) {
  1450  				batch := f.newBatch()
  1451  				for i := 0; i < 5; i++ {
  1452  					batch.AppendRaw(items+uint64(i), make([]byte, dataSize))
  1453  				}
  1454  				batch.commit()
  1455  			},
  1456  			31 * indexEntrySize,
  1457  		},
  1458  		{
  1459  			// Write more items to trigger the data file advance
  1460  
  1461  			// Data files:
  1462  			//   F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(1 item)
  1463  			func(f *freezerTable) {
  1464  				batch := f.newBatch()
  1465  				batch.AppendRaw(items+5, make([]byte, dataSize))
  1466  				batch.commit()
  1467  			},
  1468  			41 * indexEntrySize,
  1469  		},
  1470  		{
  1471  			// Head truncate
  1472  
  1473  			// Data files:
  1474  			//   F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(0 item)
  1475  			func(f *freezerTable) {
  1476  				f.truncateHead(items + 5)
  1477  			},
  1478  			41 * indexEntrySize,
  1479  		},
  1480  		{
  1481  			// Tail truncate
  1482  
  1483  			// Data files:
  1484  			//   F1(1 hidden, 9 visible) -> F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(0 item)
  1485  			func(f *freezerTable) {
  1486  				f.truncateTail(1)
  1487  			},
  1488  			41 * indexEntrySize,
  1489  		},
  1490  		{
  1491  			// Tail truncate
  1492  
  1493  			// Data files:
  1494  			//   F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(0 item)
  1495  			func(f *freezerTable) {
  1496  				f.truncateTail(10)
  1497  			},
  1498  			31 * indexEntrySize,
  1499  		},
  1500  		{
  1501  			// Tail truncate
  1502  
  1503  			// Data files:
  1504  			//   F4(10 items) -> F5(0 item)
  1505  			func(f *freezerTable) {
  1506  				f.truncateTail(30)
  1507  			},
  1508  			11 * indexEntrySize,
  1509  		},
  1510  		{
  1511  			// Head truncate
  1512  
  1513  			// Data files:
  1514  			//   F4(9 items)
  1515  			func(f *freezerTable) {
  1516  				f.truncateHead(items + 4)
  1517  			},
  1518  			10 * indexEntrySize,
  1519  		},
  1520  	}
  1521  	for _, c := range cases {
  1522  		c.op(f)
  1523  		if f.metadata.flushOffset != c.offset {
  1524  			t.Fatalf("Unexpected index flush offset, want: %d, got: %d", c.offset, f.metadata.flushOffset)
  1525  		}
  1526  	}
  1527  }
  1528  
  1529  func TestTailTruncationCrash(t *testing.T) {
  1530  	const (
  1531  		items    = 35
  1532  		dataSize = 10
  1533  		fileSize = 100
  1534  	)
  1535  	fn := fmt.Sprintf("t-%d", rand.Uint64())
  1536  	f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), fileSize, freezerTableConfig{noSnappy: true}, false)
  1537  	if err != nil {
  1538  		t.Fatal(err)
  1539  	}
  1540  	// Data files:
  1541  	//   F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(5 items, non-full)
  1542  	writeChunks(t, f, items, dataSize)
  1543  
  1544  	// The latest 5 items are not persisted yet
  1545  	if f.metadata.flushOffset != 31*indexEntrySize {
  1546  		t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 31*indexEntrySize, f.metadata.flushOffset)
  1547  	}
  1548  
  1549  	f.truncateTail(5)
  1550  	if f.metadata.flushOffset != 31*indexEntrySize {
  1551  		t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 31*indexEntrySize, f.metadata.flushOffset)
  1552  	}
  1553  
  1554  	// Truncate the first 10 items which results in the first data file
  1555  	// being removed. The offset should be moved to 26*indexEntrySize.
  1556  	f.truncateTail(10)
  1557  	if f.metadata.flushOffset != 26*indexEntrySize {
  1558  		t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 26*indexEntrySize, f.metadata.flushOffset)
  1559  	}
  1560  
  1561  	// Write the offset back to 31*indexEntrySize to simulate a crash
  1562  	// which occurs after truncating the index file without updating
  1563  	// the offset
  1564  	f.metadata.setFlushOffset(31*indexEntrySize, true)
  1565  
  1566  	f, err = newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), fileSize, freezerTableConfig{noSnappy: true}, false)
  1567  	if err != nil {
  1568  		t.Fatal(err)
  1569  	}
  1570  	if f.metadata.flushOffset != 26*indexEntrySize {
  1571  		t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 26*indexEntrySize, f.metadata.flushOffset)
  1572  	}
  1573  }