github.com/ethw3/go-ethereuma@v0.0.0-20221013053120-c14602a4c23c/core/rawdb/freezer_table_test.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package rawdb
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"fmt"
    23  	"math/rand"
    24  	"os"
    25  	"path/filepath"
    26  	"reflect"
    27  	"sync/atomic"
    28  	"testing"
    29  	"testing/quick"
    30  	"time"
    31  
    32  	"github.com/davecgh/go-spew/spew"
    33  	"github.com/ethw3/go-ethereuma/metrics"
    34  	"github.com/stretchr/testify/require"
    35  )
    36  
// init seeds the package-global math/rand source so that each test run picks
// distinct random table names (e.g. "unittest-<n>") under os.TempDir, avoiding
// collisions with leftover files from earlier runs.
func init() {
	rand.Seed(time.Now().Unix())
}
    40  
    41  // TestFreezerBasics test initializing a freezertable from scratch, writing to the table,
    42  // and reading it back.
    43  func TestFreezerBasics(t *testing.T) {
    44  	t.Parallel()
    45  	// set cutoff at 50 bytes
    46  	f, err := newTable(os.TempDir(),
    47  		fmt.Sprintf("unittest-%d", rand.Uint64()),
    48  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
    49  	if err != nil {
    50  		t.Fatal(err)
    51  	}
    52  	defer f.Close()
    53  
    54  	// Write 15 bytes 255 times, results in 85 files
    55  	writeChunks(t, f, 255, 15)
    56  
    57  	//print(t, f, 0)
    58  	//print(t, f, 1)
    59  	//print(t, f, 2)
    60  	//
    61  	//db[0] =  000000000000000000000000000000
    62  	//db[1] =  010101010101010101010101010101
    63  	//db[2] =  020202020202020202020202020202
    64  
    65  	for y := 0; y < 255; y++ {
    66  		exp := getChunk(15, y)
    67  		got, err := f.Retrieve(uint64(y))
    68  		if err != nil {
    69  			t.Fatalf("reading item %d: %v", y, err)
    70  		}
    71  		if !bytes.Equal(got, exp) {
    72  			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
    73  		}
    74  	}
    75  	// Check that we cannot read too far
    76  	_, err = f.Retrieve(uint64(255))
    77  	if err != errOutOfBounds {
    78  		t.Fatal(err)
    79  	}
    80  }
    81  
// TestFreezerBasicsClosing tests the same scenario as TestFreezerBasics, but
// additionally closes and reopens the freezer table between every write and
// every read, verifying that all table state survives a close/reopen cycle.
func TestFreezerBasicsClosing(t *testing.T) {
	t.Parallel()
	// set cutoff at 50 bytes
	var (
		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
		f          *freezerTable
		err        error
	)
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 15 bytes 255 times, results in 85 files.
	// In-between writes, the table is closed and re-opened.
	for x := 0; x < 255; x++ {
		data := getChunk(15, x)
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(uint64(x), data))
		require.NoError(t, batch.commit())
		f.Close()

		// Reopen the same table; all previously written items must persist.
		// Note: assigns to the outer f/err on purpose (no := shadowing).
		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
	defer f.Close()

	// Read every item back, closing and reopening the table after each read.
	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
		f.Close()
		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
}
   130  
   131  // TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
   132  func TestFreezerRepairDanglingHead(t *testing.T) {
   133  	t.Parallel()
   134  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   135  	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
   136  
   137  	// Fill table
   138  	{
   139  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   140  		if err != nil {
   141  			t.Fatal(err)
   142  		}
   143  		// Write 15 bytes 255 times
   144  		writeChunks(t, f, 255, 15)
   145  
   146  		// The last item should be there
   147  		if _, err = f.Retrieve(0xfe); err != nil {
   148  			t.Fatal(err)
   149  		}
   150  		f.Close()
   151  	}
   152  
   153  	// open the index
   154  	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
   155  	if err != nil {
   156  		t.Fatalf("Failed to open index file: %v", err)
   157  	}
   158  	// Remove 4 bytes
   159  	stat, err := idxFile.Stat()
   160  	if err != nil {
   161  		t.Fatalf("Failed to stat index file: %v", err)
   162  	}
   163  	idxFile.Truncate(stat.Size() - 4)
   164  	idxFile.Close()
   165  
   166  	// Now open it again
   167  	{
   168  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   169  		if err != nil {
   170  			t.Fatal(err)
   171  		}
   172  		// The last item should be missing
   173  		if _, err = f.Retrieve(0xff); err == nil {
   174  			t.Errorf("Expected error for missing index entry")
   175  		}
   176  		// The one before should still be there
   177  		if _, err = f.Retrieve(0xfd); err != nil {
   178  			t.Fatalf("Expected no error, got %v", err)
   179  		}
   180  	}
   181  }
   182  
   183  // TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
   184  func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
   185  	t.Parallel()
   186  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   187  	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
   188  
   189  	// Fill a table and close it
   190  	{
   191  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   192  		if err != nil {
   193  			t.Fatal(err)
   194  		}
   195  		// Write 15 bytes 255 times
   196  		writeChunks(t, f, 255, 15)
   197  
   198  		// The last item should be there
   199  		if _, err = f.Retrieve(f.items - 1); err != nil {
   200  			t.Fatal(err)
   201  		}
   202  		f.Close()
   203  	}
   204  
   205  	// open the index
   206  	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
   207  	if err != nil {
   208  		t.Fatalf("Failed to open index file: %v", err)
   209  	}
   210  	// Remove everything but the first item, and leave data unaligned
   211  	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
   212  	idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
   213  	idxFile.Close()
   214  
   215  	// Now open it again
   216  	{
   217  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   218  		if err != nil {
   219  			t.Fatal(err)
   220  		}
   221  		// The first item should be there
   222  		if _, err = f.Retrieve(0); err != nil {
   223  			t.Fatal(err)
   224  		}
   225  		// The second item should be missing
   226  		if _, err = f.Retrieve(1); err == nil {
   227  			t.Errorf("Expected error for missing index entry")
   228  		}
   229  		// We should now be able to store items again, from item = 1
   230  		batch := f.newBatch()
   231  		for x := 1; x < 0xff; x++ {
   232  			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
   233  		}
   234  		require.NoError(t, batch.commit())
   235  		f.Close()
   236  	}
   237  
   238  	// And if we open it, we should now be able to read all of them (new values)
   239  	{
   240  		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   241  		for y := 1; y < 255; y++ {
   242  			exp := getChunk(15, ^y)
   243  			got, err := f.Retrieve(uint64(y))
   244  			if err != nil {
   245  				t.Fatal(err)
   246  			}
   247  			if !bytes.Equal(got, exp) {
   248  				t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
   249  			}
   250  		}
   251  	}
   252  }
   253  
   254  // TestSnappyDetection tests that we fail to open a snappy database and vice versa
   255  func TestSnappyDetection(t *testing.T) {
   256  	t.Parallel()
   257  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   258  	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())
   259  
   260  	// Open with snappy
   261  	{
   262  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   263  		if err != nil {
   264  			t.Fatal(err)
   265  		}
   266  		// Write 15 bytes 255 times
   267  		writeChunks(t, f, 255, 15)
   268  		f.Close()
   269  	}
   270  
   271  	// Open without snappy
   272  	{
   273  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
   274  		if err != nil {
   275  			t.Fatal(err)
   276  		}
   277  		if _, err = f.Retrieve(0); err == nil {
   278  			f.Close()
   279  			t.Fatalf("expected empty table")
   280  		}
   281  	}
   282  
   283  	// Open with snappy
   284  	{
   285  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   286  		if err != nil {
   287  			t.Fatal(err)
   288  		}
   289  		// There should be 255 items
   290  		if _, err = f.Retrieve(0xfe); err != nil {
   291  			f.Close()
   292  			t.Fatalf("expected no error, got %v", err)
   293  		}
   294  	}
   295  }
   296  
   297  func assertFileSize(f string, size int64) error {
   298  	stat, err := os.Stat(f)
   299  	if err != nil {
   300  		return err
   301  	}
   302  	if stat.Size() != size {
   303  		return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
   304  	}
   305  	return nil
   306  }
   307  
// TestFreezerRepairDanglingIndex checks that if the index has more entries
// than there are data, the index is repaired on open: entries pointing past
// the end of the cropped data file are dropped, and the data file is rolled
// back to the last complete item.
func TestFreezerRepairDanglingIndex(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 9 times : 150 bytes
		writeChunks(t, f, 9, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			f.Close()
			t.Fatal(err)
		}
		f.Close()
		// File sizes should be 45, 45, 45 : items[3, 3, 3)
	}

	// Crop third file
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
	// Truncate third file: 45 ,45, 20
	{
		if err := assertFileSize(fileToCrop, 45); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		// NOTE(review): the Truncate error is ignored here; a failure would be
		// caught indirectly by the size assertions below.
		file.Truncate(20)
		file.Close()
	}

	// Open db it again
	// It should restore the file(s) to
	// 45, 45, 15
	// with 3+3+1 items
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		// 20 cropped bytes only hold one full 15-byte item, so the repaired
		// table ends up with 3+3+1 = 7 items.
		if f.items != 7 {
			t.Fatalf("expected %d items, got %d", 7, f.items)
		}
		if err := assertFileSize(fileToCrop, 15); err != nil {
			t.Fatal(err)
		}
	}
}
   366  
   367  func TestFreezerTruncate(t *testing.T) {
   368  	t.Parallel()
   369  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   370  	fname := fmt.Sprintf("truncation-%d", rand.Uint64())
   371  
   372  	// Fill table
   373  	{
   374  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   375  		if err != nil {
   376  			t.Fatal(err)
   377  		}
   378  		// Write 15 bytes 30 times
   379  		writeChunks(t, f, 30, 15)
   380  
   381  		// The last item should be there
   382  		if _, err = f.Retrieve(f.items - 1); err != nil {
   383  			t.Fatal(err)
   384  		}
   385  		f.Close()
   386  	}
   387  
   388  	// Reopen, truncate
   389  	{
   390  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   391  		if err != nil {
   392  			t.Fatal(err)
   393  		}
   394  		defer f.Close()
   395  		f.truncateHead(10) // 150 bytes
   396  		if f.items != 10 {
   397  			t.Fatalf("expected %d items, got %d", 10, f.items)
   398  		}
   399  		// 45, 45, 45, 15 -- bytes should be 15
   400  		if f.headBytes != 15 {
   401  			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
   402  		}
   403  	}
   404  }
   405  
   406  // TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
   407  // That will rewind the index, and _should_ truncate the head file
   408  func TestFreezerRepairFirstFile(t *testing.T) {
   409  	t.Parallel()
   410  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   411  	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
   412  
   413  	// Fill table
   414  	{
   415  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   416  		if err != nil {
   417  			t.Fatal(err)
   418  		}
   419  		// Write 80 bytes, splitting out into two files
   420  		batch := f.newBatch()
   421  		require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
   422  		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
   423  		require.NoError(t, batch.commit())
   424  
   425  		// The last item should be there
   426  		if _, err = f.Retrieve(1); err != nil {
   427  			t.Fatal(err)
   428  		}
   429  		f.Close()
   430  	}
   431  
   432  	// Truncate the file in half
   433  	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
   434  	{
   435  		if err := assertFileSize(fileToCrop, 40); err != nil {
   436  			t.Fatal(err)
   437  		}
   438  		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
   439  		if err != nil {
   440  			t.Fatal(err)
   441  		}
   442  		file.Truncate(20)
   443  		file.Close()
   444  	}
   445  
   446  	// Reopen
   447  	{
   448  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   449  		if err != nil {
   450  			t.Fatal(err)
   451  		}
   452  		if f.items != 1 {
   453  			f.Close()
   454  			t.Fatalf("expected %d items, got %d", 0, f.items)
   455  		}
   456  
   457  		// Write 40 bytes
   458  		batch := f.newBatch()
   459  		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
   460  		require.NoError(t, batch.commit())
   461  
   462  		f.Close()
   463  
   464  		// Should have been truncated down to zero and then 40 written
   465  		if err := assertFileSize(fileToCrop, 40); err != nil {
   466  			t.Fatal(err)
   467  		}
   468  	}
   469  }
   470  
   471  // TestFreezerReadAndTruncate tests:
   472  // - we have a table open
   473  // - do some reads, so files are open in readonly
   474  // - truncate so those files are 'removed'
   475  // - check that we did not keep the rdonly file descriptors
   476  func TestFreezerReadAndTruncate(t *testing.T) {
   477  	t.Parallel()
   478  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   479  	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
   480  
   481  	// Fill table
   482  	{
   483  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   484  		if err != nil {
   485  			t.Fatal(err)
   486  		}
   487  		// Write 15 bytes 30 times
   488  		writeChunks(t, f, 30, 15)
   489  
   490  		// The last item should be there
   491  		if _, err = f.Retrieve(f.items - 1); err != nil {
   492  			t.Fatal(err)
   493  		}
   494  		f.Close()
   495  	}
   496  
   497  	// Reopen and read all files
   498  	{
   499  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
   500  		if err != nil {
   501  			t.Fatal(err)
   502  		}
   503  		if f.items != 30 {
   504  			f.Close()
   505  			t.Fatalf("expected %d items, got %d", 0, f.items)
   506  		}
   507  		for y := byte(0); y < 30; y++ {
   508  			f.Retrieve(uint64(y))
   509  		}
   510  
   511  		// Now, truncate back to zero
   512  		f.truncateHead(0)
   513  
   514  		// Write the data again
   515  		batch := f.newBatch()
   516  		for x := 0; x < 30; x++ {
   517  			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
   518  		}
   519  		require.NoError(t, batch.commit())
   520  		f.Close()
   521  	}
   522  }
   523  
// TestFreezerOffset tests a table whose first index entry encodes a non-zero
// item offset (i.e. whose oldest data files were deleted out from under it):
// the table must renumber items accordingly, reject reads below the offset,
// and accept new appends above the current head.
func TestFreezerOffset(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("offset-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}

		// Write 6 x 20 bytes, splitting out into three files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))

		require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
		require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))

		require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
		require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
		require.NoError(t, batch.commit())

		t.Log(f.dumpIndexString(0, 100))
		f.Close()
	}

	// Now crop it.
	{
		// delete files 0 and 1
		for i := 0; i < 2; i++ {
			p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
			if err := os.Remove(p); err != nil {
				t.Fatal(err)
			}
		}
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		// NOTE(review): Read/WriteAt/Truncate errors below are ignored; a
		// failure would surface in the retrieval assertions further down.
		indexBuf := make([]byte, 7*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 4 ] at index zero

		zeroIndex := indexEntry{
			filenum: uint32(2), // First file is 2
			offset:  uint32(4), // We have removed four items
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)

		// Remove the four next indices by overwriting
		copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
		indexFile.WriteAt(indexBuf, 0)

		// Need to truncate the moved index items
		indexFile.Truncate(indexEntrySize * (1 + 2))
		indexFile.Close()
	}

	// Now open again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		// It should allow writing item 6.
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
		require.NoError(t, batch.commit())

		// Items 0-3 are below the offset and must be rejected...
		checkRetrieveError(t, f, map[uint64]error{
			0: errOutOfBounds,
			1: errOutOfBounds,
			2: errOutOfBounds,
			3: errOutOfBounds,
		})
		// ...while the surviving items (and the new append) read back fine.
		checkRetrieve(t, f, map[uint64][]byte{
			4: getChunk(20, 0xbb),
			5: getChunk(20, 0xaa),
			6: getChunk(20, 0x99),
		})
	}

	// Edit the index again, with a much larger initial offset of 1M.
	{
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 3*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 1M ] at index zero

		zeroIndex := indexEntry{
			offset:  uint32(1000000), // We have removed 1M items
			filenum: uint32(2),       // First file is 2
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)
		indexFile.WriteAt(indexBuf, 0)
		indexFile.Close()
	}

	// Check that existing items have been moved to index 1M.
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		checkRetrieveError(t, f, map[uint64]error{
			0:      errOutOfBounds,
			1:      errOutOfBounds,
			2:      errOutOfBounds,
			3:      errOutOfBounds,
			999999: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			1000000: getChunk(20, 0xbb),
			1000001: getChunk(20, 0xaa),
		})
	}
}
   666  
   667  func TestTruncateTail(t *testing.T) {
   668  	t.Parallel()
   669  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   670  	fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())
   671  
   672  	// Fill table
   673  	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
   674  	if err != nil {
   675  		t.Fatal(err)
   676  	}
   677  
   678  	// Write 7 x 20 bytes, splitting out into four files
   679  	batch := f.newBatch()
   680  	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
   681  	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
   682  	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
   683  	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
   684  	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
   685  	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
   686  	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
   687  	require.NoError(t, batch.commit())
   688  
   689  	// nothing to do, all the items should still be there.
   690  	f.truncateTail(0)
   691  	fmt.Println(f.dumpIndexString(0, 1000))
   692  	checkRetrieve(t, f, map[uint64][]byte{
   693  		0: getChunk(20, 0xFF),
   694  		1: getChunk(20, 0xEE),
   695  		2: getChunk(20, 0xdd),
   696  		3: getChunk(20, 0xcc),
   697  		4: getChunk(20, 0xbb),
   698  		5: getChunk(20, 0xaa),
   699  		6: getChunk(20, 0x11),
   700  	})
   701  
   702  	// truncate single element( item 0 ), deletion is only supported at file level
   703  	f.truncateTail(1)
   704  	fmt.Println(f.dumpIndexString(0, 1000))
   705  	checkRetrieveError(t, f, map[uint64]error{
   706  		0: errOutOfBounds,
   707  	})
   708  	checkRetrieve(t, f, map[uint64][]byte{
   709  		1: getChunk(20, 0xEE),
   710  		2: getChunk(20, 0xdd),
   711  		3: getChunk(20, 0xcc),
   712  		4: getChunk(20, 0xbb),
   713  		5: getChunk(20, 0xaa),
   714  		6: getChunk(20, 0x11),
   715  	})
   716  
   717  	// Reopen the table, the deletion information should be persisted as well
   718  	f.Close()
   719  	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
   720  	if err != nil {
   721  		t.Fatal(err)
   722  	}
   723  	checkRetrieveError(t, f, map[uint64]error{
   724  		0: errOutOfBounds,
   725  	})
   726  	checkRetrieve(t, f, map[uint64][]byte{
   727  		1: getChunk(20, 0xEE),
   728  		2: getChunk(20, 0xdd),
   729  		3: getChunk(20, 0xcc),
   730  		4: getChunk(20, 0xbb),
   731  		5: getChunk(20, 0xaa),
   732  		6: getChunk(20, 0x11),
   733  	})
   734  
   735  	// truncate two elements( item 0, item 1 ), the file 0 should be deleted
   736  	f.truncateTail(2)
   737  	checkRetrieveError(t, f, map[uint64]error{
   738  		0: errOutOfBounds,
   739  		1: errOutOfBounds,
   740  	})
   741  	checkRetrieve(t, f, map[uint64][]byte{
   742  		2: getChunk(20, 0xdd),
   743  		3: getChunk(20, 0xcc),
   744  		4: getChunk(20, 0xbb),
   745  		5: getChunk(20, 0xaa),
   746  		6: getChunk(20, 0x11),
   747  	})
   748  
   749  	// Reopen the table, the above testing should still pass
   750  	f.Close()
   751  	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
   752  	if err != nil {
   753  		t.Fatal(err)
   754  	}
   755  	defer f.Close()
   756  
   757  	checkRetrieveError(t, f, map[uint64]error{
   758  		0: errOutOfBounds,
   759  		1: errOutOfBounds,
   760  	})
   761  	checkRetrieve(t, f, map[uint64][]byte{
   762  		2: getChunk(20, 0xdd),
   763  		3: getChunk(20, 0xcc),
   764  		4: getChunk(20, 0xbb),
   765  		5: getChunk(20, 0xaa),
   766  		6: getChunk(20, 0x11),
   767  	})
   768  
   769  	// truncate all, the entire freezer should be deleted
   770  	f.truncateTail(7)
   771  	checkRetrieveError(t, f, map[uint64]error{
   772  		0: errOutOfBounds,
   773  		1: errOutOfBounds,
   774  		2: errOutOfBounds,
   775  		3: errOutOfBounds,
   776  		4: errOutOfBounds,
   777  		5: errOutOfBounds,
   778  		6: errOutOfBounds,
   779  	})
   780  }
   781  
   782  func TestTruncateHead(t *testing.T) {
   783  	t.Parallel()
   784  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   785  	fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())
   786  
   787  	// Fill table
   788  	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
   789  	if err != nil {
   790  		t.Fatal(err)
   791  	}
   792  
   793  	// Write 7 x 20 bytes, splitting out into four files
   794  	batch := f.newBatch()
   795  	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
   796  	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
   797  	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
   798  	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
   799  	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
   800  	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
   801  	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
   802  	require.NoError(t, batch.commit())
   803  
   804  	f.truncateTail(4) // Tail = 4
   805  
   806  	// NewHead is required to be 3, the entire table should be truncated
   807  	f.truncateHead(4)
   808  	checkRetrieveError(t, f, map[uint64]error{
   809  		0: errOutOfBounds, // Deleted by tail
   810  		1: errOutOfBounds, // Deleted by tail
   811  		2: errOutOfBounds, // Deleted by tail
   812  		3: errOutOfBounds, // Deleted by tail
   813  		4: errOutOfBounds, // Deleted by Head
   814  		5: errOutOfBounds, // Deleted by Head
   815  		6: errOutOfBounds, // Deleted by Head
   816  	})
   817  
   818  	// Append new items
   819  	batch = f.newBatch()
   820  	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
   821  	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
   822  	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
   823  	require.NoError(t, batch.commit())
   824  
   825  	checkRetrieve(t, f, map[uint64][]byte{
   826  		4: getChunk(20, 0xbb),
   827  		5: getChunk(20, 0xaa),
   828  		6: getChunk(20, 0x11),
   829  	})
   830  }
   831  
   832  func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
   833  	t.Helper()
   834  
   835  	for item, wantBytes := range items {
   836  		value, err := f.Retrieve(item)
   837  		if err != nil {
   838  			t.Fatalf("can't get expected item %d: %v", item, err)
   839  		}
   840  		if !bytes.Equal(value, wantBytes) {
   841  			t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
   842  		}
   843  	}
   844  }
   845  
   846  func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
   847  	t.Helper()
   848  
   849  	for item, wantError := range items {
   850  		value, err := f.Retrieve(item)
   851  		if err == nil {
   852  			t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
   853  		}
   854  		if err != wantError {
   855  			t.Fatalf("wrong error for item %d: %v", item, err)
   856  		}
   857  	}
   858  }
   859  
   860  // Gets a chunk of data, filled with 'b'
   861  func getChunk(size int, b int) []byte {
   862  	data := make([]byte, size)
   863  	for i := range data {
   864  		data[i] = byte(b)
   865  	}
   866  	return data
   867  }
   868  
   869  // TODO (?)
// - test that if we remove several head-files, as well as the last data-file,
   871  //   the index is truncated accordingly
   872  // Right now, the freezer would fail on these conditions:
   873  // 1. have data files d0, d1, d2, d3
   874  // 2. remove d2,d3
   875  //
   876  // However, all 'normal' failure modes arising due to failing to sync() or save a file
   877  // should be handled already, and the case described above can only (?) happen if an
   878  // external process/user deletes files from the filesystem.
   879  
// writeChunks appends n items to the table in a single batch and commits it.
// Item i is length bytes long, with every byte set to byte(i) (see getChunk).
// Any append or commit failure aborts the calling test.
func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
	t.Helper()

	batch := ft.newBatch()
	for i := 0; i < n; i++ {
		if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
			t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
		}
	}
	if err := batch.commit(); err != nil {
		t.Fatalf("Commit returned error: %v", err)
	}
}
   893  
// TestSequentialRead does some basic tests on the RetrieveItems.
func TestSequentialRead(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)
		// Dump the index for debugging (prints to stdout).
		f.dumpIndexStdout(0, 30)
		f.Close()
	}
	{ // Open it, iterate, verify iteration
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Both the item limit (10000) and byte limit (100000) exceed the
		// table contents, so all 30 items should be returned.
		items, err := f.RetrieveItems(0, 10000, 100000)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 30; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
	{ // Open it, iterate, verify byte limit. The byte limit is less than item
		// size, so each lookup should only return one item
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		items, err := f.RetrieveItems(0, 10000, 10)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 1; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
}
   950  
   951  // TestSequentialReadByteLimit does some more advanced tests on batch reads.
   952  // These tests check that when the byte limit hits, we correctly abort in time,
   953  // but also properly do all the deferred reads for the previous data, regardless
   954  // of whether the data crosses a file boundary or not.
   955  func TestSequentialReadByteLimit(t *testing.T) {
   956  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   957  	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
   958  	{ // Fill table
   959  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
   960  		if err != nil {
   961  			t.Fatal(err)
   962  		}
   963  		// Write 10 bytes 30 times,
   964  		// Splitting it at every 100 bytes (10 items)
   965  		writeChunks(t, f, 30, 10)
   966  		f.Close()
   967  	}
   968  	for i, tc := range []struct {
   969  		items uint64
   970  		limit uint64
   971  		want  int
   972  	}{
   973  		{9, 89, 8},
   974  		{10, 99, 9},
   975  		{11, 109, 10},
   976  		{100, 89, 8},
   977  		{100, 99, 9},
   978  		{100, 109, 10},
   979  	} {
   980  		{
   981  			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
   982  			if err != nil {
   983  				t.Fatal(err)
   984  			}
   985  			items, err := f.RetrieveItems(0, tc.items, tc.limit)
   986  			if err != nil {
   987  				t.Fatal(err)
   988  			}
   989  			if have, want := len(items), tc.want; have != want {
   990  				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
   991  			}
   992  			for ii, have := range items {
   993  				want := getChunk(10, ii)
   994  				if !bytes.Equal(want, have) {
   995  					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
   996  				}
   997  			}
   998  			f.Close()
   999  		}
  1000  	}
  1001  }
  1002  
  1003  func TestFreezerReadonly(t *testing.T) {
  1004  	tmpdir := os.TempDir()
  1005  	// Case 1: Check it fails on non-existent file.
  1006  	_, err := newTable(tmpdir,
  1007  		fmt.Sprintf("readonlytest-%d", rand.Uint64()),
  1008  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
  1009  	if err == nil {
  1010  		t.Fatal("readonly table instantiation should fail for non-existent table")
  1011  	}
  1012  
  1013  	// Case 2: Check that it fails on invalid index length.
  1014  	fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
  1015  	idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
  1016  	if err != nil {
  1017  		t.Errorf("Failed to open index file: %v\n", err)
  1018  	}
  1019  	// size should not be a multiple of indexEntrySize.
  1020  	idxFile.Write(make([]byte, 17))
  1021  	idxFile.Close()
  1022  	_, err = newTable(tmpdir, fname,
  1023  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
  1024  	if err == nil {
  1025  		t.Errorf("readonly table instantiation should fail for invalid index size")
  1026  	}
  1027  
  1028  	// Case 3: Open table non-readonly table to write some data.
  1029  	// Then corrupt the head file and make sure opening the table
  1030  	// again in readonly triggers an error.
  1031  	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
  1032  	f, err := newTable(tmpdir, fname,
  1033  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
  1034  	if err != nil {
  1035  		t.Fatalf("failed to instantiate table: %v", err)
  1036  	}
  1037  	writeChunks(t, f, 8, 32)
  1038  	// Corrupt table file
  1039  	if _, err := f.head.Write([]byte{1, 1}); err != nil {
  1040  		t.Fatal(err)
  1041  	}
  1042  	if err := f.Close(); err != nil {
  1043  		t.Fatal(err)
  1044  	}
  1045  	_, err = newTable(tmpdir, fname,
  1046  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
  1047  	if err == nil {
  1048  		t.Errorf("readonly table instantiation should fail for corrupt table file")
  1049  	}
  1050  
  1051  	// Case 4: Write some data to a table and later re-open it as readonly.
  1052  	// Should be successful.
  1053  	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
  1054  	f, err = newTable(tmpdir, fname,
  1055  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
  1056  	if err != nil {
  1057  		t.Fatalf("failed to instantiate table: %v\n", err)
  1058  	}
  1059  	writeChunks(t, f, 32, 128)
  1060  	if err := f.Close(); err != nil {
  1061  		t.Fatal(err)
  1062  	}
  1063  	f, err = newTable(tmpdir, fname,
  1064  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
  1065  	if err != nil {
  1066  		t.Fatal(err)
  1067  	}
  1068  	v, err := f.Retrieve(10)
  1069  	if err != nil {
  1070  		t.Fatal(err)
  1071  	}
  1072  	exp := getChunk(128, 10)
  1073  	if !bytes.Equal(v, exp) {
  1074  		t.Errorf("retrieved value is incorrect")
  1075  	}
  1076  
  1077  	// Case 5: Now write some data via a batch.
  1078  	// This should fail either during AppendRaw or Commit
  1079  	batch := f.newBatch()
  1080  	writeErr := batch.AppendRaw(32, make([]byte, 1))
  1081  	if writeErr == nil {
  1082  		writeErr = batch.commit()
  1083  	}
  1084  	if writeErr == nil {
  1085  		t.Fatalf("Writing to readonly table should fail")
  1086  	}
  1087  }
  1088  
// randTest performs random freezer table operations.
// Instances of this test are created by Generate.
type randTest []randTestStep

// randTestStep is a single operation in a randTest sequence. Which of the
// parameter fields is meaningful depends on the op code.
type randTestStep struct {
	op     int      // operation code, one of the op* constants
	items  []uint64 // for append and retrieve
	blobs  [][]byte // for append
	target uint64   // for truncate(head/tail)
	err    error    // for debugging
}
  1100  
// Operation codes used by randTest steps.
const (
	opReload = iota // close and reopen the table
	opAppend        // append items through a batch
	opRetrieve      // read back a run of items
	opTruncateHead  // drop items from the head down to a target
	opTruncateHeadAll
	opTruncateTail // hide items from the tail up to a target
	opTruncateTailAll
	opCheckAll // retrieve all visible items and compare
	opMax      // boundary value, not an actual op
)
  1112  
  1113  func getVals(first uint64, n int) [][]byte {
  1114  	var ret [][]byte
  1115  	for i := 0; i < n; i++ {
  1116  		val := make([]byte, 8)
  1117  		binary.BigEndian.PutUint64(val, first+uint64(i))
  1118  		ret = append(ret, val)
  1119  	}
  1120  	return ret
  1121  }
  1122  
// Generate produces a random sequence of freezer table operations, tracking
// enough state (live item indexes, tail deletion counter) to parameterize
// each step consistently. It makes randTest satisfy quick.Generator.
func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
	var (
		deleted uint64   // The number of deleted items from tail
		items   []uint64 // The index of entries in table

		// getItems retrieves the indexes for items in table.
		getItems = func(n int) []uint64 {
			length := len(items)
			if length == 0 {
				return nil
			}
			var ret []uint64
			index := rand.Intn(length)
			// Collect up to n consecutive live indexes starting at a
			// random position; fewer may be returned near the end.
			for i := index; len(ret) < n && i < length; i++ {
				ret = append(ret, items[i])
			}
			return ret
		}

		// addItems appends the given length items into the table.
		addItems = func(n int) []uint64 {
			// New items continue after the last live index, or start at
			// the deletion watermark when the table is logically empty.
			var first = deleted
			if len(items) != 0 {
				first = items[len(items)-1] + 1
			}
			var ret []uint64
			for i := 0; i < n; i++ {
				ret = append(ret, first+uint64(i))
			}
			items = append(items, ret...)
			return ret
		}
	)

	var steps randTest
	for i := 0; i < size; i++ {
		step := randTestStep{op: r.Intn(opMax)}
		switch step.op {
		case opReload, opCheckAll:
			// No parameters needed for these ops.
		case opAppend:
			num := r.Intn(3)
			step.items = addItems(num)
			if len(step.items) == 0 {
				step.blobs = nil
			} else {
				step.blobs = getVals(step.items[0], num)
			}
		case opRetrieve:
			step.items = getItems(r.Intn(3))
		case opTruncateHead:
			if len(items) == 0 {
				step.target = deleted
			} else {
				// Truncate at a random position, dropping everything
				// from that index upwards.
				index := r.Intn(len(items))
				items = items[:index]
				step.target = deleted + uint64(index)
			}
		case opTruncateHeadAll:
			step.target = deleted
			items = items[:0]
		case opTruncateTail:
			if len(items) == 0 {
				step.target = deleted
			} else {
				// Hide a random number of tail items and advance the
				// deletion watermark accordingly.
				index := r.Intn(len(items))
				items = items[index:]
				deleted += uint64(index)
				step.target = deleted
			}
		case opTruncateTailAll:
			step.target = deleted + uint64(len(items))
			items = items[:0]
			deleted = step.target
		}
		steps = append(steps, step)
	}
	return reflect.ValueOf(steps)
}
  1201  
// runRandTest replays a generated sequence of freezer table operations
// against a real table, mirroring the expected visible contents in `values`
// and cross-checking every retrieval. It returns false (recording the
// failure in the offending step's err field) on the first mismatch or
// operation error; returning true means the whole sequence was consistent.
func runRandTest(rt randTest) bool {
	fname := fmt.Sprintf("randtest-%d", rand.Uint64())
	f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		panic("failed to initialize table")
	}
	// values mirrors the expected table contents from tail to head.
	var values [][]byte
	for i, step := range rt {
		switch step.op {
		case opReload:
			// Close and reopen the table to exercise the recovery path.
			f.Close()
			f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
			if err != nil {
				rt[i].err = fmt.Errorf("failed to reload table %v", err)
			}
		case opCheckAll:
			// Retrieve every visible item and compare with the mirror.
			tail := atomic.LoadUint64(&f.itemHidden)
			head := atomic.LoadUint64(&f.items)

			if tail == head {
				continue
			}
			got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, values) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
				}
			}

		case opAppend:
			batch := f.newBatch()
			for i := 0; i < len(step.items); i++ {
				batch.AppendRaw(step.items[i], step.blobs[i])
			}
			batch.commit()
			values = append(values, step.blobs...)

		case opRetrieve:
			var blobs [][]byte
			if len(step.items) == 0 {
				continue
			}
			tail := atomic.LoadUint64(&f.itemHidden)
			// step.items hold absolute indexes, so offset by the number
			// of hidden tail items to index into the mirror.
			for i := 0; i < len(step.items); i++ {
				blobs = append(blobs, values[step.items[i]-tail])
			}
			got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, blobs) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
				}
			}

		case opTruncateHead:
			f.truncateHead(step.target)

			// Shrink the mirror to the table's new visible length.
			length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
			values = values[:length]

		case opTruncateHeadAll:
			f.truncateHead(step.target)
			values = nil

		case opTruncateTail:
			prev := atomic.LoadUint64(&f.itemHidden)
			f.truncateTail(step.target)

			// Drop from the mirror exactly as many items as got hidden.
			truncated := atomic.LoadUint64(&f.itemHidden) - prev
			values = values[truncated:]

		case opTruncateTailAll:
			f.truncateTail(step.target)
			values = nil
		}
		// Abort the test on error.
		if rt[i].err != nil {
			return false
		}
	}
	f.Close()
	return true
}
  1288  
  1289  func TestRandom(t *testing.T) {
  1290  	if err := quick.Check(runRandTest, nil); err != nil {
  1291  		if cerr, ok := err.(*quick.CheckError); ok {
  1292  			t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
  1293  		}
  1294  		t.Fatal(err)
  1295  	}
  1296  }