github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/core/rawdb/freezer_table_test.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package rawdb
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"math/rand"
    23  	"os"
    24  	"path/filepath"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/stretchr/testify/require"
    29  
    30  	"github.com/scroll-tech/go-ethereum/metrics"
    31  )
    32  
// Seed the global PRNG so each run generates distinct table names
// (tests create files such as "unittest-<random>" in os.TempDir()).
// NOTE(review): rand.Seed is deprecated as of Go 1.20; if the module's
// Go version permits, prefer a locally constructed rand.New source.
func init() {
	rand.Seed(time.Now().Unix())
}
    36  
    37  // TestFreezerBasics test initializing a freezertable from scratch, writing to the table,
    38  // and reading it back.
    39  func TestFreezerBasics(t *testing.T) {
    40  	t.Parallel()
    41  	// set cutoff at 50 bytes
    42  	f, err := newTable(os.TempDir(),
    43  		fmt.Sprintf("unittest-%d", rand.Uint64()),
    44  		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
    45  	if err != nil {
    46  		t.Fatal(err)
    47  	}
    48  	defer f.Close()
    49  
    50  	// Write 15 bytes 255 times, results in 85 files
    51  	writeChunks(t, f, 255, 15)
    52  
    53  	//print(t, f, 0)
    54  	//print(t, f, 1)
    55  	//print(t, f, 2)
    56  	//
    57  	//db[0] =  000000000000000000000000000000
    58  	//db[1] =  010101010101010101010101010101
    59  	//db[2] =  020202020202020202020202020202
    60  
    61  	for y := 0; y < 255; y++ {
    62  		exp := getChunk(15, y)
    63  		got, err := f.Retrieve(uint64(y))
    64  		if err != nil {
    65  			t.Fatalf("reading item %d: %v", y, err)
    66  		}
    67  		if !bytes.Equal(got, exp) {
    68  			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
    69  		}
    70  	}
    71  	// Check that we cannot read too far
    72  	_, err = f.Retrieve(uint64(255))
    73  	if err != errOutOfBounds {
    74  		t.Fatal(err)
    75  	}
    76  }
    77  
    78  // TestFreezerBasicsClosing tests same as TestFreezerBasics, but also closes and reopens the freezer between
    79  // every operation
    80  func TestFreezerBasicsClosing(t *testing.T) {
    81  	t.Parallel()
    82  	// set cutoff at 50 bytes
    83  	var (
    84  		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
    85  		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
    86  		f          *freezerTable
    87  		err        error
    88  	)
    89  	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
    90  	if err != nil {
    91  		t.Fatal(err)
    92  	}
    93  
    94  	// Write 15 bytes 255 times, results in 85 files.
    95  	// In-between writes, the table is closed and re-opened.
    96  	for x := 0; x < 255; x++ {
    97  		data := getChunk(15, x)
    98  		batch := f.newBatch()
    99  		require.NoError(t, batch.AppendRaw(uint64(x), data))
   100  		require.NoError(t, batch.commit())
   101  		f.Close()
   102  
   103  		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   104  		if err != nil {
   105  			t.Fatal(err)
   106  		}
   107  	}
   108  	defer f.Close()
   109  
   110  	for y := 0; y < 255; y++ {
   111  		exp := getChunk(15, y)
   112  		got, err := f.Retrieve(uint64(y))
   113  		if err != nil {
   114  			t.Fatal(err)
   115  		}
   116  		if !bytes.Equal(got, exp) {
   117  			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
   118  		}
   119  		f.Close()
   120  		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   121  		if err != nil {
   122  			t.Fatal(err)
   123  		}
   124  	}
   125  }
   126  
   127  // TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
   128  func TestFreezerRepairDanglingHead(t *testing.T) {
   129  	t.Parallel()
   130  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   131  	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
   132  
   133  	// Fill table
   134  	{
   135  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   136  		if err != nil {
   137  			t.Fatal(err)
   138  		}
   139  		// Write 15 bytes 255 times
   140  		writeChunks(t, f, 255, 15)
   141  
   142  		// The last item should be there
   143  		if _, err = f.Retrieve(0xfe); err != nil {
   144  			t.Fatal(err)
   145  		}
   146  		f.Close()
   147  	}
   148  
   149  	// open the index
   150  	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
   151  	if err != nil {
   152  		t.Fatalf("Failed to open index file: %v", err)
   153  	}
   154  	// Remove 4 bytes
   155  	stat, err := idxFile.Stat()
   156  	if err != nil {
   157  		t.Fatalf("Failed to stat index file: %v", err)
   158  	}
   159  	idxFile.Truncate(stat.Size() - 4)
   160  	idxFile.Close()
   161  
   162  	// Now open it again
   163  	{
   164  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   165  		if err != nil {
   166  			t.Fatal(err)
   167  		}
   168  		// The last item should be missing
   169  		if _, err = f.Retrieve(0xff); err == nil {
   170  			t.Errorf("Expected error for missing index entry")
   171  		}
   172  		// The one before should still be there
   173  		if _, err = f.Retrieve(0xfd); err != nil {
   174  			t.Fatalf("Expected no error, got %v", err)
   175  		}
   176  	}
   177  }
   178  
// TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove everything but the first item, and leave data unaligned
	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
	// (two full entries plus half an entry remain, so only item 0 is intact
	// and the trailing partial entry must be discarded by the repair)
	idxFile.Truncate(indexEntrySize + indexEntrySize + indexEntrySize/2)
	idxFile.Close()

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// The first item should be there
		if _, err = f.Retrieve(0); err != nil {
			t.Fatal(err)
		}
		// The second item should be missing
		if _, err = f.Retrieve(1); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// We should now be able to store items again, from item = 1
		batch := f.newBatch()
		for x := 1; x < 0xff; x++ {
			// ^x inverts the fill byte, so rewritten payloads differ from
			// the originals and stale reads would be detected below.
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}

	// And if we open it, we should now be able to read all of them (new values)
	{
		// NOTE(review): the newTable error is deliberately ignored; a nil
		// table would panic on the first Retrieve below and fail the test.
		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		for y := 1; y < 255; y++ {
			exp := getChunk(15, ^y) // rewritten items carry inverted fill bytes
			got, err := f.Retrieve(uint64(y))
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, exp) {
				t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
			}
		}
	}
}
   249  
   250  // TestSnappyDetection tests that we fail to open a snappy database and vice versa
   251  func TestSnappyDetection(t *testing.T) {
   252  	t.Parallel()
   253  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   254  	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())
   255  
   256  	// Open with snappy
   257  	{
   258  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   259  		if err != nil {
   260  			t.Fatal(err)
   261  		}
   262  		// Write 15 bytes 255 times
   263  		writeChunks(t, f, 255, 15)
   264  		f.Close()
   265  	}
   266  
   267  	// Open without snappy
   268  	{
   269  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
   270  		if err != nil {
   271  			t.Fatal(err)
   272  		}
   273  		if _, err = f.Retrieve(0); err == nil {
   274  			f.Close()
   275  			t.Fatalf("expected empty table")
   276  		}
   277  	}
   278  
   279  	// Open with snappy
   280  	{
   281  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   282  		if err != nil {
   283  			t.Fatal(err)
   284  		}
   285  		// There should be 255 items
   286  		if _, err = f.Retrieve(0xfe); err != nil {
   287  			f.Close()
   288  			t.Fatalf("expected no error, got %v", err)
   289  		}
   290  	}
   291  }
   292  
   293  func assertFileSize(f string, size int64) error {
   294  	stat, err := os.Stat(f)
   295  	if err != nil {
   296  		return err
   297  	}
   298  	if stat.Size() != size {
   299  		return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
   300  	}
   301  	return nil
   302  }
   303  
   304  // TestFreezerRepairDanglingIndex checks that if the index has more entries than there are data,
   305  // the index is repaired
   306  func TestFreezerRepairDanglingIndex(t *testing.T) {
   307  	t.Parallel()
   308  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   309  	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())
   310  
   311  	// Fill a table and close it
   312  	{
   313  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   314  		if err != nil {
   315  			t.Fatal(err)
   316  		}
   317  		// Write 15 bytes 9 times : 150 bytes
   318  		writeChunks(t, f, 9, 15)
   319  
   320  		// The last item should be there
   321  		if _, err = f.Retrieve(f.items - 1); err != nil {
   322  			f.Close()
   323  			t.Fatal(err)
   324  		}
   325  		f.Close()
   326  		// File sizes should be 45, 45, 45 : items[3, 3, 3)
   327  	}
   328  
   329  	// Crop third file
   330  	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
   331  	// Truncate third file: 45 ,45, 20
   332  	{
   333  		if err := assertFileSize(fileToCrop, 45); err != nil {
   334  			t.Fatal(err)
   335  		}
   336  		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
   337  		if err != nil {
   338  			t.Fatal(err)
   339  		}
   340  		file.Truncate(20)
   341  		file.Close()
   342  	}
   343  
   344  	// Open db it again
   345  	// It should restore the file(s) to
   346  	// 45, 45, 15
   347  	// with 3+3+1 items
   348  	{
   349  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   350  		if err != nil {
   351  			t.Fatal(err)
   352  		}
   353  		defer f.Close()
   354  		if f.items != 7 {
   355  			t.Fatalf("expected %d items, got %d", 7, f.items)
   356  		}
   357  		if err := assertFileSize(fileToCrop, 15); err != nil {
   358  			t.Fatal(err)
   359  		}
   360  	}
   361  }
   362  
   363  func TestFreezerTruncate(t *testing.T) {
   364  	t.Parallel()
   365  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   366  	fname := fmt.Sprintf("truncation-%d", rand.Uint64())
   367  
   368  	// Fill table
   369  	{
   370  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   371  		if err != nil {
   372  			t.Fatal(err)
   373  		}
   374  		// Write 15 bytes 30 times
   375  		writeChunks(t, f, 30, 15)
   376  
   377  		// The last item should be there
   378  		if _, err = f.Retrieve(f.items - 1); err != nil {
   379  			t.Fatal(err)
   380  		}
   381  		f.Close()
   382  	}
   383  
   384  	// Reopen, truncate
   385  	{
   386  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   387  		if err != nil {
   388  			t.Fatal(err)
   389  		}
   390  		defer f.Close()
   391  		f.truncate(10) // 150 bytes
   392  		if f.items != 10 {
   393  			t.Fatalf("expected %d items, got %d", 10, f.items)
   394  		}
   395  		// 45, 45, 45, 15 -- bytes should be 15
   396  		if f.headBytes != 15 {
   397  			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
   398  		}
   399  	}
   400  }
   401  
   402  // TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
   403  // That will rewind the index, and _should_ truncate the head file
   404  func TestFreezerRepairFirstFile(t *testing.T) {
   405  	t.Parallel()
   406  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   407  	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
   408  
   409  	// Fill table
   410  	{
   411  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   412  		if err != nil {
   413  			t.Fatal(err)
   414  		}
   415  		// Write 80 bytes, splitting out into two files
   416  		batch := f.newBatch()
   417  		require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
   418  		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
   419  		require.NoError(t, batch.commit())
   420  
   421  		// The last item should be there
   422  		if _, err = f.Retrieve(1); err != nil {
   423  			t.Fatal(err)
   424  		}
   425  		f.Close()
   426  	}
   427  
   428  	// Truncate the file in half
   429  	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
   430  	{
   431  		if err := assertFileSize(fileToCrop, 40); err != nil {
   432  			t.Fatal(err)
   433  		}
   434  		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
   435  		if err != nil {
   436  			t.Fatal(err)
   437  		}
   438  		file.Truncate(20)
   439  		file.Close()
   440  	}
   441  
   442  	// Reopen
   443  	{
   444  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   445  		if err != nil {
   446  			t.Fatal(err)
   447  		}
   448  		if f.items != 1 {
   449  			f.Close()
   450  			t.Fatalf("expected %d items, got %d", 0, f.items)
   451  		}
   452  
   453  		// Write 40 bytes
   454  		batch := f.newBatch()
   455  		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
   456  		require.NoError(t, batch.commit())
   457  
   458  		f.Close()
   459  
   460  		// Should have been truncated down to zero and then 40 written
   461  		if err := assertFileSize(fileToCrop, 40); err != nil {
   462  			t.Fatal(err)
   463  		}
   464  	}
   465  }
   466  
   467  // TestFreezerReadAndTruncate tests:
   468  // - we have a table open
   469  // - do some reads, so files are open in readonly
   470  // - truncate so those files are 'removed'
   471  // - check that we did not keep the rdonly file descriptors
   472  func TestFreezerReadAndTruncate(t *testing.T) {
   473  	t.Parallel()
   474  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   475  	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
   476  
   477  	// Fill table
   478  	{
   479  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   480  		if err != nil {
   481  			t.Fatal(err)
   482  		}
   483  		// Write 15 bytes 30 times
   484  		writeChunks(t, f, 30, 15)
   485  
   486  		// The last item should be there
   487  		if _, err = f.Retrieve(f.items - 1); err != nil {
   488  			t.Fatal(err)
   489  		}
   490  		f.Close()
   491  	}
   492  
   493  	// Reopen and read all files
   494  	{
   495  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   496  		if err != nil {
   497  			t.Fatal(err)
   498  		}
   499  		if f.items != 30 {
   500  			f.Close()
   501  			t.Fatalf("expected %d items, got %d", 0, f.items)
   502  		}
   503  		for y := byte(0); y < 30; y++ {
   504  			f.Retrieve(uint64(y))
   505  		}
   506  
   507  		// Now, truncate back to zero
   508  		f.truncate(0)
   509  
   510  		// Write the data again
   511  		batch := f.newBatch()
   512  		for x := 0; x < 30; x++ {
   513  			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
   514  		}
   515  		require.NoError(t, batch.commit())
   516  		f.Close()
   517  	}
   518  }
   519  
// TestFreezerOffset checks that a table survives manual "cropping": deleting
// the first data files and rewriting index entry zero to record the tail
// file number plus the count of removed items, so surviving items keep
// their original (offset) item numbers.
func TestFreezerOffset(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("offset-%d", rand.Uint64())

	// Fill table
	{
		// cutoff = 40 bytes, so two 20-byte items fit per data file
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}

		// Write 6 x 20 bytes, splitting out into three files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))

		require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
		require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))

		require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
		require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
		require.NoError(t, batch.commit())

		t.Log(f.dumpIndexString(0, 100))
		f.Close()
	}

	// Now crop it.
	{
		// delete files 0 and 1 (which hold items 0-3)
		for i := 0; i < 2; i++ {
			p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
			if err := os.Remove(p); err != nil {
				t.Fatal(err)
			}
		}
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		// 7 entries: the zero (offset) entry plus one per written item
		indexBuf := make([]byte, 7*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 4 ] at index zero

		tailId := uint32(2)     // First file is 2
		itemOffset := uint32(4) // We have removed four items
		zeroIndex := indexEntry{
			filenum: tailId,
			offset:  itemOffset,
		}
		buf := zeroIndex.append(nil)
		// Overwrite index zero
		copy(indexBuf, buf)
		// Remove the four next indices by overwriting
		copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
		indexFile.WriteAt(indexBuf, 0)
		// Need to truncate the moved index items
		indexFile.Truncate(indexEntrySize * (1 + 2))
		indexFile.Close()
	}

	// Now open again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		// It should allow writing item 6.
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
		require.NoError(t, batch.commit())

		// Items 0-3 were cropped away and must now be unreachable...
		checkRetrieveError(t, f, map[uint64]error{
			0: errOutOfBounds,
			1: errOutOfBounds,
			2: errOutOfBounds,
			3: errOutOfBounds,
		})
		// ...while the surviving items keep their original numbering.
		checkRetrieve(t, f, map[uint64][]byte{
			4: getChunk(20, 0xbb),
			5: getChunk(20, 0xaa),
			6: getChunk(20, 0x99),
		})
	}

	// Edit the index again, with a much larger initial offset of 1M.
	{
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 3*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 1M ] at index zero

		tailId := uint32(2)           // First file is 2
		itemOffset := uint32(1000000) // We have removed 1M items
		zeroIndex := indexEntry{
			offset:  itemOffset,
			filenum: tailId,
		}
		buf := zeroIndex.append(nil)
		// Overwrite index zero
		copy(indexBuf, buf)
		indexFile.WriteAt(indexBuf, 0)
		indexFile.Close()
	}

	// Check that existing items have been moved to index 1M.
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		checkRetrieveError(t, f, map[uint64]error{
			0:      errOutOfBounds,
			1:      errOutOfBounds,
			2:      errOutOfBounds,
			3:      errOutOfBounds,
			999999: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			1000000: getChunk(20, 0xbb),
			1000001: getChunk(20, 0xaa),
		})
	}
}
   662  
   663  func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
   664  	t.Helper()
   665  
   666  	for item, wantBytes := range items {
   667  		value, err := f.Retrieve(item)
   668  		if err != nil {
   669  			t.Fatalf("can't get expected item %d: %v", item, err)
   670  		}
   671  		if !bytes.Equal(value, wantBytes) {
   672  			t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
   673  		}
   674  	}
   675  }
   676  
   677  func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
   678  	t.Helper()
   679  
   680  	for item, wantError := range items {
   681  		value, err := f.Retrieve(item)
   682  		if err == nil {
   683  			t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
   684  		}
   685  		if err != wantError {
   686  			t.Fatalf("wrong error for item %d: %v", item, err)
   687  		}
   688  	}
   689  }
   690  
   691  // Gets a chunk of data, filled with 'b'
   692  func getChunk(size int, b int) []byte {
   693  	data := make([]byte, size)
   694  	for i := range data {
   695  		data[i] = byte(b)
   696  	}
   697  	return data
   698  }
   699  
   700  // TODO (?)
// - test that if we remove several head-files, as well as the last data-file,
//   the index is truncated accordingly
   703  // Right now, the freezer would fail on these conditions:
   704  // 1. have data files d0, d1, d2, d3
   705  // 2. remove d2,d3
   706  //
   707  // However, all 'normal' failure modes arising due to failing to sync() or save a file
   708  // should be handled already, and the case described above can only (?) happen if an
   709  // external process/user deletes files from the filesystem.
   710  
   711  func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
   712  	t.Helper()
   713  
   714  	batch := ft.newBatch()
   715  	for i := 0; i < n; i++ {
   716  		if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
   717  			t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
   718  		}
   719  	}
   720  	if err := batch.commit(); err != nil {
   721  		t.Fatalf("Commit returned error: %v", err)
   722  	}
   723  }
   724  
   725  // TestSequentialRead does some basic tests on the RetrieveItems.
   726  func TestSequentialRead(t *testing.T) {
   727  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   728  	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
   729  	{ // Fill table
   730  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   731  		if err != nil {
   732  			t.Fatal(err)
   733  		}
   734  		// Write 15 bytes 30 times
   735  		writeChunks(t, f, 30, 15)
   736  		f.DumpIndex(0, 30)
   737  		f.Close()
   738  	}
   739  	{ // Open it, iterate, verify iteration
   740  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
   741  		if err != nil {
   742  			t.Fatal(err)
   743  		}
   744  		items, err := f.RetrieveItems(0, 10000, 100000)
   745  		if err != nil {
   746  			t.Fatal(err)
   747  		}
   748  		if have, want := len(items), 30; have != want {
   749  			t.Fatalf("want %d items, have %d ", want, have)
   750  		}
   751  		for i, have := range items {
   752  			want := getChunk(15, i)
   753  			if !bytes.Equal(want, have) {
   754  				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
   755  			}
   756  		}
   757  		f.Close()
   758  	}
   759  	{ // Open it, iterate, verify byte limit. The byte limit is less than item
   760  		// size, so each lookup should only return one item
   761  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
   762  		if err != nil {
   763  			t.Fatal(err)
   764  		}
   765  		items, err := f.RetrieveItems(0, 10000, 10)
   766  		if err != nil {
   767  			t.Fatal(err)
   768  		}
   769  		if have, want := len(items), 1; have != want {
   770  			t.Fatalf("want %d items, have %d ", want, have)
   771  		}
   772  		for i, have := range items {
   773  			want := getChunk(15, i)
   774  			if !bytes.Equal(want, have) {
   775  				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
   776  			}
   777  		}
   778  		f.Close()
   779  	}
   780  }
   781  
   782  // TestSequentialReadByteLimit does some more advanced tests on batch reads.
   783  // These tests check that when the byte limit hits, we correctly abort in time,
   784  // but also properly do all the deferred reads for the previous data, regardless
   785  // of whether the data crosses a file boundary or not.
   786  func TestSequentialReadByteLimit(t *testing.T) {
   787  	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
   788  	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
   789  	{ // Fill table
   790  		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
   791  		if err != nil {
   792  			t.Fatal(err)
   793  		}
   794  		// Write 10 bytes 30 times,
   795  		// Splitting it at every 100 bytes (10 items)
   796  		writeChunks(t, f, 30, 10)
   797  		f.Close()
   798  	}
   799  	for i, tc := range []struct {
   800  		items uint64
   801  		limit uint64
   802  		want  int
   803  	}{
   804  		{9, 89, 8},
   805  		{10, 99, 9},
   806  		{11, 109, 10},
   807  		{100, 89, 8},
   808  		{100, 99, 9},
   809  		{100, 109, 10},
   810  	} {
   811  		{
   812  			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
   813  			if err != nil {
   814  				t.Fatal(err)
   815  			}
   816  			items, err := f.RetrieveItems(0, tc.items, tc.limit)
   817  			if err != nil {
   818  				t.Fatal(err)
   819  			}
   820  			if have, want := len(items), tc.want; have != want {
   821  				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
   822  			}
   823  			for ii, have := range items {
   824  				want := getChunk(10, ii)
   825  				if !bytes.Equal(want, have) {
   826  					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
   827  				}
   828  			}
   829  			f.Close()
   830  		}
   831  	}
   832  }