github.com/ethereum/go-ethereum@v1.16.1/core/rawdb/ancienttest/testsuite.go

// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ancienttest

import (
	"bytes"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/testrand"
)

// TestAncientSuite runs a suite of tests against an ancient database
// implementation.
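//
// A concrete store can run the suite from its own test package; the following
// is a minimal sketch, assuming a hypothetical newTestFreezer constructor that
// builds the store under test:
//
//	func TestMyFreezer(t *testing.T) {
//		ancienttest.TestAncientSuite(t, func(kinds []string) ethdb.AncientStore {
//			return newTestFreezer(t, kinds) // hypothetical constructor, not part of this package
//		})
//	}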
func TestAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
	// Test basic read methods
	t.Run("BasicRead", func(t *testing.T) { basicRead(t, newFn) })

	// Test batch read method
	t.Run("BatchRead", func(t *testing.T) { batchRead(t, newFn) })

	// Test basic write methods
	t.Run("BasicWrite", func(t *testing.T) { basicWrite(t, newFn) })

	// Test that mutating a blob after writing it does not affect the stored data
	t.Run("nonMutable", func(t *testing.T) { nonMutable(t, newFn) })
}

func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
	var (
		db   = newFn([]string{"a"})
		data = makeDataset(100, 32)
	)
	defer db.Close()

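	// Populate table "a" with 100 random items, then truncate both ends so that
	// only the range [10, 90) remains live.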
	db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := 0; i < len(data); i++ {
			op.AppendRaw("a", uint64(i), data[i])
		}
		return nil
	})
	db.TruncateTail(10)
	db.TruncateHead(90)

	// Test basic tail and head retrievals
	tail, err := db.Tail()
	if err != nil || tail != 10 {
		t.Fatal("Failed to retrieve tail")
	}
	ancient, err := db.Ancients()
	if err != nil || ancient != 90 {
		t.Fatal("Failed to retrieve ancient")
	}

	// Test that the deleted items aren't reachable
	var cases = []struct {
		start int
		limit int
	}{
		{0, 10},
		{90, 100},
	}
	for _, c := range cases {
		for i := c.start; i < c.limit; i++ {
			_, err = db.Ancient("a", uint64(i))
			if err == nil {
				t.Fatal("Error is expected for non-existent item")
			}
		}
	}

	// Test that items within the live range are reachable
	for i := 10; i < 90; i++ {
		blob, err := db.Ancient("a", uint64(i))
		if err != nil {
			t.Fatalf("Failed to retrieve item, %v", err)
		}
		if !bytes.Equal(blob, data[i]) {
			t.Fatalf("Unexpected item content, want: %v, got: %v", data[i], blob)
		}
	}

	// Test that items in an unknown table aren't reachable
	_, err = db.Ancient("b", uint64(0))
	if err == nil {
		t.Fatal("Error is expected for unknown table")
	}
}

func batchRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
	var (
		db   = newFn([]string{"a"})
		data = makeDataset(100, 32)
	)
	defer db.Close()

	db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := 0; i < 100; i++ {
			op.AppendRaw("a", uint64(i), data[i])
		}
		return nil
	})
	db.TruncateTail(10)
	db.TruncateHead(90)

	// Test that items in the live range are reachable via batch retrieval.
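	// AncientRange is expected to return at most 'count' items starting at
	// 'start', capped at 'maxSize' bytes in total, but always at least one item
	// even if that single item exceeds the cap (see the 31-byte case below).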
	var cases = []struct {
		start    uint64
		count    uint64
		maxSize  uint64
		expStart int
		expLimit int
	}{
		// Items in range [10, 90) with no size limitation
		{
			10, 80, 0, 10, 90,
		},
		// Items in range [10, 90) with 32 size cap, single item is expected
		{
			10, 80, 32, 10, 11,
		},
		// Items in range [10, 90) with 31 size cap, single item is expected
		{
			10, 80, 31, 10, 11,
		},
		// Items in range [10, 90) with 32*80 size cap, all items are expected
		{
			10, 80, 32 * 80, 10, 90,
		},
		// Extra items above the last item are not returned
		{
			10, 90, 0, 10, 90,
		},
	}
	for i, c := range cases {
		batch, err := db.AncientRange("a", c.start, c.count, c.maxSize)
		if err != nil {
			t.Fatalf("Failed to retrieve item in range, %v", err)
		}
		if !reflect.DeepEqual(batch, data[c.expStart:c.expLimit]) {
			t.Fatalf("Case %d, batch content mismatch", i)
		}
	}

	// Test that out-of-range and zero-count retrievals are rejected
	_, err := db.AncientRange("a", 0, 1, 0)
	if err == nil {
		t.Fatal("Out-of-range retrieval should be rejected")
	}
	_, err = db.AncientRange("a", 90, 1, 0)
	if err == nil {
		t.Fatal("Out-of-range retrieval should be rejected")
	}
	_, err = db.AncientRange("a", 10, 0, 0)
	if err == nil {
		t.Fatal("Zero-size retrieval should be rejected")
	}

	// Test that items in an unknown table aren't reachable
	_, err = db.AncientRange("b", 10, 1, 0)
	if err == nil {
		t.Fatal("Item in unknown table shouldn't be found")
	}
}

func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
	var (
		db    = newFn([]string{"a", "b"})
		dataA = makeDataset(100, 32)
		dataB = makeDataset(100, 32)
	)
	defer db.Close()

	// Writes must append to every table in lockstep; a write touching only
	// table "a" is unaligned and should be rejected.
	_, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := 0; i < 100; i++ {
			op.AppendRaw("a", uint64(i), dataA[i])
		}
		return nil
	})
	if err == nil {
		t.Fatal("Unaligned ancient write should be rejected")
	}

	// Test normal ancient write
	size, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := 0; i < 100; i++ {
			op.AppendRaw("a", uint64(i), dataA[i])
			op.AppendRaw("b", uint64(i), dataB[i])
		}
		return nil
	})
	if err != nil {
		t.Fatalf("Failed to write ancient data %v", err)
	}
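	// Two tables, 100 items each, 32 bytes per item: 2 * 100 * 32 = 6400 bytes.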
	wantSize := int64(6400)
	if size != wantSize {
		t.Fatalf("Unexpected ancient write size, want: %d, got: %d", wantSize, size)
	}

	// Writes should keep working after truncating the head
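	// TruncateHead(90) drops items [90, 100), so the next append continues from item 90.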
	db.TruncateHead(90)
	_, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := 90; i < 100; i++ {
			op.AppendRaw("a", uint64(i), dataA[i])
			op.AppendRaw("b", uint64(i), dataB[i])
		}
		return nil
	})
	if err != nil {
		t.Fatalf("Failed to write ancient data %v", err)
	}

	// Writes should keep working after truncating everything away; wipe the
	// store by truncating the head back to zero, so appends restart at item 0.
	db.TruncateHead(0)
	_, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := 0; i < 100; i++ {
			op.AppendRaw("a", uint64(i), dataA[i])
			op.AppendRaw("b", uint64(i), dataB[i])
		}
		return nil
	})
	if err != nil {
		t.Fatalf("Failed to write ancient data %v", err)
	}
}

func nonMutable(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
	db := newFn([]string{"a"})
	defer db.Close()

	// We write 100 zero-bytes to the freezer and immediately mutate the slice
	db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		data := make([]byte, 100)
		op.AppendRaw("a", uint64(0), data)
		for i := range data {
			data[i] = 0xff
		}
		return nil
	})
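	// The store must not retain a reference to the caller's slice: the 0xff
	// mutation above must not be visible when reading the item back.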
	// Now read it.
	data, err := db.Ancient("a", uint64(0))
	if err != nil {
		t.Fatal(err)
	}
	for k, v := range data {
		if v != 0 {
			t.Fatalf("byte %d != 0: %x", k, v)
		}
	}
}

// TestResettableAncientSuite runs a suite of tests against a resettable ancient
// database implementation.
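// Beyond the normal write path, it checks that the store can be wiped via
// Reset and then written to again starting from item zero.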
func TestResettableAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.ResettableAncientStore) {
	t.Run("Reset", func(t *testing.T) {
		var (
			db   = newFn([]string{"a"})
			data = makeDataset(100, 32)
		)
		defer db.Close()

		db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
			for i := 0; i < 100; i++ {
				op.AppendRaw("a", uint64(i), data[i])
			}
			return nil
		})
		db.TruncateTail(10)
		db.TruncateHead(90)

		// Ancient write should work after resetting
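		// Reset is expected to wipe all previously written data and rewind the
		// item counters, so that appending from item 0 succeeds again.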
		db.Reset()
		db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
			for i := 0; i < 100; i++ {
				op.AppendRaw("a", uint64(i), data[i])
			}
			return nil
		})
	})
}

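// makeDataset returns 'size' random blobs, each 'value' bytes long.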
func makeDataset(size, value int) [][]byte {
	var vals [][]byte
	for i := 0; i < size; i++ {
		vals = append(vals, testrand.Bytes(value))
	}
	return vals
}