github.com/ledgerwatch/erigon-lib@v1.0.0/state/history_test.go

/*
   Copyright 2022 Erigon contributors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package state

import (
	"context"
	"encoding/binary"
	"fmt"
	"math"
	"strings"
	"testing"
	"time"

	"github.com/ledgerwatch/erigon-lib/common/background"
	"github.com/ledgerwatch/erigon-lib/common/hexutility"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/iter"
	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
	"github.com/ledgerwatch/erigon-lib/kv/order"
	"github.com/ledgerwatch/erigon-lib/recsplit"
	"github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32"
	"github.com/ledgerwatch/log/v3"
	"github.com/stretchr/testify/require"
	btree2 "github.com/tidwall/btree"
)

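// testDbAndHistory creates an in-memory MDBX database with throw-away history
// tables plus a History instance over them (fsync disabled), and registers
// both for cleanup when the test finishes.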
func testDbAndHistory(tb testing.TB, largeValues bool, logger log.Logger) (string, kv.RwDB, *History) {
	tb.Helper()
	path := tb.TempDir()
	keysTable := "AccountKeys"
	indexTable := "AccountIndex"
	valsTable := "AccountVals"
	settingsTable := "Settings"
	db := mdbx.NewMDBX(logger).InMem(path).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
		return kv.TableCfg{
			keysTable:     kv.TableCfgItem{Flags: kv.DupSort},
			indexTable:    kv.TableCfgItem{Flags: kv.DupSort},
			valsTable:     kv.TableCfgItem{Flags: kv.DupSort},
			settingsTable: kv.TableCfgItem{},
		}
	}).MustOpen()
	// Pass largeValues through to NewHistory so the "large_values" and
	// "small_values" subtests actually differ (assumption: the bool argument
	// before logger is the large-values switch).
	h, err := NewHistory(path, path, 16, "hist", keysTable, indexTable, valsTable, false, nil, largeValues, logger)
	require.NoError(tb, err)
	h.DisableFsync()
	tb.Cleanup(db.Close)
	tb.Cleanup(h.Close)
	return path, db, h
}

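// TestHistoryCollationBuild records a few key updates at txNums 2, 3, 6 and 7,
// collates txNum range [0, 8) and builds the static files, then verifies the
// decompressed history values, the per-key Elias-Fano txNum lists and the
// recsplit index lookups.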
func TestHistoryCollationBuild(t *testing.T) {
	logger := log.New()
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	ctx := context.Background()

	test := func(t *testing.T, h *History, db kv.RwDB) {
		t.Helper()
		require := require.New(t)
		tx, err := db.BeginRw(ctx)
		require.NoError(err)
		defer tx.Rollback()
		h.SetTx(tx)
		h.StartWrites()
		defer h.FinishWrites()

		h.SetTxNum(2)
		err = h.AddPrevValue([]byte("key1"), nil, nil)
		require.NoError(err)

		h.SetTxNum(3)
		err = h.AddPrevValue([]byte("key2"), nil, nil)
		require.NoError(err)

		h.SetTxNum(6)
		err = h.AddPrevValue([]byte("key1"), nil, []byte("value1.1"))
		require.NoError(err)
		err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.1"))
		require.NoError(err)

		flusher := h.Rotate()

		h.SetTxNum(7)
		err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.2"))
		require.NoError(err)
		err = h.AddPrevValue([]byte("key3"), nil, nil)
		require.NoError(err)

		err = flusher.Flush(ctx, tx)
		require.NoError(err)

		err = h.Rotate().Flush(ctx, tx)
		require.NoError(err)

		c, err := h.collate(0, 0, 8, tx)
		require.NoError(err)
		require.True(strings.HasSuffix(c.historyPath, "hist.0-1.v"))
		require.Equal(6, c.historyCount)
		require.Equal(3, len(c.indexBitmaps))
		require.Equal([]uint64{7}, c.indexBitmaps["key3"].ToArray())
		require.Equal([]uint64{3, 6, 7}, c.indexBitmaps["key2"].ToArray())
		require.Equal([]uint64{2, 6}, c.indexBitmaps["key1"].ToArray())

		sf, err := h.buildFiles(ctx, 0, c, background.NewProgressSet())
		require.NoError(err)
		defer sf.Close()
		var valWords []string
		g := sf.historyDecomp.MakeGetter()
		g.Reset(0)
		for g.HasNext() {
			w, _ := g.Next(nil)
			valWords = append(valWords, string(w))
		}
		require.Equal([]string{"", "value1.1", "", "value2.1", "value2.2", ""}, valWords)
		require.Equal(6, int(sf.historyIdx.KeyCount()))
		g = sf.efHistoryDecomp.MakeGetter()
		g.Reset(0)
		var keyWords []string
		var intArrs [][]uint64
		for g.HasNext() {
			w, _ := g.Next(nil)
			keyWords = append(keyWords, string(w))
			w, _ = g.Next(w[:0])
			ef, _ := eliasfano32.ReadEliasFano(w)
			ints, err := iter.ToU64Arr(ef.Iterator())
			require.NoError(err)
			intArrs = append(intArrs, ints)
		}
		require.Equal([]string{"key1", "key2", "key3"}, keyWords)
		require.Equal([][]uint64{{2, 6}, {3, 6, 7}, {7}}, intArrs)
		r := recsplit.NewIndexReader(sf.efHistoryIdx)
		for i := 0; i < len(keyWords); i++ {
			offset := r.Lookup([]byte(keyWords[i]))
			g.Reset(offset)
			w, _ := g.Next(nil)
			require.Equal(keyWords[i], string(w))
		}
		r = recsplit.NewIndexReader(sf.historyIdx)
		g = sf.historyDecomp.MakeGetter()
		var vi int
		for i := 0; i < len(keyWords); i++ {
			ints := intArrs[i]
			for j := 0; j < len(ints); j++ {
				var txKey [8]byte
				binary.BigEndian.PutUint64(txKey[:], ints[j])
				offset := r.Lookup2(txKey[:], []byte(keyWords[i]))
				g.Reset(offset)
				w, _ := g.Next(nil)
				require.Equal(valWords[vi], string(w))
				vi++
			}
		}
	}
	t.Run("large_values", func(t *testing.T) {
		_, db, h := testDbAndHistory(t, true, logger)
		test(t, h, db)
	})
	t.Run("small_values", func(t *testing.T) {
		_, db, h := testDbAndHistory(t, false, logger)
		test(t, h, db)
	})
}

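// TestHistoryAfterPrune builds and integrates the static files for txNum range
// [0, 16) and then prunes that range, expecting the history's DB tables to be
// empty afterwards.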
func TestHistoryAfterPrune(t *testing.T) {
	logger := log.New()
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	ctx := context.Background()
	test := func(t *testing.T, h *History, db kv.RwDB) {
		t.Helper()
		require := require.New(t)
		tx, err := db.BeginRw(ctx)
		require.NoError(err)
		defer tx.Rollback()
		h.SetTx(tx)
		h.StartWrites()
		defer h.FinishWrites()

		h.SetTxNum(2)
		err = h.AddPrevValue([]byte("key1"), nil, nil)
		require.NoError(err)

		h.SetTxNum(3)
		err = h.AddPrevValue([]byte("key2"), nil, nil)
		require.NoError(err)

		h.SetTxNum(6)
		err = h.AddPrevValue([]byte("key1"), nil, []byte("value1.1"))
		require.NoError(err)
		err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.1"))
		require.NoError(err)

		h.SetTxNum(7)
		err = h.AddPrevValue([]byte("key2"), nil, []byte("value2.2"))
		require.NoError(err)
		err = h.AddPrevValue([]byte("key3"), nil, nil)
		require.NoError(err)

		err = h.Rotate().Flush(ctx, tx)
		require.NoError(err)

		c, err := h.collate(0, 0, 16, tx)
		require.NoError(err)

		sf, err := h.buildFiles(ctx, 0, c, background.NewProgressSet())
		require.NoError(err)

		h.integrateFiles(sf, 0, 16)

		err = h.prune(ctx, 0, 16, math.MaxUint64, logEvery)
		require.NoError(err)
		h.SetTx(tx)

		for _, table := range []string{h.indexKeysTable, h.historyValsTable, h.indexTable} {
			var cur kv.Cursor
			cur, err = tx.Cursor(table)
			require.NoError(err)
			defer cur.Close()
			var k []byte
			k, _, err = cur.First()
			require.NoError(err)
			require.Nil(k, table)
		}
	}
	t.Run("large_values", func(t *testing.T) {
		_, db, h := testDbAndHistory(t, true, logger)
		test(t, h, db)
	})
	t.Run("small_values", func(t *testing.T) {
		_, db, h := testDbAndHistory(t, false, logger)
		test(t, h, db)
	})
}

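// filledHistory populates a History with 1000 transactions: key k (1..31,
// big-endian encoded with a 0x01 marker byte) changes value at every txNum
// that is a multiple of k, and buffered writes are flushed roughly every 10
// transactions.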
func filledHistory(tb testing.TB, largeValues bool, logger log.Logger) (string, kv.RwDB, *History, uint64) {
	tb.Helper()
	path, db, h := testDbAndHistory(tb, largeValues, logger)
	ctx := context.Background()
	tx, err := db.BeginRw(ctx)
	require.NoError(tb, err)
	defer tx.Rollback()
	h.SetTx(tx)
	h.StartWrites()
	defer h.FinishWrites()

	txs := uint64(1000)
	// keys are encodings of numbers 1..31
	// each key changes value on every txNum that is a multiple of the key
	var prevVal [32][]byte
	var flusher flusher
	for txNum := uint64(1); txNum <= txs; txNum++ {
		h.SetTxNum(txNum)
		for keyNum := uint64(1); keyNum <= uint64(31); keyNum++ {
			if txNum%keyNum == 0 {
				valNum := txNum / keyNum
				var k [8]byte
				var v [8]byte
				binary.BigEndian.PutUint64(k[:], keyNum)
				binary.BigEndian.PutUint64(v[:], valNum)
				k[0] = 1   // mark key to simplify debug
				v[0] = 255 // mark value to simplify debug
				err = h.AddPrevValue(k[:], nil, prevVal[keyNum])
				require.NoError(tb, err)
				prevVal[keyNum] = v[:]
			}
		}
		if flusher != nil {
			err = flusher.Flush(ctx, tx)
			require.NoError(tb, err)
			flusher = nil
		}
		if txNum%10 == 0 {
			flusher = h.Rotate()
		}
	}
	if flusher != nil {
		err = flusher.Flush(ctx, tx)
		require.NoError(tb, err)
	}
	err = h.Rotate().Flush(ctx, tx)
	require.NoError(tb, err)
	err = tx.Commit()
	require.NoError(tb, err)

	return path, db, h, txs
}

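// checkHistoryHistory reads every (txNum, keyNum) combination back through
// GetNoState and verifies it against the deterministic fill pattern produced
// by filledHistory.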
func checkHistoryHistory(t *testing.T, h *History, txs uint64) {
	t.Helper()
	// Check the history
	hc := h.MakeContext()
	defer hc.Close()

	for txNum := uint64(0); txNum <= txs; txNum++ {
		for keyNum := uint64(1); keyNum <= uint64(31); keyNum++ {
			valNum := txNum / keyNum
			var k [8]byte
			var v [8]byte
			label := fmt.Sprintf("txNum=%d, keyNum=%d", txNum, keyNum)
			//fmt.Printf("label=%s\n", label)
			binary.BigEndian.PutUint64(k[:], keyNum)
			binary.BigEndian.PutUint64(v[:], valNum)
			k[0], v[0] = 0x01, 0xff
			val, ok, err := hc.GetNoState(k[:], txNum+1)
			//require.Equal(t, ok, txNum < 976)
			if ok {
				require.NoError(t, err, label)
				if txNum >= keyNum {
					require.Equal(t, v[:], val, label)
				} else {
					require.Equal(t, []byte{}, val, label)
				}
			}
		}
	}
}

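// TestHistoryHistory collates, builds, integrates and prunes all but the last
// two aggregation steps, then checks that historical values are still readable.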
func TestHistoryHistory(t *testing.T) {
	logger := log.New()
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	ctx := context.Background()
	test := func(t *testing.T, h *History, db kv.RwDB, txs uint64) {
		t.Helper()
		require := require.New(t)
		tx, err := db.BeginRw(ctx)
		require.NoError(err)
		h.SetTx(tx)
		defer tx.Rollback()

		// Leave the last 2 aggregation steps un-collated
		for step := uint64(0); step < txs/h.aggregationStep-1; step++ {
			func() {
				c, err := h.collate(step, step*h.aggregationStep, (step+1)*h.aggregationStep, tx)
				require.NoError(err)
				sf, err := h.buildFiles(ctx, step, c, background.NewProgressSet())
				require.NoError(err)
				h.integrateFiles(sf, step*h.aggregationStep, (step+1)*h.aggregationStep)
				err = h.prune(ctx, step*h.aggregationStep, (step+1)*h.aggregationStep, math.MaxUint64, logEvery)
				require.NoError(err)
			}()
		}
		checkHistoryHistory(t, h, txs)
	}
	t.Run("large_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, true, logger)
		test(t, h, db, txs)
	})
	t.Run("small_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, false, logger)
		test(t, h, db, txs)
	})
}

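// collateAndMergeHistory collates, builds, integrates and prunes every step
// except the last two, then repeatedly merges the resulting static files up to
// the maximum span and builds any optional missed indices.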
func collateAndMergeHistory(tb testing.TB, db kv.RwDB, h *History, txs uint64) {
	tb.Helper()
	require := require.New(tb)

	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	ctx := context.Background()
	tx, err := db.BeginRwNosync(ctx)
	require.NoError(err)
	h.SetTx(tx)
	defer tx.Rollback()

	// Leave the last 2 aggregation steps un-collated
	for step := uint64(0); step < txs/h.aggregationStep-1; step++ {
		c, err := h.collate(step, step*h.aggregationStep, (step+1)*h.aggregationStep, tx)
		require.NoError(err)
		sf, err := h.buildFiles(ctx, step, c, background.NewProgressSet())
		require.NoError(err)
		h.integrateFiles(sf, step*h.aggregationStep, (step+1)*h.aggregationStep)
		err = h.prune(ctx, step*h.aggregationStep, (step+1)*h.aggregationStep, math.MaxUint64, logEvery)
		require.NoError(err)
	}

	var r HistoryRanges
	maxEndTxNum := h.endTxNumMinimax()

	maxSpan := h.aggregationStep * StepsInBiggestFile

	for {
		if stop := func() bool {
			hc := h.MakeContext()
			defer hc.Close()
			r = h.findMergeRange(maxEndTxNum, maxSpan)
			if !r.any() {
				return true
			}
			indexOuts, historyOuts, _, err := hc.staticFilesInRange(r)
			require.NoError(err)
			indexIn, historyIn, err := h.mergeFiles(ctx, indexOuts, historyOuts, r, 1, background.NewProgressSet())
			require.NoError(err)
			h.integrateMergedFiles(indexOuts, historyOuts, indexIn, historyIn)
			return false
		}(); stop {
			break
		}
	}

	hc := h.MakeContext()
	defer hc.Close()
	err = hc.BuildOptionalMissedIndices(ctx)
	require.NoError(err)

	err = tx.Commit()
	require.NoError(err)
}

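// TestHistoryMergeFiles verifies that historical values remain readable after
// the static files have been collated and merged.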
func TestHistoryMergeFiles(t *testing.T) {
	logger := log.New()
	test := func(t *testing.T, h *History, db kv.RwDB, txs uint64) {
		t.Helper()
		collateAndMergeHistory(t, db, h, txs)
		checkHistoryHistory(t, h, txs)
	}

	t.Run("large_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, true, logger)
		test(t, h, db, txs)
	})
	t.Run("small_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, false, logger)
		test(t, h, db, txs)
	})
}

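// TestHistoryScanFiles re-opens the folder after collation and merge, making
// sure a freshly re-scanned History still serves the same historical values.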
func TestHistoryScanFiles(t *testing.T) {
	logger := log.New()
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	test := func(t *testing.T, h *History, db kv.RwDB, txs uint64) {
		t.Helper()
		require := require.New(t)

		collateAndMergeHistory(t, db, h, txs)
		// Re-open the history and re-scan the files
		txNum := h.txNum
		require.NoError(h.OpenFolder())
		h.SetTxNum(txNum)
		// Check the history
		checkHistoryHistory(t, h, txs)
	}

	t.Run("large_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, true, logger)
		test(t, h, db, txs)
	})
	t.Run("small_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, false, logger)
		test(t, h, db, txs)
	})
}

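// TestIterateChanged exercises HistoryRange over several txNum windows, with
// and without bounds and limits, comparing the returned key/value pairs with
// the expected hex-encoded sequences.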
func TestIterateChanged(t *testing.T) {
	logger := log.New()
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	ctx := context.Background()

	test := func(t *testing.T, h *History, db kv.RwDB, txs uint64) {
		t.Helper()
		require := require.New(t)

		collateAndMergeHistory(t, db, h, txs)

		tx, err := db.BeginRo(ctx)
		require.NoError(err)
		defer tx.Rollback()
		var keys, vals []string
		ic := h.MakeContext()
		defer ic.Close()

		it, err := ic.HistoryRange(2, 20, order.Asc, -1, tx)
		require.NoError(err)
		for it.HasNext() {
			k, v, err := it.Next()
			require.NoError(err)
			keys = append(keys, fmt.Sprintf("%x", k))
			vals = append(vals, fmt.Sprintf("%x", v))
		}
		require.Equal([]string{
			"0100000000000001",
			"0100000000000002",
			"0100000000000003",
			"0100000000000004",
			"0100000000000005",
			"0100000000000006",
			"0100000000000007",
			"0100000000000008",
			"0100000000000009",
			"010000000000000a",
			"010000000000000b",
			"010000000000000c",
			"010000000000000d",
			"010000000000000e",
			"010000000000000f",
			"0100000000000010",
			"0100000000000011",
			"0100000000000012",
			"0100000000000013"}, keys)
		require.Equal([]string{
			"ff00000000000001",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			"",
			""}, vals)
		it, err = ic.HistoryRange(995, 1000, order.Asc, -1, tx)
		require.NoError(err)
		keys, vals = keys[:0], vals[:0]
		for it.HasNext() {
			k, v, err := it.Next()
			require.NoError(err)
			keys = append(keys, fmt.Sprintf("%x", k))
			vals = append(vals, fmt.Sprintf("%x", v))
		}
		require.Equal([]string{
			"0100000000000001",
			"0100000000000002",
			"0100000000000003",
			"0100000000000004",
			"0100000000000005",
			"0100000000000006",
			"0100000000000009",
			"010000000000000c",
			"010000000000001b",
		}, keys)

		require.Equal([]string{
			"ff000000000003e2",
			"ff000000000001f1",
			"ff0000000000014b",
			"ff000000000000f8",
			"ff000000000000c6",
			"ff000000000000a5",
			"ff0000000000006e",
			"ff00000000000052",
			"ff00000000000024"}, vals)

		// no upper bound
		it, err = ic.HistoryRange(995, -1, order.Asc, -1, tx)
		require.NoError(err)
		keys, vals = keys[:0], vals[:0]
		for it.HasNext() {
			k, v, err := it.Next()
			require.NoError(err)
			keys = append(keys, fmt.Sprintf("%x", k))
			vals = append(vals, fmt.Sprintf("%x", v))
		}
		require.Equal([]string{"0100000000000001", "0100000000000002", "0100000000000003", "0100000000000004", "0100000000000005", "0100000000000006", "0100000000000008", "0100000000000009", "010000000000000a", "010000000000000c", "0100000000000014", "0100000000000019", "010000000000001b"}, keys)
		require.Equal([]string{"ff000000000003e2", "ff000000000001f1", "ff0000000000014b", "ff000000000000f8", "ff000000000000c6", "ff000000000000a5", "ff0000000000007c", "ff0000000000006e", "ff00000000000063", "ff00000000000052", "ff00000000000031", "ff00000000000027", "ff00000000000024"}, vals)

		// no upper bound, limit=2
		it, err = ic.HistoryRange(995, -1, order.Asc, 2, tx)
		require.NoError(err)
		keys, vals = keys[:0], vals[:0]
		for it.HasNext() {
			k, v, err := it.Next()
			require.NoError(err)
			keys = append(keys, fmt.Sprintf("%x", k))
			vals = append(vals, fmt.Sprintf("%x", v))
		}
		require.Equal([]string{"0100000000000001", "0100000000000002"}, keys)
		require.Equal([]string{"ff000000000003e2", "ff000000000001f1"}, vals)

		// no lower bound, limit=2
		it, err = ic.HistoryRange(-1, 1000, order.Asc, 2, tx)
		require.NoError(err)
		keys, vals = keys[:0], vals[:0]
		for it.HasNext() {
			k, v, err := it.Next()
			require.NoError(err)
			keys = append(keys, fmt.Sprintf("%x", k))
			vals = append(vals, fmt.Sprintf("%x", v))
		}
		require.Equal([]string{"0100000000000001", "0100000000000002"}, keys)
		require.Equal([]string{"ff000000000003cf", "ff000000000001e7"}, vals)
	}
	t.Run("large_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, true, logger)
		test(t, h, db, txs)
	})
	t.Run("small_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, false, logger)
		test(t, h, db, txs)
	})
}

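// TestIterateChanged2 repeats HistoryRange and GetNoStateWithRecent checks
// before and after merging the static files, so both the DB-backed and the
// file-backed read paths are exercised.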
func TestIterateChanged2(t *testing.T) {
	logger := log.New()
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	ctx := context.Background()

	test := func(t *testing.T, h *History, db kv.RwDB, txs uint64) {
		t.Helper()
		roTx, err := db.BeginRo(ctx)
		require.NoError(t, err)
		defer roTx.Rollback()

		type testCase struct {
			k, v  string
			txNum uint64
		}
		testCases := []testCase{
			{txNum: 0, k: "0100000000000001", v: ""},
			{txNum: 900, k: "0100000000000001", v: "ff00000000000383"},
			{txNum: 1000, k: "0100000000000001", v: "ff000000000003e7"},
		}
		var keys, vals []string
		t.Run("before merge", func(t *testing.T) {
			hc, require := h.MakeContext(), require.New(t)
			defer hc.Close()

			it, err := hc.HistoryRange(2, 20, order.Asc, -1, roTx)
			require.NoError(err)
			for it.HasNext() {
				k, v, err := it.Next()
				require.NoError(err)
				keys = append(keys, fmt.Sprintf("%x", k))
				vals = append(vals, fmt.Sprintf("%x", v))
			}
			require.NoError(err)
			require.Equal([]string{
				"0100000000000001",
				"0100000000000002",
				"0100000000000003",
				"0100000000000004",
				"0100000000000005",
				"0100000000000006",
				"0100000000000007",
				"0100000000000008",
				"0100000000000009",
				"010000000000000a",
				"010000000000000b",
				"010000000000000c",
				"010000000000000d",
				"010000000000000e",
				"010000000000000f",
				"0100000000000010",
				"0100000000000011",
				"0100000000000012",
				"0100000000000013"}, keys)
			require.Equal([]string{
				"ff00000000000001",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				"",
				""}, vals)
			keys, vals = keys[:0], vals[:0]

			it, err = hc.HistoryRange(995, 1000, order.Asc, -1, roTx)
			require.NoError(err)
			for it.HasNext() {
				k, v, err := it.Next()
				require.NoError(err)
				keys = append(keys, fmt.Sprintf("%x", k))
				vals = append(vals, fmt.Sprintf("%x", v))
			}
			require.NoError(err)
			require.Equal([]string{
				"0100000000000001",
				"0100000000000002",
				"0100000000000003",
				"0100000000000004",
				"0100000000000005",
				"0100000000000006",
				"0100000000000009",
				"010000000000000c",
				"010000000000001b",
			}, keys)

			require.Equal([]string{
				"ff000000000003e2",
				"ff000000000001f1",
				"ff0000000000014b",
				"ff000000000000f8",
				"ff000000000000c6",
				"ff000000000000a5",
				"ff0000000000006e",
				"ff00000000000052",
				"ff00000000000024"}, vals)

			// single Get test-cases
			tx, err := db.BeginRo(ctx)
			require.NoError(err)
			defer tx.Rollback()

			v, ok, err := hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 900, tx)
			require.NoError(err)
			require.True(ok)
			require.Equal(hexutility.MustDecodeHex("ff00000000000383"), v)
			v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 0, tx)
			require.NoError(err)
			require.True(ok)
			require.Equal([]byte{}, v)
			v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 1000, tx)
			require.NoError(err)
			require.True(ok)
			require.Equal(hexutility.MustDecodeHex("ff000000000003e7"), v)
			_ = testCases // the explicit Get assertions above cover these (txNum, value) pairs
		})
		t.Run("after merge", func(t *testing.T) {
			collateAndMergeHistory(t, db, h, txs)
			hc, require := h.MakeContext(), require.New(t)
			defer hc.Close()

			keys = keys[:0]
			it, err := hc.HistoryRange(2, 20, order.Asc, -1, roTx)
			require.NoError(err)
			for it.HasNext() {
				k, _, err := it.Next()
				require.NoError(err)
				keys = append(keys, fmt.Sprintf("%x", k))
			}
			require.NoError(err)
			require.Equal([]string{
				"0100000000000001",
				"0100000000000002",
				"0100000000000003",
				"0100000000000004",
				"0100000000000005",
				"0100000000000006",
				"0100000000000007",
				"0100000000000008",
				"0100000000000009",
				"010000000000000a",
				"010000000000000b",
				"010000000000000c",
				"010000000000000d",
				"010000000000000e",
				"010000000000000f",
				"0100000000000010",
				"0100000000000011",
				"0100000000000012",
				"0100000000000013"}, keys)

			// single Get test-cases
			tx, err := db.BeginRo(ctx)
			require.NoError(err)
			defer tx.Rollback()

			v, ok, err := hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 900, tx)
			require.NoError(err)
			require.True(ok)
			require.Equal(hexutility.MustDecodeHex("ff00000000000383"), v)
			v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 0, tx)
			require.NoError(err)
			require.True(ok)
			require.Equal([]byte{}, v)
			v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 1000, tx)
			require.NoError(err)
			require.True(ok)
			require.Equal(hexutility.MustDecodeHex("ff000000000003e7"), v)
		})
	}
	t.Run("large_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, true, logger)
		test(t, h, db, txs)
	})
	t.Run("small_values", func(t *testing.T) {
		_, db, h, txs := filledHistory(t, false, logger)
		test(t, h, db, txs)
	})
}

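// TestScanStaticFilesH feeds a fixed list of .v file names into scanStateFiles
// and checks how many files are registered, both without and with a required
// integrity file extension ("kv").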
func TestScanStaticFilesH(t *testing.T) {
	logger := log.New()
	h := &History{InvertedIndex: &InvertedIndex{filenameBase: "test", aggregationStep: 1, logger: logger},
		files:  btree2.NewBTreeG[*filesItem](filesItemLess),
		logger: logger,
	}
	files := []string{
		"test.0-1.v",
		"test.1-2.v",
		"test.0-4.v",
		"test.2-3.v",
		"test.3-4.v",
		"test.4-5.v",
	}
	h.scanStateFiles(files)
	require.Equal(t, 6, h.files.Len())

	h.files.Clear()
	h.integrityFileExtensions = []string{"kv"}
	h.scanStateFiles(files)
	require.Equal(t, 0, h.files.Len())
}