github.com/ledgerwatch/erigon-lib@v1.0.0/state/domain_test.go (about)

     1  /*
     2     Copyright 2022 Erigon contributors
     3  
     4     Licensed under the Apache License, Version 2.0 (the "License");
     5     you may not use this file except in compliance with the License.
     6     You may obtain a copy of the License at
     7  
     8         http://www.apache.org/licenses/LICENSE-2.0
     9  
    10     Unless required by applicable law or agreed to in writing, software
    11     distributed under the License is distributed on an "AS IS" BASIS,
    12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13     See the License for the specific language governing permissions and
    14     limitations under the License.
    15  */
    16  
    17  package state
    18  
    19  import (
    20  	"context"
    21  	"encoding/binary"
    22  	"fmt"
    23  	"math"
    24  	"os"
    25  	"strings"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/ledgerwatch/erigon-lib/common/background"
    30  	"github.com/ledgerwatch/log/v3"
    31  	"github.com/stretchr/testify/require"
    32  	btree2 "github.com/tidwall/btree"
    33  
    34  	"github.com/ledgerwatch/erigon-lib/kv"
    35  	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
    36  	"github.com/ledgerwatch/erigon-lib/recsplit"
    37  )
    38  
// testDbAndDomain creates a temporary in-memory MDBX database with the table
// layout a Domain needs (current keys/values plus history and index tables,
// DupSort where required) and a Domain named "base" over those tables.
// The 16 passed to NewDomain is used throughout these tests as the
// aggregation step (see e.g. SetTxNum(2+16) in TestIterationMultistep).
// Both the DB and the Domain are closed automatically via t.Cleanup, and
// fsync is disabled to speed up tests. Returns the temp dir, DB and Domain.
func testDbAndDomain(t *testing.T, logger log.Logger) (string, kv.RwDB, *Domain) {
	t.Helper()
	path := t.TempDir()
	keysTable := "Keys"
	valsTable := "Vals"
	historyKeysTable := "HistoryKeys"
	historyValsTable := "HistoryVals"
	settingsTable := "Settings"
	indexTable := "Index"
	db := mdbx.NewMDBX(logger).InMem(path).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
		return kv.TableCfg{
			keysTable:        kv.TableCfgItem{Flags: kv.DupSort},
			valsTable:        kv.TableCfgItem{},
			historyKeysTable: kv.TableCfgItem{Flags: kv.DupSort},
			historyValsTable: kv.TableCfgItem{Flags: kv.DupSort},
			settingsTable:    kv.TableCfgItem{},
			indexTable:       kv.TableCfgItem{Flags: kv.DupSort},
		}
	}).MustOpen()
	t.Cleanup(db.Close)
	d, err := NewDomain(path, path, 16, "base", keysTable, valsTable, historyKeysTable, historyValsTable, indexTable, true, false, logger)
	require.NoError(t, err)
	t.Cleanup(d.Close)
	d.DisableFsync()
	return path, db, d
}
    65  
// btree index should work correctly if K < m
// NOTE(review): the comment above looks inherited from a btree test; what
// this test actually exercises is collate + buildFiles for a Domain —
// confirm the comment still applies or update it.
func TestCollationBuild(t *testing.T) {
	logger := log.New()
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	_, db, d := testDbAndDomain(t, logger)
	ctx := context.Background()
	defer d.Close()

	tx, err := db.BeginRw(ctx)
	require.NoError(t, err)
	defer tx.Rollback()
	d.SetTx(tx)
	d.StartWrites()
	defer d.FinishWrites()

	// Two keys: key1 is written twice (txNum 2 and 6), key2 once (txNum 3).
	d.SetTxNum(2)
	err = d.Put([]byte("key1"), nil, []byte("value1.1"))
	require.NoError(t, err)

	d.SetTxNum(3)
	err = d.Put([]byte("key2"), nil, []byte("value2.1"))
	require.NoError(t, err)

	d.SetTxNum(6)
	err = d.Put([]byte("key1"), nil, []byte("value1.2"))
	require.NoError(t, err)

	err = d.Rotate().Flush(ctx, tx)
	require.NoError(t, err)

	// Collate step 0 covering txNums [0, 7).
	c, err := d.collate(ctx, 0, 0, 7, tx, logEvery)

	require.NoError(t, err)
	require.True(t, strings.HasSuffix(c.valuesPath, "base.0-1.kv"))
	require.Equal(t, 2, c.valuesCount) // one latest value per key
	require.True(t, strings.HasSuffix(c.historyPath, "base.0-1.v"))
	require.Equal(t, 3, c.historyCount) // every Put lands in history
	require.Equal(t, 2, len(c.indexBitmaps))
	require.Equal(t, []uint64{3}, c.indexBitmaps["key2"].ToArray())
	require.Equal(t, []uint64{2, 6}, c.indexBitmaps["key1"].ToArray())

	sf, err := d.buildFiles(ctx, 0, c, background.NewProgressSet())
	require.NoError(t, err)
	defer sf.Close()
	c.Close()

	// The values file must hold key/value pairs carrying only the latest
	// value of each key, in key order.
	g := sf.valuesDecomp.MakeGetter()
	g.Reset(0)
	var words []string
	for g.HasNext() {
		w, _ := g.Next(nil)
		words = append(words, string(w))
	}
	require.Equal(t, []string{"key1", "value1.2", "key2", "value2.1"}, words)
	// Check index
	require.Equal(t, 2, int(sf.valuesIdx.KeyCount()))

	// Each key's index lookup must point at that key's own entry.
	r := recsplit.NewIndexReader(sf.valuesIdx)
	defer r.Close()
	for i := 0; i < len(words); i += 2 {
		offset := r.Lookup([]byte(words[i]))
		g.Reset(offset)
		w, _ := g.Next(nil)
		require.Equal(t, words[i], string(w))
		w, _ = g.Next(nil)
		require.Equal(t, words[i+1], string(w))
	}
}
   135  
   136  func TestIterationBasic(t *testing.T) {
   137  	logger := log.New()
   138  	_, db, d := testDbAndDomain(t, logger)
   139  	ctx := context.Background()
   140  	tx, err := db.BeginRw(ctx)
   141  	require.NoError(t, err)
   142  	defer tx.Rollback()
   143  	d.SetTx(tx)
   144  	d.StartWrites()
   145  	defer d.FinishWrites()
   146  
   147  	d.SetTxNum(2)
   148  	err = d.Put([]byte("addr1"), []byte("loc1"), []byte("value1"))
   149  	require.NoError(t, err)
   150  	err = d.Put([]byte("addr1"), []byte("loc2"), []byte("value1"))
   151  	require.NoError(t, err)
   152  	err = d.Put([]byte("addr1"), []byte("loc3"), []byte("value1"))
   153  	require.NoError(t, err)
   154  	err = d.Put([]byte("addr2"), []byte("loc1"), []byte("value1"))
   155  	require.NoError(t, err)
   156  	err = d.Put([]byte("addr2"), []byte("loc2"), []byte("value1"))
   157  	require.NoError(t, err)
   158  	err = d.Put([]byte("addr3"), []byte("loc1"), []byte("value1"))
   159  	require.NoError(t, err)
   160  	err = d.Put([]byte("addr3"), []byte("loc2"), []byte("value1"))
   161  	require.NoError(t, err)
   162  
   163  	var keys, vals []string
   164  	dc := d.MakeContext()
   165  	defer dc.Close()
   166  	err = dc.IteratePrefix([]byte("addr2"), func(k, v []byte) {
   167  		keys = append(keys, string(k))
   168  		vals = append(vals, string(v))
   169  	})
   170  	require.NoError(t, err)
   171  	require.Equal(t, []string{"addr2loc1", "addr2loc2"}, keys)
   172  	require.Equal(t, []string{"value1", "value1"}, vals)
   173  }
   174  
// TestAfterPrune checks that after step 0 is collated, built into static
// files, integrated and pruned from the DB, the latest values remain
// readable (now served from files) and the DB itself is not left empty,
// because writes past the step boundary (txNum 17/18) are untouched.
func TestAfterPrune(t *testing.T) {
	logger := log.New()
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	_, db, d := testDbAndDomain(t, logger)
	ctx := context.Background()

	tx, err := db.BeginRw(ctx)
	require.NoError(t, err)
	defer tx.Rollback()
	d.SetTx(tx)
	d.StartWrites()
	defer d.FinishWrites()

	// Writes inside step 0 (txNum < 16)...
	d.SetTxNum(2)
	err = d.Put([]byte("key1"), nil, []byte("value1.1"))
	require.NoError(t, err)

	d.SetTxNum(3)
	err = d.Put([]byte("key2"), nil, []byte("value2.1"))
	require.NoError(t, err)

	d.SetTxNum(6)
	err = d.Put([]byte("key1"), nil, []byte("value1.2"))
	require.NoError(t, err)

	// ...and writes past the step boundary, which must survive pruning.
	d.SetTxNum(17)
	err = d.Put([]byte("key1"), nil, []byte("value1.3"))
	require.NoError(t, err)

	d.SetTxNum(18)
	err = d.Put([]byte("key2"), nil, []byte("value2.2"))
	require.NoError(t, err)

	err = d.Rotate().Flush(ctx, tx)
	require.NoError(t, err)

	c, err := d.collate(ctx, 0, 0, 16, tx, logEvery)
	require.NoError(t, err)

	sf, err := d.buildFiles(ctx, 0, c, background.NewProgressSet())
	require.NoError(t, err)

	d.integrateFiles(sf, 0, 16)
	var v []byte
	dc := d.MakeContext()
	defer dc.Close()
	v, err = dc.Get([]byte("key1"), nil, tx)
	require.NoError(t, err)
	require.Equal(t, []byte("value1.3"), v)
	v, err = dc.Get([]byte("key2"), nil, tx)
	require.NoError(t, err)
	require.Equal(t, []byte("value2.2"), v)

	// Remove step 0 data from the DB; the files built above take over.
	err = d.prune(ctx, 0, 0, 16, math.MaxUint64, logEvery)
	require.NoError(t, err)

	isEmpty, err := d.isEmpty(tx)
	require.NoError(t, err)
	require.False(t, isEmpty)

	// Latest values must be unchanged after pruning.
	v, err = dc.Get([]byte("key1"), nil, tx)
	require.NoError(t, err)
	require.Equal(t, []byte("value1.3"), v)
	v, err = dc.Get([]byte("key2"), nil, tx)
	require.NoError(t, err)
	require.Equal(t, []byte("value2.2"), v)
}
   243  
// filledDomain populates a fresh domain with keys 1..31 over 1000 txNums:
// key k receives a new value at every txNum divisible by k, the value being
// txNum/k (key and value both big-endian uint64). Buffers are flushed every
// 10 txNums and the write transaction is committed before returning.
// Returns the dir path, DB, Domain and the number of txs written.
func filledDomain(t *testing.T, logger log.Logger) (string, kv.RwDB, *Domain, uint64) {
	t.Helper()
	path, db, d := testDbAndDomain(t, logger)
	ctx := context.Background()
	tx, err := db.BeginRw(ctx)
	require.NoError(t, err)
	defer tx.Rollback()
	d.SetTx(tx)
	d.StartWrites()
	defer d.FinishWrites()

	txs := uint64(1000)
	// keys are encodings of numbers 1..31
	// each key changes value on every txNum which is multiple of the key
	for txNum := uint64(1); txNum <= txs; txNum++ {
		d.SetTxNum(txNum)
		for keyNum := uint64(1); keyNum <= uint64(31); keyNum++ {
			if txNum%keyNum == 0 {
				valNum := txNum / keyNum
				var k [8]byte
				var v [8]byte
				binary.BigEndian.PutUint64(k[:], keyNum)
				binary.BigEndian.PutUint64(v[:], valNum)
				err = d.Put(k[:], nil, v[:])
				require.NoError(t, err)
			}
		}
		if txNum%10 == 0 {
			// Periodic flush keeps the in-memory write buffer bounded.
			err = d.Rotate().Flush(ctx, tx)
			require.NoError(t, err)
		}
	}
	err = d.Rotate().Flush(ctx, tx)
	require.NoError(t, err)
	err = tx.Commit()
	require.NoError(t, err)
	return path, db, d, txs
}
   282  
// checkHistory verifies every historical value written by filledDomain: for
// each txNum and key, GetBeforeTxNum(txNum+1) must return txNum/keyNum once
// the key has had its first write (txNum >= keyNum) and nil before that; at
// the final txNum, Get must return the same latest value too.
func checkHistory(t *testing.T, db kv.RwDB, d *Domain, txs uint64) {
	t.Helper()
	ctx := context.Background()
	var err error
	// Check the history
	var roTx kv.Tx
	dc := d.MakeContext()
	defer dc.Close()
	for txNum := uint64(0); txNum <= txs; txNum++ {
		if txNum == 976 {
			// Create roTx only for the last several txNum, because all history before that
			// we should be able to read without any DB access
			roTx, err = db.BeginRo(ctx)
			require.NoError(t, err)
			defer roTx.Rollback()
		}
		for keyNum := uint64(1); keyNum <= uint64(31); keyNum++ {
			valNum := txNum / keyNum
			var k [8]byte
			var v [8]byte
			label := fmt.Sprintf("txNum=%d, keyNum=%d", txNum, keyNum)
			binary.BigEndian.PutUint64(k[:], keyNum)
			binary.BigEndian.PutUint64(v[:], valNum)
			val, err := dc.GetBeforeTxNum(k[:], txNum+1, roTx)
			require.NoError(t, err, label)
			if txNum >= keyNum {
				require.Equal(t, v[:], val, label)
			} else {
				// Key has not been written yet at this txNum.
				require.Nil(t, val, label)
			}
			if txNum == txs {
				// At the last txNum the latest value must match as well.
				val, err := dc.Get(k[:], nil, roTx)
				require.NoError(t, err)
				require.EqualValues(t, v[:], val)
			}
		}
	}
}
   321  
   322  func TestHistory(t *testing.T) {
   323  	logger := log.New()
   324  	logEvery := time.NewTicker(30 * time.Second)
   325  	defer logEvery.Stop()
   326  	_, db, d, txs := filledDomain(t, logger)
   327  	ctx := context.Background()
   328  	tx, err := db.BeginRw(ctx)
   329  	require.NoError(t, err)
   330  	d.SetTx(tx)
   331  	defer tx.Rollback()
   332  
   333  	// Leave the last 2 aggregation steps un-collated
   334  	for step := uint64(0); step < txs/d.aggregationStep-1; step++ {
   335  		func() {
   336  			c, err := d.collate(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, tx, logEvery)
   337  			require.NoError(t, err)
   338  			sf, err := d.buildFiles(ctx, step, c, background.NewProgressSet())
   339  			require.NoError(t, err)
   340  			d.integrateFiles(sf, step*d.aggregationStep, (step+1)*d.aggregationStep)
   341  
   342  			err = d.prune(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, math.MaxUint64, logEvery)
   343  			require.NoError(t, err)
   344  		}()
   345  	}
   346  	err = tx.Commit()
   347  	require.NoError(t, err)
   348  	checkHistory(t, db, d, txs)
   349  }
   350  
   351  func TestIterationMultistep(t *testing.T) {
   352  	logger := log.New()
   353  	logEvery := time.NewTicker(30 * time.Second)
   354  	defer logEvery.Stop()
   355  	_, db, d := testDbAndDomain(t, logger)
   356  	ctx := context.Background()
   357  	tx, err := db.BeginRw(ctx)
   358  	require.NoError(t, err)
   359  	defer tx.Rollback()
   360  	d.SetTx(tx)
   361  	d.StartWrites()
   362  	defer d.FinishWrites()
   363  
   364  	d.SetTxNum(2)
   365  	err = d.Put([]byte("addr1"), []byte("loc1"), []byte("value1"))
   366  	require.NoError(t, err)
   367  	err = d.Put([]byte("addr1"), []byte("loc2"), []byte("value1"))
   368  	require.NoError(t, err)
   369  	err = d.Put([]byte("addr1"), []byte("loc3"), []byte("value1"))
   370  	require.NoError(t, err)
   371  	err = d.Put([]byte("addr2"), []byte("loc1"), []byte("value1"))
   372  	require.NoError(t, err)
   373  	err = d.Put([]byte("addr2"), []byte("loc2"), []byte("value1"))
   374  	require.NoError(t, err)
   375  	err = d.Put([]byte("addr3"), []byte("loc1"), []byte("value1"))
   376  	require.NoError(t, err)
   377  	err = d.Put([]byte("addr3"), []byte("loc2"), []byte("value1"))
   378  	require.NoError(t, err)
   379  
   380  	d.SetTxNum(2 + 16)
   381  	err = d.Put([]byte("addr2"), []byte("loc1"), []byte("value1"))
   382  	require.NoError(t, err)
   383  	err = d.Put([]byte("addr2"), []byte("loc2"), []byte("value1"))
   384  	require.NoError(t, err)
   385  	err = d.Put([]byte("addr2"), []byte("loc3"), []byte("value1"))
   386  	require.NoError(t, err)
   387  	err = d.Put([]byte("addr2"), []byte("loc4"), []byte("value1"))
   388  	require.NoError(t, err)
   389  
   390  	d.SetTxNum(2 + 16 + 16)
   391  	err = d.Delete([]byte("addr2"), []byte("loc1"))
   392  	require.NoError(t, err)
   393  
   394  	err = d.Rotate().Flush(ctx, tx)
   395  	require.NoError(t, err)
   396  
   397  	for step := uint64(0); step <= 2; step++ {
   398  		func() {
   399  			c, err := d.collate(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, tx, logEvery)
   400  			require.NoError(t, err)
   401  			sf, err := d.buildFiles(ctx, step, c, background.NewProgressSet())
   402  			require.NoError(t, err)
   403  			d.integrateFiles(sf, step*d.aggregationStep, (step+1)*d.aggregationStep)
   404  			err = d.prune(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, math.MaxUint64, logEvery)
   405  			require.NoError(t, err)
   406  		}()
   407  	}
   408  
   409  	var keys []string
   410  	var vals []string
   411  	dc := d.MakeContext()
   412  	defer dc.Close()
   413  	err = dc.IteratePrefix([]byte("addr2"), func(k, v []byte) {
   414  		keys = append(keys, string(k))
   415  		vals = append(vals, string(v))
   416  	})
   417  	require.NoError(t, err)
   418  	require.Equal(t, []string{"addr2loc2", "addr2loc3", "addr2loc4"}, keys)
   419  	require.Equal(t, []string{"value1", "value1", "value1"}, vals)
   420  }
   421  
// collateAndMerge collates, builds, integrates and prunes every aggregation
// step except the last two, then repeatedly merges static files until no
// merge range remains. If tx is nil an internal RW transaction is opened and
// committed at the end; otherwise the caller's tx is used and left open.
func collateAndMerge(t *testing.T, db kv.RwDB, tx kv.RwTx, d *Domain, txs uint64) {
	t.Helper()

	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	ctx := context.Background()
	var err error
	useExternalTx := tx != nil
	if !useExternalTx {
		tx, err = db.BeginRw(ctx)
		require.NoError(t, err)
		defer tx.Rollback()
	}
	d.SetTx(tx)
	// Leave the last 2 aggregation steps un-collated
	for step := uint64(0); step < txs/d.aggregationStep-1; step++ {
		c, err := d.collate(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, tx, logEvery)
		require.NoError(t, err)
		sf, err := d.buildFiles(ctx, step, c, background.NewProgressSet())
		require.NoError(t, err)
		d.integrateFiles(sf, step*d.aggregationStep, (step+1)*d.aggregationStep)
		err = d.prune(ctx, step, step*d.aggregationStep, (step+1)*d.aggregationStep, math.MaxUint64, logEvery)
		require.NoError(t, err)
	}
	var r DomainRanges
	maxEndTxNum := d.endTxNumMinimax()
	maxSpan := d.aggregationStep * StepsInBiggestFile

	for {
		// Each merge pass runs in a closure so the dc.Close defer fires per
		// iteration instead of piling up until function return.
		if stop := func() bool {
			dc := d.MakeContext()
			defer dc.Close()
			r = d.findMergeRange(maxEndTxNum, maxSpan)
			if !r.any() {
				return true // nothing left to merge
			}
			valuesOuts, indexOuts, historyOuts, _ := dc.staticFilesInRange(r)
			valuesIn, indexIn, historyIn, err := d.mergeFiles(ctx, valuesOuts, indexOuts, historyOuts, r, 1, background.NewProgressSet())
			require.NoError(t, err)
			d.integrateMergedFiles(valuesOuts, indexOuts, historyOuts, valuesIn, indexIn, historyIn)
			return false
		}(); stop {
			break
		}
	}
	if !useExternalTx {
		err := tx.Commit()
		require.NoError(t, err)
	}
}
   472  
   473  func collateAndMergeOnce(t *testing.T, d *Domain, step uint64) {
   474  	t.Helper()
   475  	logEvery := time.NewTicker(30 * time.Second)
   476  	defer logEvery.Stop()
   477  	ctx := context.Background()
   478  	txFrom, txTo := (step)*d.aggregationStep, (step+1)*d.aggregationStep
   479  
   480  	c, err := d.collate(ctx, step, txFrom, txTo, d.tx, logEvery)
   481  	require.NoError(t, err)
   482  
   483  	sf, err := d.buildFiles(ctx, step, c, background.NewProgressSet())
   484  	require.NoError(t, err)
   485  	d.integrateFiles(sf, txFrom, txTo)
   486  
   487  	err = d.prune(ctx, step, txFrom, txTo, math.MaxUint64, logEvery)
   488  	require.NoError(t, err)
   489  
   490  	var r DomainRanges
   491  	maxEndTxNum := d.endTxNumMinimax()
   492  	maxSpan := d.aggregationStep * StepsInBiggestFile
   493  	for r = d.findMergeRange(maxEndTxNum, maxSpan); r.any(); r = d.findMergeRange(maxEndTxNum, maxSpan) {
   494  		dc := d.MakeContext()
   495  		valuesOuts, indexOuts, historyOuts, _ := dc.staticFilesInRange(r)
   496  		valuesIn, indexIn, historyIn, err := d.mergeFiles(ctx, valuesOuts, indexOuts, historyOuts, r, 1, background.NewProgressSet())
   497  		require.NoError(t, err)
   498  
   499  		d.integrateMergedFiles(valuesOuts, indexOuts, historyOuts, valuesIn, indexIn, historyIn)
   500  		dc.Close()
   501  	}
   502  }
   503  
// TestDomain_MergeFiles fills a domain, runs the full collate/merge pipeline
// (with an internally managed transaction) and verifies that the complete
// history is still readable afterwards.
func TestDomain_MergeFiles(t *testing.T) {
	logger := log.New()
	_, db, d, txs := filledDomain(t, logger)

	collateAndMerge(t, db, nil, d, txs)
	checkHistory(t, db, d, txs)
}
   511  
   512  func TestDomain_ScanFiles(t *testing.T) {
   513  	logger := log.New()
   514  	path, db, d, txs := filledDomain(t, logger)
   515  	_ = path
   516  	collateAndMerge(t, db, nil, d, txs)
   517  	// Recreate domain and re-scan the files
   518  	txNum := d.txNum
   519  	d.closeWhatNotInList([]string{})
   520  	d.OpenFolder()
   521  
   522  	d.SetTxNum(txNum)
   523  	// Check the history
   524  	checkHistory(t, db, d, txs)
   525  }
   526  
// TestDomain_Delete alternates Put (even txNum) and Delete (odd txNum) on
// "key1" over 1000 txNums, runs the collate/merge pipeline, then checks that
// a key that was never written ("key2") reads as nil at every point of
// history. The commented-out assertions on "key1" are preserved as found.
func TestDomain_Delete(t *testing.T) {
	logger := log.New()
	_, db, d := testDbAndDomain(t, logger)
	ctx, require := context.Background(), require.New(t)
	tx, err := db.BeginRw(ctx)
	require.NoError(err)
	defer tx.Rollback()
	d.SetTx(tx)
	d.StartWrites()
	defer d.FinishWrites()

	// Put on even txNum, delete on odd txNum
	for txNum := uint64(0); txNum < uint64(1000); txNum++ {
		d.SetTxNum(txNum)
		if txNum%2 == 0 {
			err = d.Put([]byte("key1"), nil, []byte("value1"))
		} else {
			err = d.Delete([]byte("key1"), nil)
		}
		require.NoError(err)
	}
	err = d.Rotate().Flush(ctx, tx)
	require.NoError(err)
	collateAndMerge(t, db, tx, d, 1000)
	// Check the history
	dc := d.MakeContext()
	defer dc.Close()
	for txNum := uint64(0); txNum < 1000; txNum++ {
		label := fmt.Sprintf("txNum=%d", txNum)
		//val, ok, err := dc.GetBeforeTxNum([]byte("key1"), txNum+1, tx)
		//require.NoError(err)
		//require.True(ok)
		//if txNum%2 == 0 {
		//	require.Equal([]byte("value1"), val, label)
		//} else {
		//	require.Nil(val, label)
		//}
		//if txNum == 976 {
		// "key2" was never written, so history must report nil everywhere.
		val, err := dc.GetBeforeTxNum([]byte("key2"), txNum+1, tx)
		require.NoError(err)
		//require.False(ok, label)
		require.Nil(val, label)
		//}
	}
}
   572  
   573  func filledDomainFixedSize(t *testing.T, keysCount, txCount uint64, logger log.Logger) (string, kv.RwDB, *Domain, map[string][]bool) {
   574  	t.Helper()
   575  	path, db, d := testDbAndDomain(t, logger)
   576  	ctx := context.Background()
   577  	tx, err := db.BeginRw(ctx)
   578  	require.NoError(t, err)
   579  	defer tx.Rollback()
   580  	d.SetTx(tx)
   581  	d.StartWrites()
   582  	defer d.FinishWrites()
   583  
   584  	// keys are encodings of numbers 1..31
   585  	// each key changes value on every txNum which is multiple of the key
   586  	dat := make(map[string][]bool) // K:V is key -> list of bools. If list[i] == true, i'th txNum should persists
   587  
   588  	for txNum := uint64(1); txNum <= txCount; txNum++ {
   589  		d.SetTxNum(txNum)
   590  		for keyNum := uint64(1); keyNum <= keysCount; keyNum++ {
   591  			if keyNum == txNum%d.aggregationStep {
   592  				continue
   593  			}
   594  			var k [8]byte
   595  			var v [8]byte
   596  			binary.BigEndian.PutUint64(k[:], keyNum)
   597  			binary.BigEndian.PutUint64(v[:], txNum)
   598  			err = d.Put(k[:], nil, v[:])
   599  			require.NoError(t, err)
   600  
   601  			if _, ok := dat[fmt.Sprintf("%d", keyNum)]; !ok {
   602  				dat[fmt.Sprintf("%d", keyNum)] = make([]bool, txCount+1)
   603  			}
   604  			dat[fmt.Sprintf("%d", keyNum)][txNum] = true
   605  		}
   606  		if txNum%d.aggregationStep == 0 {
   607  			err = d.Rotate().Flush(ctx, tx)
   608  			require.NoError(t, err)
   609  		}
   610  	}
   611  	err = tx.Commit()
   612  	require.NoError(t, err)
   613  	return path, db, d, dat
   614  }
   615  
// firstly we write all the data to domain
// then we collate-merge-prune
// then check.
// in real life we periodically do collate-merge-prune without stopping adding data
func TestDomain_Prune_AfterAllWrites(t *testing.T) {
	logger := log.New()
	keyCount, txCount := uint64(4), uint64(64)
	_, db, dom, data := filledDomainFixedSize(t, keyCount, txCount, logger)

	collateAndMerge(t, db, nil, dom, txCount)

	ctx := context.Background()
	roTx, err := db.BeginRo(ctx)
	require.NoError(t, err)
	defer roTx.Rollback()

	// Check the history
	dc := dom.MakeContext()
	defer dc.Close()
	for txNum := uint64(1); txNum <= txCount; txNum++ {
		for keyNum := uint64(1); keyNum <= keyCount; keyNum++ {
			var k [8]byte
			var v [8]byte
			label := fmt.Sprintf("txNum=%d, keyNum=%d\n", txNum, keyNum)
			binary.BigEndian.PutUint64(k[:], keyNum)
			binary.BigEndian.PutUint64(v[:], txNum)

			val, err := dc.GetBeforeTxNum(k[:], txNum+1, roTx)
			// during generation such keys are skipped so value should be nil for this call
			require.NoError(t, err, label)
			if !data[fmt.Sprintf("%d", keyNum)][txNum] {
				// Key was skipped at txNum: expect the previous txNum's
				// value, or nil when there was no prior write at all.
				if txNum > 1 {
					binary.BigEndian.PutUint64(v[:], txNum-1)
				} else {
					require.Nil(t, val, label)
					continue
				}
			}
			require.EqualValues(t, v[:], val)
		}
	}

	var v [8]byte
	binary.BigEndian.PutUint64(v[:], txCount)

	// The latest value of every key must equal txCount.
	for keyNum := uint64(1); keyNum <= keyCount; keyNum++ {
		var k [8]byte
		label := fmt.Sprintf("txNum=%d, keyNum=%d\n", txCount, keyNum)
		binary.BigEndian.PutUint64(k[:], keyNum)

		storedV, err := dc.Get(k[:], nil, roTx)
		require.NoError(t, err, label)
		require.EqualValues(t, v[:], storedV, label)
	}
}
   671  
   672  func TestDomain_PruneOnWrite(t *testing.T) {
   673  	logger := log.New()
   674  	keysCount, txCount := uint64(16), uint64(64)
   675  
   676  	path, db, d := testDbAndDomain(t, logger)
   677  	ctx := context.Background()
   678  	defer os.Remove(path)
   679  
   680  	tx, err := db.BeginRw(ctx)
   681  	require.NoError(t, err)
   682  	defer tx.Rollback()
   683  	d.SetTx(tx)
   684  	d.StartWrites()
   685  	defer d.FinishWrites()
   686  
   687  	// keys are encodings of numbers 1..31
   688  	// each key changes value on every txNum which is multiple of the key
   689  	data := make(map[string][]uint64)
   690  
   691  	for txNum := uint64(1); txNum <= txCount; txNum++ {
   692  		d.SetTxNum(txNum)
   693  		for keyNum := uint64(1); keyNum <= keysCount; keyNum++ {
   694  			if keyNum == txNum%d.aggregationStep {
   695  				continue
   696  			}
   697  			var k [8]byte
   698  			var v [8]byte
   699  			binary.BigEndian.PutUint64(k[:], keyNum)
   700  			binary.BigEndian.PutUint64(v[:], txNum)
   701  			err = d.Put(k[:], nil, v[:])
   702  			require.NoError(t, err)
   703  
   704  			list, ok := data[fmt.Sprintf("%d", keyNum)]
   705  			if !ok {
   706  				data[fmt.Sprintf("%d", keyNum)] = make([]uint64, 0)
   707  			}
   708  			data[fmt.Sprintf("%d", keyNum)] = append(list, txNum)
   709  		}
   710  		if txNum%d.aggregationStep == 0 {
   711  			step := txNum/d.aggregationStep - 1
   712  			if step == 0 {
   713  				continue
   714  			}
   715  			step--
   716  			err = d.Rotate().Flush(ctx, tx)
   717  			require.NoError(t, err)
   718  
   719  			collateAndMergeOnce(t, d, step)
   720  		}
   721  	}
   722  	err = d.Rotate().Flush(ctx, tx)
   723  	require.NoError(t, err)
   724  
   725  	// Check the history
   726  	dc := d.MakeContext()
   727  	defer dc.Close()
   728  	for txNum := uint64(1); txNum <= txCount; txNum++ {
   729  		for keyNum := uint64(1); keyNum <= keysCount; keyNum++ {
   730  			valNum := txNum
   731  			var k [8]byte
   732  			var v [8]byte
   733  			label := fmt.Sprintf("txNum=%d, keyNum=%d\n", txNum, keyNum)
   734  			binary.BigEndian.PutUint64(k[:], keyNum)
   735  			binary.BigEndian.PutUint64(v[:], valNum)
   736  
   737  			val, err := dc.GetBeforeTxNum(k[:], txNum+1, tx)
   738  			require.NoError(t, err)
   739  			if keyNum == txNum%d.aggregationStep {
   740  				if txNum > 1 {
   741  					binary.BigEndian.PutUint64(v[:], txNum-1)
   742  					require.EqualValues(t, v[:], val)
   743  					continue
   744  				} else {
   745  					require.Nil(t, val, label)
   746  					continue
   747  				}
   748  			}
   749  			require.NoError(t, err, label)
   750  			require.EqualValues(t, v[:], val, label)
   751  		}
   752  	}
   753  
   754  	var v [8]byte
   755  	binary.BigEndian.PutUint64(v[:], txCount)
   756  
   757  	for keyNum := uint64(1); keyNum <= keysCount; keyNum++ {
   758  		var k [8]byte
   759  		label := fmt.Sprintf("txNum=%d, keyNum=%d\n", txCount, keyNum)
   760  		binary.BigEndian.PutUint64(k[:], keyNum)
   761  
   762  		storedV, err := dc.Get(k[:], nil, tx)
   763  		require.NoError(t, err, label)
   764  		require.EqualValues(t, v[:], storedV, label)
   765  	}
   766  }
   767  
   768  func TestScanStaticFilesD(t *testing.T) {
   769  	logger := log.New()
   770  	ii := &Domain{History: &History{InvertedIndex: &InvertedIndex{filenameBase: "test", aggregationStep: 1, logger: logger}, logger: logger},
   771  		files:  btree2.NewBTreeG[*filesItem](filesItemLess),
   772  		logger: logger,
   773  	}
   774  	files := []string{
   775  		"test.0-1.kv",
   776  		"test.1-2.kv",
   777  		"test.0-4.kv",
   778  		"test.2-3.kv",
   779  		"test.3-4.kv",
   780  		"test.4-5.kv",
   781  	}
   782  	ii.scanStateFiles(files)
   783  	var found []string
   784  	ii.files.Walk(func(items []*filesItem) bool {
   785  		for _, item := range items {
   786  			found = append(found, fmt.Sprintf("%d-%d", item.startTxNum, item.endTxNum))
   787  		}
   788  		return true
   789  	})
   790  	require.Equal(t, 6, len(found))
   791  }