github.com/fibonacci-chain/fbc@v0.0.0-20231124064014-c7636198c1e9/libs/iavl/tree_random_test.go

package iavl

import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"sort"
	"strconv"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"

	db "github.com/fibonacci-chain/fbc/libs/tm-db"
)

func TestRandomOperations(t *testing.T) {
	// In short mode (specifically, when running in CI with the race detector),
	// we only run the first couple of seeds.
	seeds := []int64{
		498727689,
		756509998,
		480459882,
		324736440,
		581827344,
		470870060,
		390970079,
		846023066,
		518638291,
		957382170,
	}

	for i, seed := range seeds {
		i, seed := i, seed
		t.Run(fmt.Sprintf("Seed %v", seed), func(t *testing.T) {
			if testing.Short() && i >= 2 {
				t.Skip("Skipping seed in short mode")
			}
			t.Parallel() // comment out to disable parallel tests, or use -parallel 1
			testRandomOperations(t, seed)
		})
	}
}

// Randomized test that runs all sorts of random operations, mirrors them in a known-good
// map, and verifies the state of the tree against the map.
func testRandomOperations(t *testing.T, randSeed int64) {
	const (
		keySize   = 16 // before base64-encoding
		valueSize = 16 // before base64-encoding

		versions          = 32   // number of final versions to generate
		reloadChance      = 0.1  // chance of tree reload after save
		deleteChance      = 0.2  // chance of random version deletion after save
		deleteRangeChance = 0.3  // chance of deleting a version range (DeleteVersionsRange)
		deleteMultiChance = 0.3  // chance of deleting multiple versions (DeleteVersions)
		deleteMax         = 5    // max number of versions to delete
		revertChance      = 0.05 // chance to revert tree to random version with LoadVersionForOverwriting
		syncChance        = 0.2  // chance of enabling sync writes on tree load
		cacheChance       = 0.4  // chance of enabling caching
		cacheSizeMax      = 256  // maximum size of cache (will be random from 1)

		versionOps  = 64  // number of operations (create/update/delete) per version
		updateRatio = 0.4 // ratio of updates out of all operations
		deleteRatio = 0.2 // ratio of deletes out of all operations
	)

	r := rand.New(rand.NewSource(randSeed))

	// loadTree loads the last persisted version of a tree with random sync and cache settings.
	loadTree := func(levelDB db.DB) (tree *MutableTree, version int64, options *Options) {
		var err error
		options = &Options{
			Sync: r.Float64() < syncChance,
		}
		// set the cache size regardless of whether caching is enabled. This ensures we always
		// call the RNG the same number of times, such that changing settings does not affect
		// the RNG sequence.
		cacheSize := int(r.Int63n(cacheSizeMax + 1))
		if !(r.Float64() < cacheChance) {
			cacheSize = 0
		}
		tree, err = NewMutableTreeWithOpts(levelDB, cacheSize, options)
		require.NoError(t, err)
		version, err = tree.Load()
		require.NoError(t, err)
		t.Logf("Loaded version %v (sync=%v cache=%v)", version, options.Sync, cacheSize)
		return
	}

	// generates random keys and values
	randString := func(size int) string {
		buf := make([]byte, size)
		r.Read(buf)
		return base64.StdEncoding.EncodeToString(buf)
	}

	// Use the same on-disk database for the entire run.
	tempdir, err := ioutil.TempDir("", "iavl")
	require.NoError(t, err)
	defer os.RemoveAll(tempdir)

	levelDB, err := db.NewGoLevelDB("leveldb", tempdir)
	require.NoError(t, err)

	tree, version, _ := loadTree(levelDB)

	// Set up a mirror of the current IAVL state, as well as the history of saved mirrors
	// on disk and in memory. Since pruning was removed we currently persist all versions,
	// so memMirrors is never used, but it is kept here for when pruning is re-introduced.
	mirror := make(map[string]string, versionOps)
	mirrorKeys := make([]string, 0, versionOps)
	diskMirrors := make(map[int64]map[string]string)
	memMirrors := make(map[int64]map[string]string)

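	// Grow the tree one version at a time: apply versionOps random operations, save the
	// version, then randomly delete versions, reload, or revert before verifying every
	// remaining version against its mirror.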
	for version < versions {
		for i := 0; i < versionOps; i++ {
			switch {
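			// Delete a random existing key, removing it from the mirror as well.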
			case len(mirror) > 0 && r.Float64() < deleteRatio:
				index := r.Intn(len(mirrorKeys))
				key := mirrorKeys[index]
				mirrorKeys = append(mirrorKeys[:index], mirrorKeys[index+1:]...)
				_, removed := tree.Remove([]byte(key))
				require.True(t, removed)
				delete(mirror, key)

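			// Update a random existing key with a new random value.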
			case len(mirror) > 0 && r.Float64() < updateRatio:
				key := mirrorKeys[r.Intn(len(mirrorKeys))]
				value := randString(valueSize)
				updated := tree.Set([]byte(key), []byte(value))
				require.True(t, updated)
				mirror[key] = value

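			// Insert a new random key, regenerating it if it happens to collide with an existing one.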
			default:
				key := randString(keySize)
				value := randString(valueSize)
				for tree.Has([]byte(key)) {
					key = randString(keySize)
				}
				updated := tree.Set([]byte(key), []byte(value))
				require.False(t, updated)
				mirror[key] = value
				mirrorKeys = append(mirrorKeys, key)
			}
		}
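		// Persist this batch of operations as a new version.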
		_, version, _, err = tree.SaveVersion(false)
		require.NoError(t, err)

		t.Logf("Saved tree at version %v with %v keys and %v versions",
			version, tree.Size(), len(tree.AvailableVersions()))

		// Verify that the version matches the mirror.
		assertMirror(t, tree, mirror, 0)

		// Save the mirror as a disk mirror, since we currently persist all versions.
		diskMirrors[version] = copyMirror(mirror)

		// Delete random versions if requested, but never the latest version.
		if r.Float64() < deleteChance {
			versions := getMirrorVersions(diskMirrors, memMirrors)
			switch {
			case len(versions) < 2:

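			// Delete a contiguous range of versions with DeleteVersionsRange.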
			case r.Float64() < deleteRangeChance:
				indexFrom := r.Intn(len(versions) - 1)
				from := versions[indexFrom]
				batch := r.Intn(deleteMax)
				if batch > len(versions[indexFrom:])-2 {
					batch = len(versions[indexFrom:]) - 2
				}
				to := versions[indexFrom+batch] + 1
				t.Logf("Deleting versions %v-%v", from, to-1)
				err = tree.DeleteVersionsRange(int64(from), int64(to))
				require.NoError(t, err)
				for version := from; version < to; version++ {
					delete(diskMirrors, int64(version))
					delete(memMirrors, int64(version))
				}

			// adjust probability to take into account probability of range delete not happening
			case r.Float64() < deleteMultiChance/(1.0-deleteRangeChance):
				deleteVersions := []int64{}
				desc := ""
				batchSize := 1 + r.Intn(deleteMax)
				if batchSize > len(versions)-1 {
					batchSize = len(versions) - 1
				}
				for _, i := range r.Perm(len(versions) - 1)[:batchSize] {
					deleteVersions = append(deleteVersions, int64(versions[i]))
					delete(diskMirrors, int64(versions[i]))
					delete(memMirrors, int64(versions[i]))
					if len(desc) > 0 {
						desc += ","
					}
					desc += fmt.Sprintf("%v", versions[i])
				}
				t.Logf("Deleting versions %v", desc)
				err = tree.DeleteVersions(deleteVersions...)
				require.NoError(t, err)

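			// Otherwise delete a single random version (never the latest one).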
			default:
				i := r.Intn(len(versions) - 1)
				deleteVersion := int64(versions[i])
				t.Logf("Deleting version %v", deleteVersion)
				err = tree.DeleteVersion(deleteVersion)
				require.NoError(t, err)
				delete(diskMirrors, deleteVersion)
				delete(memMirrors, deleteVersion)
			}
		}

		// Reload tree from last persisted version if requested, checking that it matches the
		// latest disk mirror version and discarding memory mirrors.
		if r.Float64() < reloadChance {
			tree, version, _ = loadTree(levelDB)
			assertMaxVersion(t, tree, version, diskMirrors)
			memMirrors = make(map[int64]map[string]string)
			mirror = copyMirror(diskMirrors[version])
			mirrorKeys = getMirrorKeys(mirror)
		}

		// Revert tree to historical version if requested, deleting all subsequent versions.
		if r.Float64() < revertChance {
			versions := getMirrorVersions(diskMirrors, memMirrors)
			if len(versions) > 1 {
				version = int64(versions[r.Intn(len(versions)-1)])
				t.Logf("Reverting to version %v", version)
				_, err = tree.LoadVersionForOverwriting(version)
				require.NoError(t, err, "Failed to revert to version %v", version)
				if m, ok := diskMirrors[version]; ok {
					mirror = copyMirror(m)
				} else if m, ok := memMirrors[version]; ok {
					mirror = copyMirror(m)
				} else {
					t.Fatalf("Mirror not found for revert target %v", version)
				}
				mirrorKeys = getMirrorKeys(mirror)
				for v := range diskMirrors {
					if v > version {
						delete(diskMirrors, v)
					}
				}
				for v := range memMirrors {
					if v > version {
						delete(memMirrors, v)
					}
				}
			}
		}

		// Verify all historical versions.
		assertVersions(t, tree, diskMirrors, memMirrors)

		for diskVersion, diskMirror := range diskMirrors {
			assertMirror(t, tree, diskMirror, diskVersion)
		}

		for memVersion, memMirror := range memMirrors {
			assertMirror(t, tree, memMirror, memVersion)
		}
	}

	// Once we're done, delete all prior versions in random order, make sure all orphans have been
	// removed, and check that the latest version matches the mirror.
	remaining := tree.AvailableVersions()
	remaining = remaining[:len(remaining)-1]

	switch {
	case len(remaining) == 0:

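	// Delete the whole remaining range in a single DeleteVersionsRange call.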
	case r.Float64() < deleteRangeChance:
		t.Logf("Deleting versions %v-%v", remaining[0], remaining[len(remaining)-1])
		err = tree.DeleteVersionsRange(int64(remaining[0]), int64(remaining[len(remaining)-1]+1))
		require.NoError(t, err)

	// adjust probability to take into account probability of range delete not happening
	case r.Float64() < deleteMultiChance/(1.0-deleteRangeChance):
		deleteVersions := []int64{}
		desc := ""
		for _, i := range r.Perm(len(remaining)) {
			deleteVersions = append(deleteVersions, int64(remaining[i]))
			if len(desc) > 0 {
				desc += ","
			}
			desc += fmt.Sprintf("%v", remaining[i])
		}
		t.Logf("Deleting versions %v", desc)
		err = tree.DeleteVersions(deleteVersions...)
		require.NoError(t, err)

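	// Otherwise delete the remaining versions one at a time, in random order.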
	default:
		for len(remaining) > 0 {
			i := r.Intn(len(remaining))
			deleteVersion := int64(remaining[i])
			remaining = append(remaining[:i], remaining[i+1:]...)
			t.Logf("Deleting version %v", deleteVersion)
			err = tree.DeleteVersion(deleteVersion)
			require.NoError(t, err)
		}
	}

	require.EqualValues(t, []int{int(version)}, tree.AvailableVersions())
	assertMirror(t, tree, mirror, version)
	assertMirror(t, tree, mirror, 0)
	assertOrphans(t, tree, 0)
	t.Logf("Final version %v is correct, with no stray orphans", version)

	// Now, let's delete all remaining key/value pairs, and make sure no stray
	// data is left behind in the database.
	prevVersion := tree.Version()
	keys := [][]byte{}
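	// Collect all keys first, then remove them; returning false from the Iterate callback continues iterating.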
	tree.Iterate(func(key, value []byte) bool {
		keys = append(keys, key)
		return false
	})
	for _, key := range keys {
		_, removed := tree.Remove(key)
		require.True(t, removed)
	}
	_, _, _, err = tree.SaveVersion(false)
	require.NoError(t, err)
	err = tree.DeleteVersion(prevVersion)
	require.NoError(t, err)
	assertEmptyDatabase(t, tree)
	t.Logf("Final version %v deleted, no stray database entries", prevVersion)
}

// Checks that the database is empty, containing only the storage version metadata entry
// and a single root entry at the given version.
func assertEmptyDatabase(t *testing.T, tree *MutableTree) {
	version := tree.Version()
	iter, err := tree.ndb.db.Iterator(nil, nil)
	require.NoError(t, err)

	var foundKeys []string
	for ; iter.Valid(); iter.Next() {
		foundKeys = append(foundKeys, string(iter.Key()))
	}
	require.NoError(t, iter.Error())
	require.EqualValues(t, 2, len(foundKeys), "Found %v database entries, expected 2", len(foundKeys)) // 1 for storage version and 1 for root

	firstKey := foundKeys[0]
	secondKey := foundKeys[1]

	require.True(t, strings.HasPrefix(firstKey, metadataKeyFormat.Prefix()))
	require.True(t, strings.HasPrefix(secondKey, rootKeyFormat.Prefix()))

	require.Equal(t, string(metadataKeyFormat.KeyBytes([]byte(storageVersionKey))), firstKey, "Unexpected storage version key")

	storageVersionValue, err := tree.ndb.db.Get([]byte(firstKey))
	require.NoError(t, err)
	latestVersion := tree.ndb.getLatestVersion()
	require.Equal(t, fastStorageVersionValue+fastStorageVersionDelimiter+strconv.Itoa(int(latestVersion)), string(storageVersionValue))

	var foundVersion int64
	rootKeyFormat.Scan([]byte(secondKey), &foundVersion)
	require.Equal(t, version, foundVersion, "Unexpected root version")
}

// Checks that the tree has the given number of orphan nodes.
func assertOrphans(t *testing.T, tree *MutableTree, expected int) {
	count := 0
	tree.ndb.traverseOrphans(func(k, v []byte) {
		count++
	})
	require.EqualValues(t, expected, count, "Expected %v orphans, got %v", expected, count)
}

// Checks that a version is the maximum mirrored version.
func assertMaxVersion(t *testing.T, tree *MutableTree, version int64, mirrors map[int64]map[string]string) {
	max := int64(0)
	for v := range mirrors {
		if v > max {
			max = v
		}
	}
	require.Equal(t, max, version)
}

// Checks that a mirror, optionally for a given version, matches the tree contents.
func assertMirror(t *testing.T, tree *MutableTree, mirror map[string]string, version int64) {
	var err error
	itree := tree.ImmutableTree
	if version > 0 {
		itree, err = tree.GetImmutable(version)
		require.NoError(t, err, "loading version %v", version)
	}
	// We check both ways: first check that iterated keys match the mirror, then iterate over the
	// mirror and check with get. This is to exercise both the iteration and Get() code paths.
	iterated := 0
	itree.Iterate(func(key, value []byte) bool {
		require.Equal(t, string(value), mirror[string(key)], "Invalid value for key %q", key)
		iterated++
		return false
	})
	require.EqualValues(t, len(mirror), itree.Size())
	require.EqualValues(t, len(mirror), iterated)
	for key, value := range mirror {
		actualFast := itree.Get([]byte(key))
		require.Equal(t, value, string(actualFast))
		_, actual := itree.GetWithIndex([]byte(key))
		require.Equal(t, value, string(actual))
	}
	assertFastNodeCacheIsLive(t, tree, mirror, version)
	assertFastNodeDiskIsLive(t, tree, mirror, version)
}

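// Checks that fast nodes in the cache match live state.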
func assertFastNodeCacheIsLive(t *testing.T, tree *MutableTree, mirror map[string]string, version int64) {
	if tree.ndb.getLatestVersion() != version {
		// The fast node cache check should only be done for the latest version
		return
	}

	for key, cacheElem := range tree.ndb.fastNodeCache.items {
		liveFastNode, ok := mirror[key]

		require.True(t, ok, "cached fast node must be in the live tree")
		require.Equal(t, liveFastNode, string(cacheElem.Value.(*FastNode).value), "cached fast node's value must be equal to live state value")
	}
}

// Checks that fast nodes on disk match live state.
func assertFastNodeDiskIsLive(t *testing.T, tree *MutableTree, mirror map[string]string, version int64) {
	if EnableAsyncCommit { // async commit does not persist fast nodes, so skip the disk check
		return
	}
	if tree.ndb.getLatestMemoryVersion() != version {
		// The fast node disk check should only be done for the latest version
		return
	}

	count := 0
	tree.ndb.traverseFastNodes(func(keyWithPrefix, v []byte) {
		key := keyWithPrefix[1:]
		count++
		fastNode, err := DeserializeFastNode(key, v)
		require.NoError(t, err)

		mirrorVal, ok := mirror[string(fastNode.key)]

		require.True(t, ok, "fast node key must be present in the live state mirror")
		require.Equal(t, []byte(mirrorVal), fastNode.value)
	})
	require.Equal(t, len(mirror), count)
}

// Checks that all versions in the tree are present in the mirrors, and vice-versa.
func assertVersions(t *testing.T, tree *MutableTree, mirrors ...map[int64]map[string]string) {
	require.Equal(t, getMirrorVersions(mirrors...), tree.AvailableVersions())
}

// copyMirror copies a mirror map.
func copyMirror(mirror map[string]string) map[string]string {
	c := make(map[string]string, len(mirror))
	for k, v := range mirror {
		c[k] = v
	}
	return c
}

// getMirrorKeys returns the keys of a mirror, unsorted.
func getMirrorKeys(mirror map[string]string) []string {
	keys := make([]string, 0, len(mirror))
	for key := range mirror {
		keys = append(keys, key)
	}
	return keys
}

// getMirrorVersions returns the versions of the given mirrors, sorted. Returns []int to
// match tree.AvailableVersions().
func getMirrorVersions(mirrors ...map[int64]map[string]string) []int {
	versionMap := make(map[int]bool)
	for _, m := range mirrors {
		for version := range m {
			versionMap[int(version)] = true
		}
	}
	versions := make([]int, 0, len(versionMap))
	for version := range versionMap {
		versions = append(versions, version)
	}
	sort.Ints(versions)
	return versions
}