github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/ledger/complete/compactor_test.go

     1  package complete
     2  
     3  import (
     4  	"fmt"
     5  	"os"
     6  	"path"
     7  	"reflect"
     8  	"regexp"
     9  	"sync"
    10  	"testing"
    11  	"time"
    12  
    13  	prometheusWAL "github.com/onflow/wal/wal"
    14  	"github.com/rs/zerolog"
    15  	"github.com/rs/zerolog/log"
    16  	"github.com/stretchr/testify/assert"
    17  	"github.com/stretchr/testify/require"
    18  	"go.uber.org/atomic"
    19  
    20  	"github.com/onflow/flow-go/ledger"
    21  	"github.com/onflow/flow-go/ledger/common/testutils"
    22  	"github.com/onflow/flow-go/ledger/complete/mtrie"
    23  	"github.com/onflow/flow-go/ledger/complete/mtrie/trie"
    24  	realWAL "github.com/onflow/flow-go/ledger/complete/wal"
    25  	"github.com/onflow/flow-go/module/metrics"
    26  	"github.com/onflow/flow-go/utils/unittest"
    27  )
    28  
    29  // slow down updating the ledger, because running too fast would cause the previous checkpoint
     30  // to not finish and get delayed, and this might cause tests to get stuck
    31  const LedgerUpdateDelay = time.Millisecond * 500
    32  
     33  // CompactorObserver waits until it gets notified of a
     34  // latest checkpoint number that is at least fromBound
    35  type CompactorObserver struct {
    36  	fromBound int
    37  	done      chan struct{}
    38  }
    39  
    40  func (co *CompactorObserver) OnNext(val interface{}) {
    41  	res, ok := val.(int)
    42  	if ok {
    43  		newCheckpoint := res
     44  		fmt.Printf("Compactor observer received checkpoint num %d, will stop when checkpoint num >= %d\n",
     45  			newCheckpoint, co.fromBound)
    46  		if newCheckpoint >= co.fromBound {
    47  			co.done <- struct{}{}
    48  		}
    49  	}
    50  	fmt.Println("Compactor observer res:", res)
    51  }
    52  
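         // OnError ignores errors. OnComplete closes the done channel,
         // unblocking any select that is waiting on it.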
    53  func (co *CompactorObserver) OnError(err error) {}
    54  func (co *CompactorObserver) OnComplete() {
    55  	close(co.done)
    56  }
    57  
    58  // TestCompactorCreation tests creation of WAL segments and checkpoints, and
     59  // checks that the rebuilt ledger state matches the previous ledger state.
    60  func TestCompactorCreation(t *testing.T) {
    61  	const (
    62  		numInsPerStep      = 2
    63  		pathByteSize       = 32
    64  		minPayloadByteSize = 2 << 15 // 64  KB
    65  		maxPayloadByteSize = 2 << 16 // 128 KB
    66  		size               = 10
    67  		checkpointDistance = 3
    68  		checkpointsToKeep  = 1
    69  		forestCapacity     = size * 10
    70  		segmentSize        = 32 * 1024 // 32 KB
    71  	)
    72  
    73  	metricsCollector := &metrics.NoopCollector{}
    74  
    75  	unittest.RunWithTempDir(t, func(dir string) {
    76  
    77  		var l *Ledger
    78  
    79  		// saved data after updates
    80  		savedData := make(map[ledger.RootHash]map[string]*ledger.Payload)
    81  
    82  		t.Run("creates checkpoints", func(t *testing.T) {
    83  
    84  			wal, err := realWAL.NewDiskWAL(unittest.Logger(), nil, metrics.NewNoopCollector(), dir, forestCapacity, pathByteSize, segmentSize)
    85  			require.NoError(t, err)
    86  
    87  			l, err = NewLedger(wal, size*10, metricsCollector, unittest.Logger(), DefaultPathFinderVersion)
    88  			require.NoError(t, err)
    89  
     90  			// WAL segments are 32kB, and here we generate 2 payloads of at least 64kB each, `size` times,
     91  			// so we should get at least `size` segments
    92  
    93  			compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
    94  			require.NoError(t, err)
    95  
    96  			co := CompactorObserver{fromBound: 8, done: make(chan struct{})}
    97  			compactor.Subscribe(&co)
    98  
    99  			// Run Compactor in background.
   100  			<-compactor.Ready()
   101  
   102  			log.Info().Msgf("compactor is ready")
   103  
   104  			rootState := l.InitialState()
   105  
   106  			// Generate the tree and create WAL
    107  			// update the ledger `size` (10) times; since each update triggers a segment file creation
    108  			// and checkpointDistance is 3, the 10 segment files should trigger checkpoints
    109  			// 2, 5, and 8, which is why fromBound is 8
   110  			for i := 0; i < size; i++ {
   111  				// slow down updating the ledger, because running too fast would cause the previous checkpoint
   112  				// to not finish and get delayed
   113  				time.Sleep(LedgerUpdateDelay)
   114  
   115  				payloads := testutils.RandomPayloads(numInsPerStep, minPayloadByteSize, maxPayloadByteSize)
   116  
   117  				keys := make([]ledger.Key, len(payloads))
   118  				values := make([]ledger.Value, len(payloads))
   119  				for i, p := range payloads {
   120  					k, err := p.Key()
   121  					require.NoError(t, err)
   122  					keys[i] = k
   123  					values[i] = p.Value()
   124  				}
   125  
   126  				update, err := ledger.NewUpdate(rootState, keys, values)
   127  				require.NoError(t, err)
   128  
    129  				log.Info().Msgf("update ledger with at least %v bytes of payloads, exceeding segment size %v, which will trigger segment file creation",
   130  					numInsPerStep*minPayloadByteSize, segmentSize)
   131  				newState, _, err := l.Set(update)
   132  				require.NoError(t, err)
   133  
   134  				require.FileExists(t, path.Join(dir, realWAL.NumberToFilenamePart(i)))
   135  
   136  				data := make(map[string]*ledger.Payload, len(keys))
   137  				for j, k := range keys {
   138  					ks := string(k.CanonicalForm())
   139  					data[ks] = payloads[j]
   140  				}
   141  
   142  				savedData[ledger.RootHash(newState)] = data
   143  
   144  				rootState = newState
   145  			}
   146  
   147  			log.Info().Msgf("wait for the bound-checking observer to confirm checkpoints have been made")
   148  
   149  			select {
   150  			case <-co.done:
   151  				// continue
   152  			case <-time.After(60 * time.Second):
   153  				// Log segment and checkpoint files
   154  				files, err := os.ReadDir(dir)
   155  				require.NoError(t, err)
   156  
   157  				for _, file := range files {
   158  					info, err := file.Info()
   159  					require.NoError(t, err)
   160  					fmt.Printf("%s, size %d\n", file.Name(), info.Size())
   161  				}
   162  
   163  				assert.FailNow(t, "timed out")
   164  			}
   165  
   166  			checkpointer, err := wal.NewCheckpointer()
   167  			require.NoError(t, err)
   168  
   169  			from, to, err := checkpointer.NotCheckpointedSegments()
   170  			require.NoError(t, err)
   171  
   172  			assert.True(t, from == 9 && to == 10, "from: %v, to: %v", from, to) // Make sure there is no leftover
   173  
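         			// checkpointsToKeep is 1, so only the latest checkpoint (8) should remain on disk;
         			// all earlier checkpoints should have been removed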
   174  			require.NoFileExists(t, path.Join(dir, "checkpoint.00000000"))
   175  			require.NoFileExists(t, path.Join(dir, "checkpoint.00000001"))
   176  			require.NoFileExists(t, path.Join(dir, "checkpoint.00000002"))
   177  			require.NoFileExists(t, path.Join(dir, "checkpoint.00000003"))
   178  			require.NoFileExists(t, path.Join(dir, "checkpoint.00000004"))
   179  			require.NoFileExists(t, path.Join(dir, "checkpoint.00000005"))
   180  			require.NoFileExists(t, path.Join(dir, "checkpoint.00000006"))
   181  			require.NoFileExists(t, path.Join(dir, "checkpoint.00000007"))
   182  			require.FileExists(t, path.Join(dir, "checkpoint.00000008"))
   183  			require.NoFileExists(t, path.Join(dir, "checkpoint.00000009"))
   184  
   185  			<-l.Done()
   186  			<-compactor.Done()
   187  		})
   188  
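         		// pause briefly so the ledger and compactor can finish winding down before the next subtest manipulates files on disk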
   189  		time.Sleep(2 * time.Second)
   190  
   191  		t.Run("remove unnecessary files", func(t *testing.T) {
   192  			// Remove all files apart from target checkpoint and WAL segments ahead of it
   193  			// We know their names, so just hardcode them
   194  			dirF, _ := os.Open(dir)
   195  			files, _ := dirF.Readdir(0)
   196  
    197  			find008, err := regexp.Compile(`^checkpoint\.00000008`)
   198  			require.NoError(t, err)
   199  
   200  			for _, fileInfo := range files {
   201  
   202  				name := fileInfo.Name()
   203  				if name == "00000009" ||
   204  					name == "00000010" ||
   205  					name == "00000011" {
   206  					log.Info().Msgf("keep file %v/%v", dir, name)
   207  					continue
   208  				}
   209  
   210  				// checkpoint V6 has multiple files
   211  				if find008.MatchString(name) {
   212  					log.Info().Msgf("keep file %v/%v", dir, name)
   213  					continue
   214  				}
   215  
   216  				err := os.Remove(path.Join(dir, name))
   217  				require.NoError(t, err)
   218  				log.Info().Msgf("removed file %v/%v", dir, name)
   219  			}
   220  		})
   221  
   222  		var l2 *Ledger
   223  
   224  		time.Sleep(2 * time.Second)
   225  
   226  		t.Run("load data from checkpoint and WAL", func(t *testing.T) {
   227  
   228  			wal2, err := realWAL.NewDiskWAL(unittest.Logger(), nil, metrics.NewNoopCollector(), dir, size*10, pathByteSize, 32*1024)
   229  			require.NoError(t, err)
   230  
   231  			l2, err = NewLedger(wal2, size*10, metricsCollector, unittest.Logger(), DefaultPathFinderVersion)
   232  			require.NoError(t, err)
   233  
   234  			<-wal2.Done()
   235  		})
   236  
   237  		t.Run("make sure forests are equal", func(t *testing.T) {
   238  
   239  			// Check for same data
   240  			for rootHash, data := range savedData {
   241  
   242  				keys := make([]ledger.Key, 0, len(data))
   243  				for _, p := range data {
   244  					k, err := p.Key()
   245  					require.NoError(t, err)
   246  					keys = append(keys, k)
   247  				}
   248  
   249  				q, err := ledger.NewQuery(ledger.State(rootHash), keys)
   250  				require.NoError(t, err)
   251  
   252  				values, err := l.Get(q)
   253  				require.NoError(t, err)
   254  
   255  				values2, err := l2.Get(q)
   256  				require.NoError(t, err)
   257  
   258  				for i, k := range keys {
   259  					ks := k.CanonicalForm()
   260  					require.Equal(t, data[string(ks)].Value(), values[i])
   261  					require.Equal(t, data[string(ks)].Value(), values2[i])
   262  				}
   263  			}
   264  
   265  			forestTries, err := l.Tries()
   266  			require.NoError(t, err)
   267  
   268  			forestTriesSet := make(map[ledger.RootHash]struct{})
   269  			for _, trie := range forestTries {
   270  				forestTriesSet[trie.RootHash()] = struct{}{}
   271  			}
   272  
    273  			forestTries2, err := l2.Tries()
   274  			require.NoError(t, err)
   275  
   276  			forestTries2Set := make(map[ledger.RootHash]struct{})
   277  			for _, trie := range forestTries2 {
   278  				forestTries2Set[trie.RootHash()] = struct{}{}
   279  			}
   280  
   281  			require.Equal(t, forestTriesSet, forestTries2Set)
   282  		})
   283  
   284  	})
   285  }
   286  
    287  // TestCompactorSkipCheckpointing tests that only one
    288  // checkpointing run is in progress at a time (checkpoints are skipped while one is already running).
   289  func TestCompactorSkipCheckpointing(t *testing.T) {
   290  	const (
   291  		numInsPerStep      = 2
   292  		pathByteSize       = 32
   293  		minPayloadByteSize = 2 << 15
   294  		maxPayloadByteSize = 2 << 16
   295  		size               = 10
   296  		checkpointDistance = 1 // checkpointDistance 1 triggers checkpointing for every segment.
   297  		checkpointsToKeep  = 0
   298  		forestCapacity     = size * 10
   299  	)
   300  
   301  	metricsCollector := &metrics.NoopCollector{}
   302  
   303  	unittest.RunWithTempDir(t, func(dir string) {
   304  
   305  		var l *Ledger
   306  
   307  		// saved data after updates
   308  		savedData := make(map[ledger.RootHash]map[string]*ledger.Payload)
   309  
   310  		wal, err := realWAL.NewDiskWAL(unittest.Logger(), nil, metrics.NewNoopCollector(), dir, forestCapacity, pathByteSize, 32*1024)
   311  		require.NoError(t, err)
   312  
   313  		l, err = NewLedger(wal, size*10, metricsCollector, zerolog.Logger{}, DefaultPathFinderVersion)
   314  		require.NoError(t, err)
   315  
    316  		// WAL segments are 32kB, and here we generate 2 payloads of at least 64kB each, `size` times,
    317  		// so we should get at least `size` segments
   318  
   319  		compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
   320  		require.NoError(t, err)
   321  
   322  		co := CompactorObserver{fromBound: 8, done: make(chan struct{})}
   323  		compactor.Subscribe(&co)
   324  
   325  		// Run Compactor in background.
   326  		<-compactor.Ready()
   327  
   328  		rootState := l.InitialState()
   329  
   330  		// Generate the tree and create WAL
   331  		for i := 0; i < size; i++ {
   332  
   333  			// slow down updating the ledger, because running too fast would cause the previous checkpoint
   334  			// to not finish and get delayed
   335  			time.Sleep(LedgerUpdateDelay)
   336  
   337  			payloads := testutils.RandomPayloads(numInsPerStep, minPayloadByteSize, maxPayloadByteSize)
   338  
   339  			keys := make([]ledger.Key, len(payloads))
   340  			values := make([]ledger.Value, len(payloads))
   341  			for i, p := range payloads {
   342  				k, err := p.Key()
   343  				require.NoError(t, err)
   344  				keys[i] = k
   345  				values[i] = p.Value()
   346  			}
   347  
   348  			update, err := ledger.NewUpdate(rootState, keys, values)
   349  			require.NoError(t, err)
   350  
   351  			newState, _, err := l.Set(update)
   352  			require.NoError(t, err)
   353  
   354  			require.FileExists(t, path.Join(dir, realWAL.NumberToFilenamePart(i)))
   355  
   356  			data := make(map[string]*ledger.Payload, len(keys))
   357  			for j, k := range keys {
   358  				ks := string(k.CanonicalForm())
   359  				data[ks] = payloads[j]
   360  			}
   361  
   362  			savedData[ledger.RootHash(newState)] = data
   363  
   364  			rootState = newState
   365  		}
   366  
   367  		// wait for the bound-checking observer to confirm checkpoints have been made
   368  		select {
   369  		case <-co.done:
   370  			// continue
   371  		case <-time.After(60 * time.Second):
   372  			// Log segment and checkpoint files
   373  			files, err := os.ReadDir(dir)
   374  			require.NoError(t, err)
   375  
   376  			for _, file := range files {
   377  				info, err := file.Info()
   378  				require.NoError(t, err)
   379  				fmt.Printf("%s, size %d\n", file.Name(), info.Size())
   380  			}
   381  
   382  			// This assert can be flaky because of speed fluctuations (GitHub CI slowdowns, etc.).
   383  			// Because this test only cares about number of created checkpoint files,
    384  			// we don't need to fail the test here; the assertion is kept commented out for documentation.
   385  			// assert.FailNow(t, "timed out")
   386  		}
   387  
   388  		<-l.Done()
   389  		<-compactor.Done()
   390  
   391  		first, last, err := wal.Segments()
   392  		require.NoError(t, err)
   393  
   394  		segmentCount := last - first + 1
   395  
   396  		checkpointer, err := wal.NewCheckpointer()
   397  		require.NoError(t, err)
   398  
   399  		nums, err := checkpointer.Checkpoints()
   400  		require.NoError(t, err)
   401  
   402  		// Check that there are gaps between checkpoints (some checkpoints are skipped)
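         		// (with checkpointDistance = 1, every segment would otherwise produce a checkpoint,
         		// so either condition implies that some checkpoints were skipped)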
   403  		firstNum, lastNum := nums[0], nums[len(nums)-1]
   404  		require.True(t, (len(nums) < lastNum-firstNum+1) || (len(nums) < segmentCount))
   405  	})
   406  }
   407  
   408  // TestCompactorAccuracy expects checkpointed tries to match replayed tries.
   409  // Replayed tries are tries updated by replaying all WAL segments
    410  // (from segment 0, ignoring prior checkpoints) up to the checkpoint number.
    411  // This verifies that checkpointed tries are snapshots of segments taken at segment boundaries.
   412  func TestCompactorAccuracy(t *testing.T) {
   413  
   414  	const (
   415  		numInsPerStep      = 2
   416  		pathByteSize       = 32
   417  		minPayloadByteSize = 2<<11 - 256 // 3840 bytes
   418  		maxPayloadByteSize = 2 << 11     // 4096 bytes
   419  		size               = 20
   420  		checkpointDistance = 5
   421  		checkpointsToKeep  = 0 // keep all
   422  		forestCapacity     = 500
   423  	)
   424  
   425  	metricsCollector := &metrics.NoopCollector{}
   426  
   427  	unittest.RunWithTempDir(t, func(dir string) {
   428  
   429  		// There appears to be 1-2 records per segment (according to logs), so
   430  		// generate size/2 segments.
   431  
   432  		lastCheckpointNum := -1
   433  
   434  		rootHash := trie.EmptyTrieRootHash()
   435  
   436  		// Create DiskWAL and Ledger repeatedly to test rebuilding ledger state at restart.
   437  		for i := 0; i < 3; i++ {
   438  
   439  			wal, err := realWAL.NewDiskWAL(unittest.Logger(), nil, metrics.NewNoopCollector(), dir, forestCapacity, pathByteSize, 32*1024)
   440  			require.NoError(t, err)
   441  
   442  			l, err := NewLedger(wal, forestCapacity, metricsCollector, zerolog.Logger{}, DefaultPathFinderVersion)
   443  			require.NoError(t, err)
   444  
   445  			compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
   446  			require.NoError(t, err)
   447  
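         			// each round is expected to add roughly size/2 new segments, so wait until the latest
         			// checkpoint number advances that far past the previous round's last checkpoint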
   448  			fromBound := lastCheckpointNum + (size / 2)
   449  
   450  			co := CompactorObserver{fromBound: fromBound, done: make(chan struct{})}
   451  			compactor.Subscribe(&co)
   452  
   453  			// Run Compactor in background.
   454  			<-compactor.Ready()
   455  
   456  			// Generate the tree and create WAL
   457  			// size+2 is used to ensure that size/2 segments are finalized.
   458  			for i := 0; i < size+2; i++ {
   459  				// slow down updating the ledger, because running too fast would cause the previous checkpoint
   460  				// to not finish and get delayed
   461  				time.Sleep(LedgerUpdateDelay)
   462  
   463  				payloads := testutils.RandomPayloads(numInsPerStep, minPayloadByteSize, maxPayloadByteSize)
   464  
   465  				keys := make([]ledger.Key, len(payloads))
   466  				values := make([]ledger.Value, len(payloads))
   467  				for i, p := range payloads {
   468  					k, err := p.Key()
   469  					require.NoError(t, err)
   470  					keys[i] = k
   471  					values[i] = p.Value()
   472  				}
   473  
   474  				update, err := ledger.NewUpdate(ledger.State(rootHash), keys, values)
   475  				require.NoError(t, err)
   476  
   477  				newState, _, err := l.Set(update)
   478  				require.NoError(t, err)
   479  
   480  				rootHash = ledger.RootHash(newState)
   481  			}
   482  
   483  			// wait for the bound-checking observer to confirm checkpoints have been made
   484  			select {
   485  			case <-co.done:
   486  				// continue
   487  			case <-time.After(60 * time.Second):
   488  				assert.FailNow(t, "timed out")
   489  			}
   490  
   491  			// Shutdown ledger and compactor
   492  			<-l.Done()
   493  			<-compactor.Done()
   494  
   495  			checkpointer, err := wal.NewCheckpointer()
   496  			require.NoError(t, err)
   497  
   498  			nums, err := checkpointer.Checkpoints()
   499  			require.NoError(t, err)
   500  
   501  			for _, n := range nums {
   502  				// TODO:  After the LRU Cache (outside of checkpointing code) is replaced
   503  				// by a queue, etc. we should make sure the insertion order is preserved.
   504  
   505  				checkSequence := false
   506  				if i == 0 {
   507  					// Sequence check only works when initial state is blank.
   508  					// When initial state is from ledger's forest (LRU cache),
   509  					// its sequence is altered by reads when replaying segment records.
   510  					// Insertion order is not preserved (which is the way
   511  					// it is currently on mainnet).  However, with PR 2792, only the
   512  					// initial values are affected and those would likely
   513  					// get into insertion order for the next checkpoint.  Once
   514  					// the LRU Cache (outside of checkpointing code) is replaced,
   515  					// then we can verify insertion order.
   516  					checkSequence = true
   517  				}
   518  				testCheckpointedTriesMatchReplayedTriesFromSegments(t, checkpointer, n, dir, checkSequence)
   519  			}
   520  
   521  			lastCheckpointNum = nums[len(nums)-1]
   522  		}
   523  	})
   524  }
   525  
    526  // TestCompactorTriggeredByAdminTool tests that the compactor listens for the signal from the admin tool
    527  // and triggers a checkpoint when the current segment file is finished.
   528  func TestCompactorTriggeredByAdminTool(t *testing.T) {
   529  
   530  	const (
   531  		numInsPerStep      = 2 // the number of payloads in each trie update
   532  		pathByteSize       = 32
   533  		minPayloadByteSize = 2<<11 - 256 // 3840 bytes
   534  		maxPayloadByteSize = 2 << 11     // 4096 bytes
   535  		size               = 20
   536  		checkpointDistance = 5   // create checkpoint on every 5 segment files
   537  		checkpointsToKeep  = 0   // keep all
   538  		forestCapacity     = 500 // the number of tries to be included in a checkpoint file
   539  	)
   540  
   541  	metricsCollector := &metrics.NoopCollector{}
   542  
   543  	unittest.RunWithTempDir(t, func(dir string) {
   544  
   545  		rootHash := trie.EmptyTrieRootHash()
   546  
    547  		// Create DiskWAL, Ledger, and Compactor.
   548  
   549  		wal, err := realWAL.NewDiskWAL(unittest.LoggerWithName("wal"), nil, metrics.NewNoopCollector(), dir, forestCapacity, pathByteSize, 32*1024)
   550  		require.NoError(t, err)
   551  
   552  		l, err := NewLedger(wal, forestCapacity, metricsCollector, unittest.LoggerWithName("ledger"), DefaultPathFinderVersion)
   553  		require.NoError(t, err)
   554  
   555  		compactor, err := NewCompactor(l, wal, unittest.LoggerWithName("compactor"), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(true), metrics.NewNoopCollector())
   556  		require.NoError(t, err)
   557  
   558  		fmt.Println("should stop as soon as segment 5 is generated, which should trigger checkpoint 5 to be created")
    559  		fmt.Println("note checkpoint 0 will be notified to the CompactorObserver because checkpoint 0 is the root checkpoint")
    560  		fmt.Println("Why fromBound = 5? Because without forcing the checkpoint to trigger, it won't trigger until segment 4 is finished; with")
    561  		fmt.Println("forcing the checkpoint to trigger, it will trigger when segments 0 and 5 are finished")
   562  		fromBound := 5
   563  
   564  		co := CompactorObserver{fromBound: fromBound, done: make(chan struct{})}
   565  		compactor.Subscribe(&co)
   566  
   567  		// Run Compactor in background.
   568  		<-compactor.Ready()
   569  
   570  		fmt.Println("generate the tree and create WAL")
   571  		fmt.Println("2 trie updates will fill a segment file, and 12 trie updates will fill 6 segment files")
   572  		fmt.Println("13 trie updates in total will trigger segment 5 to be finished, which should trigger checkpoint 5")
   573  		for i := 0; i < 13; i++ {
   574  			// slow down updating the ledger, because running too fast would cause the previous checkpoint
   575  			// to not finish and get delayed
   576  			time.Sleep(LedgerUpdateDelay)
   577  
   578  			payloads := testutils.RandomPayloads(numInsPerStep, minPayloadByteSize, maxPayloadByteSize)
   579  
   580  			keys := make([]ledger.Key, len(payloads))
   581  			values := make([]ledger.Value, len(payloads))
   582  			for i, p := range payloads {
   583  				k, err := p.Key()
   584  				require.NoError(t, err)
   585  				keys[i] = k
   586  				values[i] = p.Value()
   587  			}
   588  
   589  			update, err := ledger.NewUpdate(ledger.State(rootHash), keys, values)
   590  			require.NoError(t, err)
   591  
   592  			newState, _, err := l.Set(update)
   593  			require.NoError(t, err)
   594  
   595  			rootHash = ledger.RootHash(newState)
   596  		}
   597  
   598  		// wait for the bound-checking observer to confirm checkpoints have been made
   599  		select {
   600  		case <-co.done:
   601  			// continue
   602  		case <-time.After(60 * time.Second):
   603  			assert.FailNow(t, "timed out")
   604  		}
   605  
   606  		// Shutdown ledger and compactor
   607  		<-l.Done()
   608  		<-compactor.Done()
   609  
   610  		checkpointer, err := wal.NewCheckpointer()
   611  		require.NoError(t, err)
   612  
   613  		nums, err := checkpointer.Checkpoints()
   614  		require.NoError(t, err)
   615  		// 0 is the first checkpoint triggered because of the force triggering
   616  		// 5 is triggered after 4 segments are filled.
   617  		require.Equal(t, []int{0, 5}, nums)
   618  	})
   619  }
   620  
   621  // TestCompactorConcurrency expects checkpointed tries to
   622  // match replayed tries in sequence with concurrent updates.
   623  // Replayed tries are tries updated by replaying all WAL segments
    624  // (from segment 0, ignoring prior checkpoints) up to the checkpoint number.
    625  // This verifies that checkpointed tries are snapshots of segments
    626  // taken at segment boundaries.
   627  // Note: sequence check only works when initial state is blank.
   628  // When initial state is from ledger's forest (LRU cache), its
   629  // sequence is altered by reads when replaying segment records.
   630  func TestCompactorConcurrency(t *testing.T) {
   631  	const (
   632  		numInsPerStep      = 2
   633  		pathByteSize       = 32
   634  		minPayloadByteSize = 2<<11 - 256 // 3840 bytes
   635  		maxPayloadByteSize = 2 << 11     // 4096 bytes
   636  		size               = 20
   637  		checkpointDistance = 5
   638  		checkpointsToKeep  = 0 // keep all
   639  		forestCapacity     = 500
   640  		numGoroutine       = 4
   641  		lastCheckpointNum  = -1
   642  	)
   643  
   644  	rootState := ledger.State(trie.EmptyTrieRootHash())
   645  
   646  	metricsCollector := &metrics.NoopCollector{}
   647  
   648  	unittest.RunWithTempDir(t, func(dir string) {
   649  
   650  		// There are 1-2 records per segment (according to logs), so
   651  		// generate size/2 segments.
   652  
   653  		wal, err := realWAL.NewDiskWAL(unittest.Logger(), nil, metrics.NewNoopCollector(), dir, forestCapacity, pathByteSize, 32*1024)
   654  		require.NoError(t, err)
   655  
   656  		l, err := NewLedger(wal, forestCapacity, metricsCollector, zerolog.Logger{}, DefaultPathFinderVersion)
   657  		require.NoError(t, err)
   658  
   659  		compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
   660  		require.NoError(t, err)
   661  
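         		// each goroutine is expected to fill roughly size/2 segments, so wait until the
         		// latest checkpoint number reaches about size/2*numGoroutine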
   662  		fromBound := lastCheckpointNum + (size / 2 * numGoroutine)
   663  
   664  		co := CompactorObserver{fromBound: fromBound, done: make(chan struct{})}
   665  		compactor.Subscribe(&co)
   666  
   667  		// Run Compactor in background.
   668  		<-compactor.Ready()
   669  
   670  		var wg sync.WaitGroup
   671  		wg.Add(numGoroutine)
   672  
   673  		// Run 4 goroutines and each goroutine updates size+1 tries.
   674  		for j := 0; j < numGoroutine; j++ {
   675  			go func(parentState ledger.State) {
   676  				defer wg.Done()
   677  
   678  				// size+1 is used to ensure that size/2*numGoroutine segments are finalized.
   679  				for i := 0; i < size+1; i++ {
   680  					// slow down updating the ledger, because running too fast would cause the previous checkpoint
   681  					// to not finish and get delayed
   682  					time.Sleep(LedgerUpdateDelay)
   683  					payloads := testutils.RandomPayloads(numInsPerStep, minPayloadByteSize, maxPayloadByteSize)
   684  
   685  					keys := make([]ledger.Key, len(payloads))
   686  					values := make([]ledger.Value, len(payloads))
   687  					for i, p := range payloads {
   688  						k, err := p.Key()
   689  						require.NoError(t, err)
   690  						keys[i] = k
   691  						values[i] = p.Value()
   692  					}
   693  
   694  					update, err := ledger.NewUpdate(parentState, keys, values)
   695  					require.NoError(t, err)
   696  
   697  					newState, _, err := l.Set(update)
   698  					require.NoError(t, err)
   699  
   700  					parentState = newState
   701  				}
   702  			}(rootState)
   703  		}
   704  
   705  		// wait for goroutines updating ledger
   706  		wg.Wait()
   707  
   708  		// wait for the bound-checking observer to confirm checkpoints have been made
   709  		select {
   710  		case <-co.done:
   711  			// continue
   712  		case <-time.After(120 * time.Second):
   713  			assert.FailNow(t, "timed out")
   714  		}
   715  
   716  		// Shutdown ledger and compactor
   717  		<-l.Done()
   718  		<-compactor.Done()
   719  
   720  		checkpointer, err := wal.NewCheckpointer()
   721  		require.NoError(t, err)
   722  
   723  		nums, err := checkpointer.Checkpoints()
   724  		require.NoError(t, err)
   725  
   726  		for _, n := range nums {
   727  			// For each created checkpoint:
   728  			// - get tries by loading checkpoint
   729  			// - get tries by replaying segments up to checkpoint number (ignoring all prior checkpoints)
   730  			// - test that these 2 sets of tries match in content and sequence (insertion order).
   731  			testCheckpointedTriesMatchReplayedTriesFromSegments(t, checkpointer, n, dir, true)
   732  		}
   733  	})
   734  }
   735  
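         // testCheckpointedTriesMatchReplayedTriesFromSegments loads the tries stored in checkpoint
         // checkpointNum and the tries produced by replaying WAL segments 0..checkpointNum, and requires
         // that both contain the same tries; if inSequence is true, the insertion order must match as well.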
   736  func testCheckpointedTriesMatchReplayedTriesFromSegments(
   737  	t *testing.T,
   738  	checkpointer *realWAL.Checkpointer,
   739  	checkpointNum int,
   740  	dir string,
   741  	inSequence bool,
   742  ) {
   743  	// Get tries by loading checkpoint
   744  	triesFromLoadingCheckpoint, err := checkpointer.LoadCheckpoint(checkpointNum)
   745  	require.NoError(t, err)
   746  
   747  	// Get tries by replaying segments up to checkpoint number (ignoring checkpoints)
   748  	triesFromReplayingSegments, err := triesUpToSegment(dir, checkpointNum, len(triesFromLoadingCheckpoint))
   749  	require.NoError(t, err)
   750  
   751  	if inSequence {
   752  		// Test that checkpointed tries match replayed tries in content and sequence (insertion order).
   753  		require.Equal(t, len(triesFromReplayingSegments), len(triesFromLoadingCheckpoint))
   754  		for i := 0; i < len(triesFromReplayingSegments); i++ {
   755  			require.Equal(t, triesFromReplayingSegments[i].RootHash(), triesFromLoadingCheckpoint[i].RootHash())
   756  		}
   757  		return
   758  	}
   759  
   760  	// Test that checkpointed tries match replayed tries in content (ignore order).
   761  	triesSetFromReplayingSegments := make(map[ledger.RootHash]struct{})
   762  	for _, t := range triesFromReplayingSegments {
   763  		triesSetFromReplayingSegments[t.RootHash()] = struct{}{}
   764  	}
   765  
   766  	triesSetFromLoadingCheckpoint := make(map[ledger.RootHash]struct{})
   767  	for _, t := range triesFromLoadingCheckpoint {
   768  		triesSetFromLoadingCheckpoint[t.RootHash()] = struct{}{}
   769  	}
   770  
   771  	require.True(t, reflect.DeepEqual(triesSetFromReplayingSegments, triesSetFromLoadingCheckpoint))
   772  }
   773  
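         // triesUpToSegment replays WAL segments 0..to into a fresh forest and returns
         // the last `capacity` tries in insertion order.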
   774  func triesUpToSegment(dir string, to int, capacity int) ([]*trie.MTrie, error) {
   775  
   776  	// forest is used to create new trie.
   777  	forest, err := mtrie.NewForest(capacity, &metrics.NoopCollector{}, nil)
   778  	if err != nil {
   779  		return nil, fmt.Errorf("cannot create Forest: %w", err)
   780  	}
   781  
   782  	initialTries, err := forest.GetTries()
   783  	if err != nil {
   784  		return nil, fmt.Errorf("cannot get tries from forest: %w", err)
   785  	}
   786  
    787  	// TrieQueue is used to store the last n tries from segment files in order (n = capacity)
   788  	tries := realWAL.NewTrieQueueWithValues(uint(capacity), initialTries)
   789  
   790  	err = replaySegments(
   791  		dir,
   792  		to,
   793  		func(update *ledger.TrieUpdate) error {
   794  			t, err := forest.NewTrie(update)
   795  			if err == nil {
   796  				err = forest.AddTrie(t)
   797  				if err != nil {
   798  					return err
   799  				}
   800  				tries.Push(t)
   801  			}
   802  			return err
   803  		}, func(rootHash ledger.RootHash) error {
   804  			return nil
   805  		})
   806  	if err != nil {
   807  		return nil, err
   808  	}
   809  
   810  	return tries.Tries(), nil
   811  }
   812  
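         // replaySegments reads WAL segments 0..to in dir and calls updateFn for every
         // WALUpdate record and deleteFn for every WALDelete record.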
   813  func replaySegments(
   814  	dir string,
   815  	to int,
   816  	updateFn func(update *ledger.TrieUpdate) error,
   817  	deleteFn func(rootHash ledger.RootHash) error,
   818  ) error {
   819  	sr, err := prometheusWAL.NewSegmentsRangeReader(unittest.Logger(), prometheusWAL.SegmentRange{
   820  		Dir:   dir,
   821  		First: 0,
   822  		Last:  to,
   823  	})
   824  	if err != nil {
   825  		return fmt.Errorf("cannot create segment reader: %w", err)
   826  	}
   827  
   828  	reader := prometheusWAL.NewReader(sr)
   829  
   830  	defer sr.Close()
   831  
   832  	for reader.Next() {
   833  		record := reader.Record()
   834  		operation, rootHash, update, err := realWAL.Decode(record)
   835  		if err != nil {
   836  			return fmt.Errorf("cannot decode LedgerWAL record: %w", err)
   837  		}
   838  
   839  		switch operation {
   840  		case realWAL.WALUpdate:
   841  			err = updateFn(update)
   842  			if err != nil {
   843  				return fmt.Errorf("error while processing LedgerWAL update: %w", err)
   844  			}
   845  		case realWAL.WALDelete:
   846  			err = deleteFn(rootHash)
   847  			if err != nil {
   848  				return fmt.Errorf("error while processing LedgerWAL deletion: %w", err)
   849  			}
   850  		}
   851  
   852  		err = reader.Err()
   853  		if err != nil {
   854  			return fmt.Errorf("cannot read LedgerWAL: %w", err)
   855  		}
   856  	}
   857  
   858  	return nil
   859  }