github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/engine/consensus/approvals/assignment_collector_tree.go (about)

     1  package approvals
     2  
     3  import (
     4  	"fmt"
     5  	"sync"
     6  
     7  	"github.com/rs/zerolog/log"
     8  
     9  	"github.com/onflow/flow-go/engine"
    10  	"github.com/onflow/flow-go/model/flow"
    11  	"github.com/onflow/flow-go/module/forest"
    12  	"github.com/onflow/flow-go/storage"
    13  )
    14  
    15  // assignmentCollectorVertex is a helper structure that wraps an AssignmentCollector
    16  // so it implements the LevelledForest's `Vertex` interface:
    17  //   - VertexID is defined as the ID of the execution result
    18  //   - Level is defined as the height of the executed block
    19  type assignmentCollectorVertex struct {
    20  	collector AssignmentCollector
    21  }
    22  
    23  /* Methods implementing LevelledForest's Vertex interface */
    24  
    25  func (v *assignmentCollectorVertex) VertexID() flow.Identifier { return v.collector.ResultID() }
    26  func (v *assignmentCollectorVertex) Level() uint64             { return v.collector.Block().Height }
    27  func (v *assignmentCollectorVertex) Parent() (flow.Identifier, uint64) {
    28  	return v.collector.Result().PreviousResultID, v.collector.Block().Height - 1
    29  }
    30  
    31  // NewCollectorFactoryMethod is a factory method to generate an AssignmentCollector for an execution result
    32  type NewCollectorFactoryMethod = func(result *flow.ExecutionResult) (AssignmentCollector, error)
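        // A minimal sketch of a factory closure satisfying this type. The concrete collector
        // constructor used below (newMyCollector) is hypothetical; in practice the factory is
        // supplied by the caller when constructing the AssignmentCollectorTree:
        //
        //	createCollector := func(result *flow.ExecutionResult) (AssignmentCollector, error) {
        //		collector, err := newMyCollector(result) // hypothetical constructor
        //		if err != nil {
        //			return nil, fmt.Errorf("could not instantiate collector for result %v: %w", result.ID(), err)
        //		}
        //		return collector, nil
        //	}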
    33  
    34  // AssignmentCollectorTree is a mempool holding assignment collectors, which is aware of the tree structure
    35  // formed by the execution results. The mempool supports pruning by height: only collectors
    36  // descending from the latest sealed and finalized result are relevant.
    37  // Safe for concurrent access. Internally, the mempool utilizes the LevelledForest.
    38  type AssignmentCollectorTree struct {
    39  	forest              *forest.LevelledForest    // underlying levelled forest of assignmentCollectorVertex, indexed by result ID and executed block height
    40  	lock                sync.RWMutex              // protects the forest and the cached heights and sealed block ID
    41  	createCollector     NewCollectorFactoryMethod // factory method for instantiating assignment collectors for new results
    42  	lastSealedID        flow.Identifier           // ID of the latest sealed and finalized block
    43  	lastSealedHeight    uint64                    // height of the latest sealed and finalized block
    44  	lastFinalizedHeight uint64                    // height of the latest finalized block
    45  	headers             storage.Headers           // storage to retrieve finalized block headers by height
    46  }
    47  
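        // NewAssignmentCollectorTree instantiates an empty AssignmentCollectorTree, using the given
        // last sealed (and finalized) block as the lowest retained level.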
    48  func NewAssignmentCollectorTree(lastSealed *flow.Header, headers storage.Headers, createCollector NewCollectorFactoryMethod) *AssignmentCollectorTree {
    49  	return &AssignmentCollectorTree{
    50  		forest:              forest.NewLevelledForest(lastSealed.Height),
    51  		lock:                sync.RWMutex{},
    52  		createCollector:     createCollector,
    53  		lastSealedID:        lastSealed.ID(),
    54  		lastFinalizedHeight: lastSealed.Height,
    55  		lastSealedHeight:    lastSealed.Height,
    56  		headers:             headers,
    57  	}
    58  }
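
        // Example usage (a sketch; lastSealedHeader, headersDB and createCollector are assumed to be
        // provided by the caller, e.g. by the sealing engine's setup code):
        //
        //	tree := NewAssignmentCollectorTree(lastSealedHeader, headersDB, createCollector)
        //	fmt.Println(tree.GetSize()) // 0 immediately after construction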
    59  
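        // GetSize returns the number of assignment collectors currently stored in the tree.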
    60  func (t *AssignmentCollectorTree) GetSize() uint64 {
    61  	t.lock.RLock()
    62  	defer t.lock.RUnlock()
    63  	// Locking is still needed, since forest.GetSize is not concurrency safe.
    64  	return t.forest.GetSize()
    65  }
    66  
    67  // GetCollector returns the assignment collector for the given result, or nil if none is stored.
    68  func (t *AssignmentCollectorTree) GetCollector(resultID flow.Identifier) AssignmentCollector {
    69  	t.lock.RLock()
    70  	defer t.lock.RUnlock()
    71  	vertex, found := t.forest.GetVertex(resultID)
    72  	if !found {
    73  		return nil
    74  	}
    75  
    76  	v := vertex.(*assignmentCollectorVertex)
    77  	return v.collector
    78  }
    79  
    80  // FinalizeForkAtLevel orphans forks in the AssignmentCollectorTree and prunes levels below the
    81  // sealed finalized height. When a block is finalized, we can mark results on conflicting forks as
    82  // orphaned and stop processing approvals for them. Eventually, all orphaned forks are cleaned up by height.
    83  func (t *AssignmentCollectorTree) FinalizeForkAtLevel(finalized *flow.Header, sealed *flow.Header) error {
    84  	t.lock.Lock()
    85  	defer t.lock.Unlock()
    86  
    87  	if t.lastFinalizedHeight >= finalized.Height {
    88  		return nil
    89  	}
    90  
    91  	// STEP 1: orphan forks in the AssignmentCollectorTree whose results are
    92  	// for blocks that conflict with the finalized blocks
    93  	t.lastSealedID = sealed.ID()
    94  	for height := finalized.Height; height > t.lastFinalizedHeight; height-- {
    95  		finalizedBlock, err := t.headers.ByHeight(height)
    96  		if err != nil {
    97  			return fmt.Errorf("could not retrieve finalized block at height %d: %w", height, err)
    98  		}
    99  		finalizedBlockID := finalizedBlock.ID()
   100  		iter := t.forest.GetVerticesAtLevel(height)
   101  		for iter.HasNext() {
   102  			vertex := iter.NextVertex().(*assignmentCollectorVertex)
   103  			if finalizedBlockID != vertex.collector.BlockID() {
   104  				err = t.updateForkState(vertex, Orphaned)
   105  				if err != nil {
   106  					return err
   107  				}
   108  			}
   109  		}
   110  	}
   111  
   112  	t.lastFinalizedHeight = finalized.Height
   113  
   114  	// WARNING: the next block of code implements a special fallback mechanism to recover from a sealing halt.
   115  	// CONTEXT: As blocks are incorporated into the chain, they are picked up by sealing.Core and added to the AssignmentCollectorTree.
   116  	// By definition, all blocks should be reported to sealing.Core, which is why all results should be stored in the AssignmentCollectorTree.
   117  	// When finalization kicks in, we must have a finalized, processable fork of assignment collectors.
   118  	// The next section checks whether we indeed have such a finalized fork, starting from the last finalized seal. By definition, it has to be
   119  	// processable. If it's not, we have a critical bug which results in blocks being missed by sealing.Core.
   120  	// TODO: remove this at some point when this logic matures.
   121  	if t.lastSealedHeight < sealed.Height {
   122  		collectors, err := t.selectCollectorsForFinalizedFork(sealed.Height+1, finalized.Height)
   123  		if err != nil {
   124  			return fmt.Errorf("could not select finalized fork: %w", err)
   125  		}
   126  
   127  		for _, collectorVertex := range collectors {
   128  			clr := collectorVertex.collector
   129  			if clr.ProcessingStatus() != VerifyingApprovals {
   130  				log.Error().Msgf("AssignmentCollectorTree has found a non-processable finalized fork %v,"+
   131  					" this is unexpected and shouldn't happen, recovering", clr.BlockID())
   132  			}
   133  			currentStatus := clr.ProcessingStatus()
   134  			if clr.Block().Height < finalized.Height {
   135  				err = clr.ChangeProcessingStatus(currentStatus, VerifyingApprovals)
   136  			} else {
   137  				err = t.updateForkState(collectorVertex, VerifyingApprovals)
   138  			}
   139  			if err != nil {
   140  				return err
   141  			}
   142  		}
   143  
   144  		t.lastSealedHeight = sealed.Height
   145  	}
   146  
   147  	// STEP 2: prune levels below the latest sealed finalized height.
   148  	err := t.pruneUpToHeight(sealed.Height)
   149  	if err != nil {
   150  		return fmt.Errorf("could not prune collectors tree up to height %d: %w", sealed.Height, err)
   151  	}
   152  
   153  	return nil
   154  }
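
        // A rough sketch of how a caller might drive this method on every finalization event. The
        // headers finalizedHeader and lastSealedHeader are assumed to come from the caller's
        // finalization logic; error handling is abbreviated:
        //
        //	if err := tree.FinalizeForkAtLevel(finalizedHeader, lastSealedHeader); err != nil {
        //		return fmt.Errorf("could not process finalization of block %v: %w", finalizedHeader.ID(), err)
        //	}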
   155  
   156  // selectCollectorsForFinalizedFork returns all collectors whose executed block is finalized
   157  // and has height in [startHeight, finalizedHeight].
   158  // NOT concurrency safe.
   159  func (t *AssignmentCollectorTree) selectCollectorsForFinalizedFork(startHeight, finalizedHeight uint64) ([]*assignmentCollectorVertex, error) {
   160  	var fork []*assignmentCollectorVertex
   161  	for height := startHeight; height <= finalizedHeight; height++ {
   162  		iter := t.forest.GetVerticesAtLevel(height)
   163  		finalizedBlockID, err := t.headers.BlockIDByHeight(height)
   164  		if err != nil {
   165  			return nil, fmt.Errorf("could not retrieve finalized block at height %d: %w", height, err)
   166  		}
   167  		for iter.HasNext() {
   168  			vertex := iter.NextVertex().(*assignmentCollectorVertex)
   169  			if finalizedBlockID == vertex.collector.BlockID() {
   170  				fork = append(fork, vertex)
   171  				break
   172  			}
   173  		}
   174  	}
   175  	return fork, nil
   176  }
   177  
   178  // updateForkState changes the state of `vertex` and all its descendants to `newState`.
   179  // NOT concurrency safe.
   180  func (t *AssignmentCollectorTree) updateForkState(vertex *assignmentCollectorVertex, newState ProcessingStatus) error {
   181  	currentStatus := vertex.collector.ProcessingStatus()
   182  	if currentStatus == newState {
   183  		return nil
   184  	}
   185  	err := vertex.collector.ChangeProcessingStatus(currentStatus, newState)
   186  	if err != nil {
   187  		return err
   188  	}
   189  
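        	// Recursively propagate the new state to all children and thereby to the entire
        	// sub-tree (fork) rooted at this vertex.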
   190  	iter := t.forest.GetChildren(vertex.VertexID())
   191  	for iter.HasNext() {
   192  		err := t.updateForkState(iter.NextVertex().(*assignmentCollectorVertex), newState)
   193  		if err != nil {
   194  			return err
   195  		}
   196  	}
   197  
   198  	return nil
   199  }
   200  
   201  // GetCollectorsByInterval returns all collectors in state `VerifyingApprovals`
   202  // whose executed block has height in [from, to).
   203  func (t *AssignmentCollectorTree) GetCollectorsByInterval(from, to uint64) []AssignmentCollector {
   204  	var vertices []AssignmentCollector
   205  	t.lock.RLock()
   206  	defer t.lock.RUnlock()
   207  
   208  	if from < t.forest.LowestLevel {
   209  		from = t.forest.LowestLevel
   210  	}
   211  
   212  	for l := from; l < to; l++ {
   213  		iter := t.forest.GetVerticesAtLevel(l)
   214  		for iter.HasNext() {
   215  			vertex := iter.NextVertex().(*assignmentCollectorVertex)
   216  			if vertex.collector.ProcessingStatus() == VerifyingApprovals {
   217  				vertices = append(vertices, vertex.collector)
   218  			}
   219  		}
   220  	}
   221  
   222  	return vertices
   223  }
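
        // Example (a sketch): collecting all verifying collectors within a height window. The bounds
        // lastSealedHeight and maxProcessingHeight are hypothetical values held by the caller:
        //
        //	for _, collector := range tree.GetCollectorsByInterval(lastSealedHeight, maxProcessingHeight) {
        //		_ = collector // e.g. request missing approvals for this collector's result
        //	}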
   224  
   225  // LazyInitCollector is a helper structure that is used to return a lazily-initialized collector.
   226  type LazyInitCollector struct {
   227  	Collector AssignmentCollector
   228  	Created   bool // true if the collector was newly created; false if it was retrieved from the cache
   229  }
   230  
   231  // GetOrCreateCollector performs lazy initialization of AssignmentCollector using double-checked locking.
   232  func (t *AssignmentCollectorTree) GetOrCreateCollector(result *flow.ExecutionResult) (*LazyInitCollector, error) {
   233  	resultID := result.ID()
   234  	// First, check if we already have a collector for this result.
   235  	cachedCollector := t.GetCollector(resultID)
   236  	if cachedCollector != nil {
   237  		return &LazyInitCollector{
   238  			Collector: cachedCollector,
   239  			Created:   false,
   240  		}, nil
   241  	}
   242  
   243  	collector, err := t.createCollector(result)
   244  	if err != nil {
   245  		return nil, fmt.Errorf("could not create assignment collector for result %v: %w", resultID, err)
   246  	}
   247  	vertex := &assignmentCollectorVertex{
   248  		collector: collector,
   249  	}
   250  
   251  	// Initial check showed that there was no collector. However, it's possible that after the
   252  	// initial check but before acquiring the lock to add the newly-created collector, another
   253  	// goroutine already added the needed collector. Hence, check again after acquiring the lock:
   254  	t.lock.Lock()
   255  	defer t.lock.Unlock()
   256  
   257  	// The levelled forest doesn't treat this case as an error, but we shouldn't create collectors
   258  	// for vertices at levels lower than forest.LowestLevel.
   259  	if vertex.Level() < t.forest.LowestLevel {
   260  		return nil, engine.NewOutdatedInputErrorf("cannot add collector because its height %d is smaller than the lowest height %d", vertex.Level(), t.forest.LowestLevel)
   261  	}
   262  
   263  	v, found := t.forest.GetVertex(resultID)
   264  	if found {
   265  		return &LazyInitCollector{
   266  			Collector: v.(*assignmentCollectorVertex).collector,
   267  			Created:   false,
   268  		}, nil
   269  	}
   270  
   271  	// add AssignmentCollector as vertex to tree
   272  	err = t.forest.VerifyVertex(vertex)
   273  	if err != nil {
   274  		return nil, fmt.Errorf("failed to store assignment collector into the tree: %w", err)
   275  	}
   276  
   277  	t.forest.AddVertex(vertex)
   278  
   279  	// An assignment collector is processable if and only if:
   280  	// either (i) the parent result is the latest sealed result (seal is finalized)
   281  	//    or (ii) the result's parent is processable
   282  	parent, parentFound := t.forest.GetVertex(result.PreviousResultID)
   283  	newStatus := CachingApprovals
   284  	if parentFound {
   285  		newStatus = parent.(*assignmentCollectorVertex).collector.ProcessingStatus()
   286  	}
   287  	if collector.Block().ParentID == t.lastSealedID {
   288  		newStatus = VerifyingApprovals
   289  	}
   290  	err = t.updateForkState(vertex, newStatus)
   291  	if err != nil {
   292  		return nil, fmt.Errorf("failed to update fork state: %w", err)
   293  	}
   294  
   295  	return &LazyInitCollector{
   296  		Collector: vertex.collector,
   297  		Created:   true,
   298  	}, nil
   299  }
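
        // Example usage (a sketch; receipt is a hypothetical *flow.ExecutionReceipt held by the caller).
        // The Created flag lets the caller run one-time setup, such as processing cached approvals,
        // only when the collector was freshly added to the tree:
        //
        //	lazyCollector, err := tree.GetOrCreateCollector(&receipt.ExecutionResult)
        //	if err != nil {
        //		return fmt.Errorf("could not get collector for result %v: %w", receipt.ExecutionResult.ID(), err)
        //	}
        //	if lazyCollector.Created {
        //		// first time this result is seen: perform any one-off processing here
        //	}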
   300  
   301  // pruneUpToHeight prunes all assignment collectors for results with height up to but
   302  // NOT INCLUDING `limit`. No-op if limit is lower than the previous value (caution:
   303  // this is different from the levelled forest's convention).
   304  // This function is NOT concurrency safe.
   305  func (t *AssignmentCollectorTree) pruneUpToHeight(limit uint64) error {
   306  	if t.forest.LowestLevel >= limit {
   307  		return nil
   308  	}
   309  
   310  	// remove vertices and adjust size
   311  	err := t.forest.PruneUpToLevel(limit)
   312  	if err != nil {
   313  		return fmt.Errorf("pruning Levelled Forest up to height (aka level) %d failed: %w", limit, err)
   314  	}
   315  
   316  	return nil
   317  }