github.com/koko1123/flow-go-1@v0.29.6/engine/consensus/approvals/assignment_collector_tree.go

package approvals

import (
	"fmt"
	"sync"

	"github.com/rs/zerolog/log"

	"github.com/koko1123/flow-go-1/engine"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/module/forest"
	"github.com/koko1123/flow-go-1/storage"
)

// assignmentCollectorVertex is a helper structure that wraps an AssignmentCollector
// so it implements the LevelledForest's `Vertex` interface:
//   - VertexID is defined as the ID of the execution result
//   - Level is defined as the height of the executed block
type assignmentCollectorVertex struct {
	collector AssignmentCollector
}

/* Methods implementing LevelledForest's Vertex interface */

func (v *assignmentCollectorVertex) VertexID() flow.Identifier { return v.collector.ResultID() }
func (v *assignmentCollectorVertex) Level() uint64             { return v.collector.Block().Height }
func (v *assignmentCollectorVertex) Parent() (flow.Identifier, uint64) {
	return v.collector.Result().PreviousResultID, v.collector.Block().Height - 1
}

// NewCollectorFactoryMethod is a factory method to generate an AssignmentCollector for an execution result
type NewCollectorFactoryMethod = func(result *flow.ExecutionResult) (AssignmentCollector, error)

// AssignmentCollectorTree is a mempool holding assignment collectors, which is aware of the tree structure
// formed by the execution results. The mempool supports pruning by height: only collectors
// descending from the latest sealed and finalized result are relevant.
// Safe for concurrent access. Internally, the mempool utilizes the LevelledForest.
type AssignmentCollectorTree struct {
	forest              *forest.LevelledForest
	lock                sync.RWMutex
	createCollector     NewCollectorFactoryMethod
	lastSealedID        flow.Identifier
	lastSealedHeight    uint64
	lastFinalizedHeight uint64
	headers             storage.Headers
}

// NewAssignmentCollectorTree instantiates an AssignmentCollectorTree, rooted at the latest sealed block.
func NewAssignmentCollectorTree(lastSealed *flow.Header, headers storage.Headers, createCollector NewCollectorFactoryMethod) *AssignmentCollectorTree {
	return &AssignmentCollectorTree{
		forest:              forest.NewLevelledForest(lastSealed.Height),
		lock:                sync.RWMutex{},
		createCollector:     createCollector,
		lastSealedID:        lastSealed.ID(),
		lastFinalizedHeight: lastSealed.Height,
		lastSealedHeight:    lastSealed.Height,
		headers:             headers,
	}
}
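
// exampleBuildTree is a hypothetical usage sketch, NOT part of the original file:
// it illustrates how a caller might construct the tree rooted at the latest sealed
// header and lazily add a collector for an incoming execution result. The arguments
// `lastSealed`, `headers`, `factory`, and `result` are assumed to be supplied by the caller.
func exampleBuildTree(lastSealed *flow.Header, headers storage.Headers, factory NewCollectorFactoryMethod, result *flow.ExecutionResult) error {
	tree := NewAssignmentCollectorTree(lastSealed, headers, factory)

	// GetOrCreateCollector is safe for concurrent use; Created reports whether this
	// call instantiated the collector or found one already cached in the tree.
	lazy, err := tree.GetOrCreateCollector(result)
	if err != nil {
		return fmt.Errorf("could not get or create collector: %w", err)
	}
	if lazy.Created {
		log.Info().Msgf("created collector for result %v", lazy.Collector.ResultID())
	}
	return nil
}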

// GetSize returns the number of collectors stored in the tree.
func (t *AssignmentCollectorTree) GetSize() uint64 {
	t.lock.RLock()
	defer t.lock.RUnlock()
	// locking is still needed, since forest.GetSize is not concurrency safe
	return t.forest.GetSize()
}

// GetCollector returns the assignment collector for the given result.
func (t *AssignmentCollectorTree) GetCollector(resultID flow.Identifier) AssignmentCollector {
	t.lock.RLock()
	defer t.lock.RUnlock()
	vertex, found := t.forest.GetVertex(resultID)
	if !found {
		return nil
	}

	v := vertex.(*assignmentCollectorVertex)
	return v.collector
}

// FinalizeForkAtLevel orphans forks in the AssignmentCollectorTree and prunes levels below the
// sealed finalized height. When a block is finalized, we can mark the results for conflicting forks as
// orphaned and stop processing approvals for them. Eventually all forks will be cleaned up by height.
func (t *AssignmentCollectorTree) FinalizeForkAtLevel(finalized *flow.Header, sealed *flow.Header) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	if t.lastFinalizedHeight >= finalized.Height {
		return nil
	}

	// STEP 1: orphan forks in the AssignmentCollectorTree whose results are
	// for blocks that conflict with the finalized blocks
	t.lastSealedID = sealed.ID()
	for height := finalized.Height; height > t.lastFinalizedHeight; height-- {
		finalizedBlock, err := t.headers.ByHeight(height)
		if err != nil {
			return fmt.Errorf("could not retrieve finalized block at height %d: %w", height, err)
		}
		finalizedBlockID := finalizedBlock.ID()
		iter := t.forest.GetVerticesAtLevel(height)
		for iter.HasNext() {
			vertex := iter.NextVertex().(*assignmentCollectorVertex)
			if finalizedBlockID != vertex.collector.BlockID() {
				err = t.updateForkState(vertex, Orphaned)
				if err != nil {
					return err
				}
			}
		}
	}

	t.lastFinalizedHeight = finalized.Height

	// WARNING: the next block of code implements a special fallback mechanism to recover from a sealing halt.
	// CONTEXT: As blocks are incorporated into the chain, they are picked up by sealing.Core and added to the AssignmentCollectorTree.
	// By definition, all blocks should be reported to sealing.Core, which is why all results should be stored in the AssignmentCollectorTree.
	// When finalization kicks in, we must have a finalized processable fork of assignment collectors.
	// The next section checks whether we indeed have a finalized fork, starting from the last finalized seal. By definition, it has to be
	// processable. If it is not, then we have a critical bug that results in blocks being missed by sealing.Core.
	// TODO: remove this at some point when this logic matures.
	if t.lastSealedHeight < sealed.Height {
		collectors, err := t.selectCollectorsForFinalizedFork(sealed.Height+1, finalized.Height)
		if err != nil {
			return fmt.Errorf("could not select finalized fork: %w", err)
		}

		for _, collectorVertex := range collectors {
			clr := collectorVertex.collector
			if clr.ProcessingStatus() != VerifyingApprovals {
				log.Error().Msgf("AssignmentCollectorTree has found a non-processable finalized fork %v,"+
					" this is unexpected and shouldn't happen, recovering", clr.BlockID())
			}
			currentStatus := clr.ProcessingStatus()
			if clr.Block().Height < finalized.Height {
				err = clr.ChangeProcessingStatus(currentStatus, VerifyingApprovals)
			} else {
				err = t.updateForkState(collectorVertex, VerifyingApprovals)
			}
			if err != nil {
				return err
			}
		}

		t.lastSealedHeight = sealed.Height
	}

	// STEP 2: prune levels below the latest sealed finalized height.
	err := t.pruneUpToHeight(sealed.Height)
	if err != nil {
		return fmt.Errorf("could not prune collectors tree up to height %d: %w", sealed.Height, err)
	}

	return nil
}
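
// exampleOnBlockFinalized is a hypothetical sketch, NOT part of the original file:
// it shows how a consumer of finalization events might drive the tree. On every newly
// finalized block, the finalized header is passed in together with the latest sealed
// header; conflicting forks are orphaned and levels below the sealed height are pruned.
func exampleOnBlockFinalized(tree *AssignmentCollectorTree, finalized *flow.Header, sealed *flow.Header) error {
	if err := tree.FinalizeForkAtLevel(finalized, sealed); err != nil {
		return fmt.Errorf("could not finalize fork at height %d: %w", finalized.Height, err)
	}
	return nil
}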

// selectCollectorsForFinalizedFork collects all collectors for blocks with height
// in [startHeight, finalizedHeight] whose block is finalized.
// NOT concurrency safe.
func (t *AssignmentCollectorTree) selectCollectorsForFinalizedFork(startHeight, finalizedHeight uint64) ([]*assignmentCollectorVertex, error) {
	var fork []*assignmentCollectorVertex
	for height := startHeight; height <= finalizedHeight; height++ {
		iter := t.forest.GetVerticesAtLevel(height)
		finalizedBlock, err := t.headers.ByHeight(height)
		if err != nil {
			return nil, fmt.Errorf("could not retrieve finalized block at height %d: %w", height, err)
		}
		finalizedBlockID := finalizedBlock.ID()
		for iter.HasNext() {
			vertex := iter.NextVertex().(*assignmentCollectorVertex)
			if finalizedBlockID == vertex.collector.BlockID() {
				fork = append(fork, vertex)
				break
			}
		}
	}
	return fork, nil
}

// updateForkState changes the state of `vertex` and all its descendants to `newState`.
// NOT concurrency safe.
func (t *AssignmentCollectorTree) updateForkState(vertex *assignmentCollectorVertex, newState ProcessingStatus) error {
	currentStatus := vertex.collector.ProcessingStatus()
	if currentStatus == newState {
		return nil
	}
	err := vertex.collector.ChangeProcessingStatus(currentStatus, newState)
	if err != nil {
		return err
	}

	iter := t.forest.GetChildren(vertex.VertexID())
	for iter.HasNext() {
		err := t.updateForkState(iter.NextVertex().(*assignmentCollectorVertex), newState)
		if err != nil {
			return err
		}
	}

	return nil
}

// GetCollectorsByInterval returns all collectors in state `VerifyingApprovals`
// whose executed block has height in [from, to).
func (t *AssignmentCollectorTree) GetCollectorsByInterval(from, to uint64) []AssignmentCollector {
	var vertices []AssignmentCollector
	t.lock.RLock()
	defer t.lock.RUnlock()

	if from < t.forest.LowestLevel {
		from = t.forest.LowestLevel
	}

	for l := from; l < to; l++ {
		iter := t.forest.GetVerticesAtLevel(l)
		for iter.HasNext() {
			vertex := iter.NextVertex().(*assignmentCollectorVertex)
			if vertex.collector.ProcessingStatus() == VerifyingApprovals {
				vertices = append(vertices, vertex.collector)
			}
		}
	}

	return vertices
}
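
// exampleCollectVerifying is a hypothetical sketch, NOT part of the original file:
// it retrieves all collectors in state VerifyingApprovals for heights in the half-open
// interval [from, to) and hands each one to the caller-supplied `process` callback,
// which stands in for whatever per-collector work the caller needs to perform.
func exampleCollectVerifying(tree *AssignmentCollectorTree, from, to uint64, process func(AssignmentCollector)) {
	for _, collector := range tree.GetCollectorsByInterval(from, to) {
		process(collector)
	}
}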

// LazyInitCollector is a helper structure used to return a lazily initialized collector.
type LazyInitCollector struct {
	Collector AssignmentCollector
	Created   bool // whether the collector was created or retrieved from the cache
}

// GetOrCreateCollector performs lazy initialization of an AssignmentCollector using double-checked locking.
func (t *AssignmentCollectorTree) GetOrCreateCollector(result *flow.ExecutionResult) (*LazyInitCollector, error) {
	resultID := result.ID()
	// first, let's check whether we already have a collector
	cachedCollector := t.GetCollector(resultID)
	if cachedCollector != nil {
		return &LazyInitCollector{
			Collector: cachedCollector,
			Created:   false,
		}, nil
	}

	collector, err := t.createCollector(result)
	if err != nil {
		return nil, fmt.Errorf("could not create assignment collector for result %v: %w", resultID, err)
	}
	vertex := &assignmentCollectorVertex{
		collector: collector,
	}

	// The initial check showed that there was no collector. However, it is possible that after the
	// initial check but before acquiring the lock to add the newly created collector, another
	// goroutine already added the needed collector. Hence, check again after acquiring the lock:
	t.lock.Lock()
	defer t.lock.Unlock()

	// The levelled forest doesn't treat this case as an error, but we shouldn't create collectors
	// for vertices lower than forest.LowestLevel.
	if vertex.Level() < t.forest.LowestLevel {
		return nil, engine.NewOutdatedInputErrorf("cannot add collector because its height %d is smaller than the lowest height %d", vertex.Level(), t.forest.LowestLevel)
	}

	v, found := t.forest.GetVertex(resultID)
	if found {
		return &LazyInitCollector{
			Collector: v.(*assignmentCollectorVertex).collector,
			Created:   false,
		}, nil
	}

	// add the AssignmentCollector as a vertex to the tree
	err = t.forest.VerifyVertex(vertex)
	if err != nil {
		return nil, fmt.Errorf("failed to store assignment collector into the tree: %w", err)
	}

	t.forest.AddVertex(vertex)

	// An assignment collector is processable if and only if:
	// either (i) the parent result is the latest sealed result (seal is finalized)
	//     or (ii) the result's parent is processable
	parent, parentFound := t.forest.GetVertex(result.PreviousResultID)
	newStatus := CachingApprovals
	if parentFound {
		newStatus = parent.(*assignmentCollectorVertex).collector.ProcessingStatus()
	}
	if collector.Block().ParentID == t.lastSealedID {
		newStatus = VerifyingApprovals
	}
	err = t.updateForkState(vertex, newStatus)
	if err != nil {
		return nil, fmt.Errorf("failed to update fork state: %w", err)
	}

	return &LazyInitCollector{
		Collector: vertex.collector,
		Created:   true,
	}, nil
}
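
// exampleConcurrentInit is a hypothetical sketch, NOT part of the original file,
// illustrating why GetOrCreateCollector uses double-checked locking: several
// goroutines may race to create a collector for the same result, but at most one
// of them observes Created == true; all others receive the cached collector.
func exampleConcurrentInit(tree *AssignmentCollectorTree, result *flow.ExecutionResult) {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			lazy, err := tree.GetOrCreateCollector(result)
			if err != nil {
				log.Err(err).Msg("lazy collector initialization failed")
				return
			}
			log.Info().Bool("created", lazy.Created).Msg("collector ready")
		}()
	}
	wg.Wait()
}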

// pruneUpToHeight prunes all assignment collectors for results with height up to but
// NOT INCLUDING `limit`. No-op if `limit` is lower than the previous value (caution:
// this is different from the levelled forest's convention).
// This function is NOT concurrency safe.
func (t *AssignmentCollectorTree) pruneUpToHeight(limit uint64) error {
	if t.forest.LowestLevel >= limit {
		return nil
	}

	// remove vertices and adjust size
	err := t.forest.PruneUpToLevel(limit)
	if err != nil {
		return fmt.Errorf("pruning Levelled Forest up to height (aka level) %d failed: %w", limit, err)
	}

	return nil
}
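
// examplePruneSemantics is a hypothetical sketch, NOT part of the original file,
// clarifying the pruning convention: after pruneUpToHeight(limit), collectors at
// heights below `limit` are gone, while collectors at height `limit` (if any) are
// retained. Calling it again with a lower limit is a no-op rather than an error.
func examplePruneSemantics(t *AssignmentCollectorTree) error {
	if err := t.pruneUpToHeight(100); err != nil {
		return err
	}
	// a second call with a smaller limit silently does nothing
	return t.pruneUpToHeight(50)
}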