github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/engine/consensus/approvals/approvals_lru_cache.go (about)

     1  package approvals
     2  
     3  import (
     4  	"sync"
     5  
     6  	"github.com/hashicorp/golang-lru/v2/simplelru"
     7  
     8  	"github.com/onflow/flow-go/model/flow"
     9  )
    10  
// LruCache is a wrapper over `simplelru.LRUCache` that provides needed api for processing result approvals
// Extends functionality of `simplelru.LRUCache` by introducing additional index for quicker access.
type LruCache struct {
	// lru stores approvals keyed by approval partial ID; the eviction
	// callback installed in NewApprovalsLRUCache keeps byResultID in sync
	// whenever an entry is evicted or removed.
	lru  simplelru.LRUCache[flow.Identifier, *flow.ResultApproval]
	// lock guards both lru and byResultID; simplelru itself is not
	// safe for concurrent use, so every method acquires this lock.
	lock sync.RWMutex
	// secondary index by result id, since multiple approvals could
	// reference same result
	byResultID map[flow.Identifier]map[flow.Identifier]struct{}
}
    20  
    21  func NewApprovalsLRUCache(limit uint) *LruCache {
    22  	byResultID := make(map[flow.Identifier]map[flow.Identifier]struct{})
    23  	// callback has to be called while we are holding lock
    24  	lru, _ := simplelru.NewLRU(int(limit), func(key flow.Identifier, approval *flow.ResultApproval) {
    25  		delete(byResultID[approval.Body.ExecutionResultID], approval.Body.PartialID())
    26  		if len(byResultID[approval.Body.ExecutionResultID]) == 0 {
    27  			delete(byResultID, approval.Body.ExecutionResultID)
    28  		}
    29  	})
    30  	return &LruCache{
    31  		lru:        lru,
    32  		byResultID: byResultID,
    33  	}
    34  }
    35  
    36  func (c *LruCache) Peek(approvalID flow.Identifier) *flow.ResultApproval {
    37  	c.lock.RLock()
    38  	defer c.lock.RUnlock()
    39  	// check if we have it in the cache
    40  	resource, cached := c.lru.Peek(approvalID)
    41  	if cached {
    42  		return resource
    43  	}
    44  
    45  	return nil
    46  }
    47  
    48  func (c *LruCache) Get(approvalID flow.Identifier) *flow.ResultApproval {
    49  	c.lock.Lock()
    50  	defer c.lock.Unlock()
    51  	// check if we have it in the cache
    52  	resource, cached := c.lru.Get(approvalID)
    53  	if cached {
    54  		return resource
    55  	}
    56  
    57  	return nil
    58  }
    59  
    60  func (c *LruCache) TakeByResultID(resultID flow.Identifier) []*flow.ResultApproval {
    61  	c.lock.Lock()
    62  	defer c.lock.Unlock()
    63  
    64  	ids, ok := c.byResultID[resultID]
    65  	if !ok {
    66  		return nil
    67  	}
    68  
    69  	approvals := make([]*flow.ResultApproval, 0, len(ids))
    70  	for approvalID := range ids {
    71  		// check if we have it in the cache
    72  		if resource, ok := c.lru.Peek(approvalID); ok {
    73  			// no need to cleanup secondary index since it will be
    74  			// cleaned up in evict callback
    75  			_ = c.lru.Remove(approvalID)
    76  			approvals = append(approvals, resource)
    77  		}
    78  	}
    79  
    80  	return approvals
    81  }
    82  
    83  func (c *LruCache) Put(approval *flow.ResultApproval) {
    84  	approvalID := approval.Body.PartialID()
    85  	resultID := approval.Body.ExecutionResultID
    86  	c.lock.Lock()
    87  	defer c.lock.Unlock()
    88  	// cache the resource and eject least recently used one if we reached limit
    89  	_ = c.lru.Add(approvalID, approval)
    90  	_, ok := c.byResultID[resultID]
    91  	if !ok {
    92  		c.byResultID[resultID] = map[flow.Identifier]struct{}{approvalID: {}}
    93  	} else {
    94  		c.byResultID[resultID][approvalID] = struct{}{}
    95  	}
    96  }