github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/tracer/internal/duplicate_msgs_counter_cache.go (about)

     1  package internal
     2  
     3  import (
     4  	"fmt"
     5  	"time"
     6  
     7  	"github.com/libp2p/go-libp2p/core/peer"
     8  	"github.com/rs/zerolog"
     9  
    10  	"github.com/onflow/flow-go/model/flow"
    11  	"github.com/onflow/flow-go/module"
    12  	herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata"
    13  	"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
    14  	"github.com/onflow/flow-go/module/mempool/stdmap"
    15  	"github.com/onflow/flow-go/network/p2p/scoring"
    16  )
    17  
// DuplicateMessageTrackerCache is a cache used to store the current count of duplicate messages detected
// from a peer. This count is utilized to calculate a penalty for duplicate messages, which is then applied
// to the peer's application-specific score. The duplicate message tracker decays over time to prevent perpetual
// penalization of a peer.
type DuplicateMessageTrackerCache struct {
	// c is the in-memory and thread-safe cache storing the duplicate message counters of peers.
	c *stdmap.Backend
	// decay is the geometric decay factor applied to a peer's counter over time.
	decay float64
	// skipDecayThreshold The threshold for which when the counter is below this value, the decay function will not be called
	// (instead the counter is reset to 0 — see decayAdjustment).
	skipDecayThreshold float64
}
    29  
    30  // NewDuplicateMessageTrackerCache returns a new HeroCache-based duplicate message counter cache.
    31  // Args:
    32  //
    33  //	sizeLimit: the maximum number of entries that can be stored in the cache.
    34  //	decay: the record decay.
    35  //	logger: the logger to be used by the cache.
    36  //	collector: the metrics collector to be used by the cache.
    37  //
    38  // Returns:
    39  //   - *DuplicateMessageTrackerCache: the newly created cache with a HeroCache-based backend.
    40  func NewDuplicateMessageTrackerCache(sizeLimit uint32, decay, skipDecayThreshold float64, logger zerolog.Logger, collector module.HeroCacheMetrics) *DuplicateMessageTrackerCache {
    41  	backData := herocache.NewCache(sizeLimit,
    42  		herocache.DefaultOversizeFactor,
    43  		heropool.LRUEjection,
    44  		logger.With().Str("mempool", "gossipsub=duplicate-message-counter-cache").Logger(),
    45  		collector)
    46  	return &DuplicateMessageTrackerCache{
    47  		decay:              decay,
    48  		skipDecayThreshold: skipDecayThreshold,
    49  		c:                  stdmap.NewBackend(stdmap.WithBackData(backData)),
    50  	}
    51  }
    52  
    53  // DuplicateMessageReceived applies an adjustment that increments the number of duplicate messages received by a peer.
    54  // Returns number of duplicate messages received after the adjustment. The record is initialized before
    55  // the adjustment func is applied that will increment the counter value.
    56  //   - exception only in cases of internal data inconsistency or bugs. No errors are expected.
    57  func (d *DuplicateMessageTrackerCache) DuplicateMessageReceived(peerID peer.ID) (float64, error) {
    58  	var err error
    59  	adjustFunc := func(entity flow.Entity) flow.Entity {
    60  		entity, err = d.decayAdjustment(entity) // first decay the record
    61  		if err != nil {
    62  			return entity
    63  		}
    64  		return d.incrementAdjustment(entity) // then increment the record
    65  	}
    66  
    67  	entityId := makeId(peerID)
    68  	adjustedEntity, adjusted := d.c.AdjustWithInit(entityId, adjustFunc, func() flow.Entity {
    69  		return newDuplicateMessagesCounter(entityId)
    70  	})
    71  
    72  	if err != nil {
    73  		return 0, fmt.Errorf("unexpected error while applying decay and increment adjustments for peer %s: %w", peerID, err)
    74  	}
    75  
    76  	if !adjusted {
    77  		return 0, fmt.Errorf("adjustment failed for peer %s", peerID)
    78  	}
    79  
    80  	record := mustBeDuplicateMessagesCounterEntity(adjustedEntity)
    81  
    82  	return record.Value, nil
    83  }
    84  
    85  // GetWithInit returns the current number of duplicate messages received from a peer.
    86  // The record is initialized before the count is returned.
    87  // Before the counter value is returned it is decayed using the configured decay function.
    88  // Returns the record and true if the record exists, nil and false otherwise.
    89  // Args:
    90  // - peerID: peerID of the remote peer.
    91  // Returns:
    92  // - The duplicate messages counter value after the decay and true if the record exists, 0 and false otherwise.
    93  // No errors are expected during normal operation, all errors returned are considered irrecoverable.
    94  func (d *DuplicateMessageTrackerCache) GetWithInit(peerID peer.ID) (float64, bool, error) {
    95  	var err error
    96  	adjustLogic := func(entity flow.Entity) flow.Entity {
    97  		// perform decay on gauge value
    98  		entity, err = d.decayAdjustment(entity)
    99  		return entity
   100  	}
   101  
   102  	entityId := makeId(peerID)
   103  	adjustedEntity, adjusted := d.c.AdjustWithInit(entityId, adjustLogic, func() flow.Entity {
   104  		return newDuplicateMessagesCounter(entityId)
   105  	})
   106  	if err != nil {
   107  		return 0, false, fmt.Errorf("unexpected error while applying decay adjustment for peer %s: %w", peerID, err)
   108  	}
   109  	if !adjusted {
   110  		return 0, false, fmt.Errorf("decay adjustment failed for peer %s", peerID)
   111  	}
   112  
   113  	counter := mustBeDuplicateMessagesCounterEntity(adjustedEntity)
   114  
   115  	return counter.Value, true, nil
   116  }
   117  
   118  // incrementAdjustment performs a cache adjustment that increments the guage for the duplicateMessagesCounterEntity
   119  func (d *DuplicateMessageTrackerCache) incrementAdjustment(entity flow.Entity) flow.Entity {
   120  	counter := mustBeDuplicateMessagesCounterEntity(entity)
   121  	counter.Value++
   122  	counter.lastUpdated = time.Now()
   123  	// Return the adjusted counter.
   124  	return counter
   125  }
   126  
   127  // decayAdjustment performs geometric recordDecay on the duplicate message counter gauge of a peer. This ensures a peer is not penalized forever.
   128  // All errors returned from this function are unexpected and irrecoverable.
   129  func (d *DuplicateMessageTrackerCache) decayAdjustment(entity flow.Entity) (flow.Entity, error) {
   130  	counter := mustBeDuplicateMessagesCounterEntity(entity)
   131  	duplicateMessages := counter.Value
   132  	if duplicateMessages == 0 {
   133  		return counter, nil
   134  	}
   135  
   136  	if duplicateMessages < d.skipDecayThreshold {
   137  		counter.Value = 0
   138  		return counter, nil
   139  	}
   140  
   141  	decayedVal, err := scoring.GeometricDecay(duplicateMessages, d.decay, counter.lastUpdated)
   142  	if err != nil {
   143  		return counter, fmt.Errorf("could not decay duplicate message counter: %w", err)
   144  	}
   145  
   146  	if decayedVal > duplicateMessages {
   147  		return counter, fmt.Errorf("unexpected recordDecay value %f for duplicate message counter gauge %f", decayedVal, duplicateMessages)
   148  	}
   149  
   150  	counter.Value = decayedVal
   151  	counter.lastUpdated = time.Now()
   152  	// Return the adjusted counter.
   153  	return counter, nil
   154  }