github.com/onflow/flow-go@v0.33.17/config/default-config.yml

     1  config-file: "./default-config.yml"
     2  # WARNING: Only modify the network configurations below if you fully understand their implications.
     3  # Incorrect settings may lead to system instability, security vulnerabilities, or degraded performance.
     4  # Make changes with caution and refer to the documentation for guidance.
     5  # Network configuration.
     6  network-config:
     7    # Network Configuration
     8    # Connection pruning determines whether connections to nodes
     9    # that are not part of protocol state should be trimmed
    10    networking-connection-pruning: true
    11    # Preferred unicast protocols: a list of unicast protocols in preferred order
    12    preferred-unicast-protocols: [ ]
    13    received-message-cache-size: 10_000
    14    peerupdate-interval: 10m
    15  
    16    dns-cache-ttl: 5m
    17    # The size of the queue for notifications about new peers in the disallow list.
    18    disallow-list-notification-cache-size: 100
    19    unicast:
    20      rate-limiter:
    21        # Setting this to true will disable connection disconnects and gating when unicast rate limiters are configured
    22        dry-run: true
    23        # The amount of time a peer is forced to wait before being allowed to reconnect to the node after being rate limited
    24        lockout-duration: 10s
    25        # Number of unicast messages a peer is allowed to send per second
    26        message-rate-limit: 0
    27        # Bandwidth size in bytes a peer is allowed to send via unicast streams per second
    28        bandwidth-rate-limit: 0
    29        # Bandwidth size in bytes a peer is allowed to send via unicast streams at once
    30        bandwidth-burst-limit: 1e9
    31      manager:
    32        # The minimum number of consecutive successful streams to reset the unicast stream creation retry budget from zero to the maximum default. If it is set to 100 for example, it
    33        # means that if a peer has 100 consecutive successful streams to the remote peer, and the remote peer has a zero stream creation budget,
    34        # the unicast stream creation retry budget for that remote peer will be reset to the maximum default.
    35        stream-zero-retry-reset-threshold: 100
    36        # The maximum number of retry attempts for creating a unicast stream to a remote peer before giving up. If it is set to 3 for example, it means that if a peer fails to create
    37        # a unicast stream to a remote peer 3 times, the peer will give up and will not retry creating a unicast stream to that remote peer.
    38        # When it is set to zero, the peer will not retry creating a unicast stream to a remote peer if the first attempt fails.
    39        max-stream-creation-retry-attempt-times: 3
    40        # The size of the dial config cache used to keep track of the dial config for each remote peer. The dial config is used to keep track of the dial retry budget for each remote peer.
    41        # Recommended to set it to the maximum number of remote peers in the network.
    42        dial-config-cache-size: 10_000
    43        # Unicast create stream retry delay is the initial delay used in the exponential backoff for create stream retries
    44        create-stream-retry-delay: 1s
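              # For illustration only (assuming a simple doubling backoff, which this file does not prescribe):
              # with create-stream-retry-delay: 1s and max-stream-creation-retry-attempt-times: 3, retries of a
              # failed stream creation would be spaced roughly 1s, 2s, and 4s apart before the peer gives up.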
    45      message-timeout: 5s
    46      # Enable stream protection for unicast streams; when enabled, all connections that are being established or
    47      #	have been already established for unicast streams are protected, meaning that they won't be closed by the connection manager.
    48      #	This is useful for preventing the connection manager from closing unicast streams that are being used by the application layer.
    49      #	However, it may interfere with the resource manager of libp2p, i.e., the connection manager may not be able to close connections
    50      #	that are not being used by the application layer while at the same time the node is running out of resources for new connections.
    51      enable-stream-protection: true
    52    # Resource manager config
    53    libp2p-resource-manager:
    54      # Maximum allowed fraction of memory to be allocated by the libp2p resources in [0,1]
    55      # setting to zero means no allocation of memory by libp2p; and libp2p will run with very low limits
    56      memory-limit-ratio: 0.5 # flow default
    57      # Maximum allowed fraction of file descriptors to be allocated by the libp2p resources in [0,1]
    58      # setting to zero means no allocation of file descriptors by libp2p; and libp2p will run with very low limits
    59      file-descriptors-ratio: 0.2 # libp2p default
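            # For illustration only (the absolute numbers are hypothetical, not defaults): on a host with 16 GiB of memory
            # and a limit of 1_048_576 file descriptors, memory-limit-ratio: 0.5 would allow the libp2p resource manager
            # to use up to ~8 GiB, and file-descriptors-ratio: 0.2 up to ~209_715 file descriptors.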
    60      # limits override: any non-zero values for libp2p-resource-limit-override will override the default values of the libp2p resource limits.
    61      limits-override:
    62        system:
    63          # maximum number of inbound system-wide streams, across all peers and protocols
    64          # Note that streams are ephemeral and are created and destroyed intermittently.
    65          streams-inbound: 15_000 # override
    66          # maximum number of outbound system-wide streams, across all peers and protocols
    67          # Note that streams are ephemeral and are created and destroyed intermittently.
    68          streams-outbound: 15_000 # override
    69          connections-inbound: 0 # no-override, use default
    70          connections-outbound: 0 # no-override, use default
    71          fd: 0 # no-override, use default
    72          memory-bytes: 0 # no-override, use default
    73        transient:
    74          # maximum number of inbound transient streams, across all streams that are not yet fully opened and associated with a protocol.
    75          # Note that streams are ephemeral and are created and destroyed intermittently.
    76          streams-inbound: 15_000 # override
    77          # maximum number of outbound transient streams, across all streams that are not yet fully opened and associated with a protocol.
    78          # Note that streams are ephemeral and are created and destroyed intermittently.
    79          streams-outbound: 15_000 # override
    80          connections-inbound: 0 # no-override, use default
    81          connections-outbound: 0 # no-override, use default
    82          fd: 0 # no-override, use default
    83          memory-bytes: 0 # no-override, use default
    84        protocol:
    85          # maximum number of inbound streams for each protocol across all peers; this is a per-protocol limit. We expect at least
    86          # three protocols per node; gossipsub, unicast, and dht. Note that streams are ephemeral and are created and destroyed intermittently.
    87          streams-inbound: 5000 # override
    88          # maximum number of outbound streams for each protocol across all peers; this is a per-protocol limit. We expect at least
    89          # three protocols per node; gossipsub, unicast, and dht. Note that streams are ephemeral and are created and destroyed intermittently.
    90          streams-outbound: 5000 # override
    91          connections-inbound: 0 # no-override, use default
    92          connections-outbound: 0 # no-override, use default
    93          fd: 0 # no-override, use default
    94          memory-bytes: 0 # no-override, use default
    95        peer:
    96          # maximum number of inbound streams from each peer across all protocols.
    97          streams-inbound: 1000 # override
    98          # maximum number of outbound streams from each peer across all protocols.
    99          streams-outbound: 1000 # override
   100          connections-inbound: 0 # no-override, use default
   101          connections-outbound: 0 # no-override, use default
   102          fd: 0 # no-override, use default
   103          memory-bytes: 0 # no-override, use default
   104        peer-protocol:
   105          # maximum number of inbound streams from each peer for each protocol.
   106          streams-inbound: 500 # override
   107          # maximum number of outbound streams from each peer for each protocol.
   108          streams-outbound: 500 # override
   109          connections-inbound: 0 # no-override, use default
   110          connections-outbound: 0 # no-override, use default
   111          fd: 0 # no-override, use default
   112          memory-bytes: 0 # no-override, use default
   113    connection-manager:
   114      # HighWatermark and LowWatermark govern the number of connections maintained by the ConnManager.
   115      # When the peer count exceeds the HighWatermark, peers will be pruned (and
   116      # their connections terminated) until only LowWatermark peers remain. In other words, whenever the
   117      # peer count is x > HighWatermark, the ConnManager will prune x - LowWatermark peers.
   118      # The pruning algorithm is as follows:
   119      # 1. The ConnManager will not prune any peers that have been connected for less than GracePeriod.
   120      # 2. The ConnManager will not prune any peers that are protected.
   121      # 3. The ConnManager will sort the peers based on their number of streams and direction of connections, and
   122      # prunes the peers with the least number of streams. If there are ties, the peer with the incoming connection
   123      # will be pruned. If both peers have incoming connections, and there are still ties, one of the peers will be
   124      # pruned at random.
   125      # Algorithm implementation is in https://github.com/libp2p/go-libp2p/blob/master/p2p/net/connmgr/connmgr.go#L262-L318
   126      # We assume a network of around 500 nodes, where each node is allowed to make at most 8 connections to any given remote node.
   127      # We hence set the high-watermark to 500 * 8 = 4000 and the low-watermark to 500 * 2 = 1000 (assuming an average of 2 connections per peer).
   128      # This means that when the number of connections exceeds 4000, the connection manager will prune the peers with the least number of
   129      # streams until the number of connections is reduced to 1000.
   130      high-watermark: 4000
   131      low-watermark: 1000
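            # Worked example of the pruning rule above: with 4500 connections (x > HighWatermark), the ConnManager prunes
            # x - LowWatermark = 4500 - 1000 = 3500 connections, skipping protected peers and peers still within the grace-period.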
   132      # The silence period is the regular interval at which the connection manager checks whether peers need to be pruned (i.e., whether the peer count exceeds the high-watermark).
   133      # 10s is the default libp2p value.
   134      silence-period: 10s
   135      # The time to wait before a new connection is considered for pruning.
   136      grace-period: 1m
   137    # Gossipsub config
   138    gossipsub:
   139      rpc-inspector:
   140        # The size of the queue for notifications about invalid RPC messages
   141        notification-cache-size: 10_000
   142        validation: # RPC control message validation inspector configs
   143          inspection-queue:
   144            # Number of pool workers for the rpc validation inspector
   145            workers: 5
   146            # The size of the queue used by the worker pool of the control message validation inspector
   147            queue-size: 100
   148          publish-messages:
   149            # The maximum number of messages in a single RPC message that are randomly sampled for async inspection.
   150            #	When the size of a single RPC message exceeds this threshold, a random sample is taken for inspection, but the RPC message is not truncated.
   151            max-sample-size: 1000
   152            # An error is returned if the number of invalid RPC messages exceeds this threshold
   153            error-threshold: 500
   154          graft-and-prune:
   155            # The maximum number of GRAFT or PRUNE messages in a single RPC message.
   156            # When the total number of GRAFT or PRUNE messages in a single RPC message exceeds this threshold,
   157            #	a random sample of GRAFT or PRUNE messages will be taken and the RPC message will be truncated to this sample size.
   158            message-count-threshold: 1000
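                  # For illustration: an RPC carrying 1500 GRAFT messages exceeds this threshold, so a random sample of
                  # 1000 GRAFT messages is kept and the RPC is truncated to that sample.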
   159            # Maximum number of total duplicate topic ids in a single GRAFT or PRUNE message, ideally this should be 0 but we allow for some tolerance
   160            # to avoid penalizing peers that are not malicious but are misbehaving due to bugs or other issues.
   161            # A topic id is considered duplicate if it appears more than once in a single GRAFT or PRUNE message.
   162            duplicate-topic-id-threshold: 50
   163          ihave:
   164            # The maximum allowed number of iHave messages in a single RPC message.
   165            #	Each iHave message represents the list of message ids. When the total number of iHave messages
   166            #	in a single RPC message exceeds this threshold, a random sample of iHave messages will be taken and the RPC message will be truncated to this sample size.
   167            #	The sample size is equal to the configured message-count-threshold.
   168            message-count-threshold: 1000
   169            # The maximum allowed number of message ids in a single iHave message.
   170            #	Each iHave message represents the list of message ids for a specific topic, and this parameter controls the maximum number of message ids
   171            #	that can be included in a single iHave message. When the total number of message ids in a single iHave message exceeds this threshold,
   172            #	a random sample of message ids will be taken and the iHave message will be truncated to this sample size.
   173            #	The sample size is equal to the configured message-id-count-threshold.
   174            message-id-count-threshold: 1000
   175            # The tolerance threshold for having duplicate topics in an iHave message under inspection.
   176            # When the total number of duplicate topic ids in a single iHave message exceeds this threshold, the inspection of the message will fail.
   177            #	Note that a topic ID is counted as a duplicate only if it is repeated more than once.
   178            duplicate-topic-id-threshold: 50
   179            # Threshold of tolerance for having duplicate message IDs in a single iHave message under inspection.
   180            # When the total number of duplicate message ids in a single iHave message exceeds this threshold, the inspection of the message will fail.
   181            # Ideally, an iHave message should not have any duplicate message IDs, hence a message id is considered duplicate when it is repeated more than once
   182            # within the same iHave message.
   183            duplicate-message-id-threshold: 100
   184          iwant:
   185            # The maximum allowed number of iWant messages in a single RPC message.
   186            #	Each iWant message represents the list of message ids. When the total number of iWant messages
   187            #	in a single RPC message exceeds this threshold, a random sample of iWant messages will be taken and the RPC message will be truncated to this sample size.
   188            #	The sample size is equal to the configured message-count-threshold.
   189            message-count-threshold: 1000
   190            # The maximum allowed number of message ids in a single iWant message.
   191            #	Each iWant message represents the list of message ids for a specific topic, and this parameter controls the maximum number of message ids
   192            #	that can be included in a single iWant message. When the total number of message ids in a single iWant message exceeds this threshold,
   193            #	a random sample of message ids will be taken and the iWant message will be truncated to this sample size.
   194            #	The sample size is equal to the configured message-id-count-threshold.
   195            message-id-count-threshold: 1000
   196            # The allowed threshold of iWant messages received without a corresponding tracked iHave message that was sent.
   197            # If the cache miss threshold is exceeded an invalid control message notification is disseminated and the sender will be penalized.
   198            cache-miss-threshold: 500
   199            # The max allowed number of duplicate message ids in a single iwant message.
   200            # Note that ideally there should be no duplicate message ids in a single iwant message but
   201            # we allow for some tolerance to avoid penalizing peers that are not malicious
   202            duplicate-message-id-threshold: 100
   203          cluster-prefixed-messages:
   204            # Cluster prefixed control message validation configs
   205            # The size of the cache used to track the amount of cluster prefixed topics received by peers
   206            tracker-cache-size: 100
   207            # The decay val used for the geometric decay of cache counters used to keep track of cluster prefixed topics received by peers
   208            # The decay value used for the geometric decay of cache counters used to keep track of cluster prefixed topics received by peers
   209            # The upper bound on the amount of cluster prefixed control messages that will be processed
   210            hard-threshold: 100
   211      rpc-tracer:
   212        # The default interval at which the mesh tracer logs the mesh topology. This is used for debugging and forensics purposes.
   213        #	Note that we purposefully choose this logging interval high enough to avoid spamming the logs. Moreover, the
   214        #	mesh updates will be logged individually and separately. The logging interval is only used to log the mesh
   215        # topology as a whole, especially when there are no updates to the mesh topology for a long time.
   216        local-mesh-logging-interval: 1m
   217        # The default interval at which the gossipsub score tracer logs the peer scores. This is used for debugging and forensics purposes.
   218        #	Note that we purposefully choose this logging interval high enough to avoid spamming the logs.
   219        score-tracer-interval: 1m
   220        # The default RPC sent tracker cache size. The RPC sent tracker is used to track RPC control messages sent from the local node.
   221        # Note: this cache size must be large enough to keep a history of sent messages in a reasonable time window of past history.
   222        rpc-sent-tracker-cache-size: 1_000_000
   223        # Cache size of the rpc sent tracker queue used for async tracking.
   224        rpc-sent-tracker-queue-cache-size: 100_000
   225        # Number of workers for rpc sent tracker worker pool.
   226        rpc-sent-tracker-workers: 5
   227      # Whether GossipSub peer scoring is enabled.
   228      peer-scoring-enabled: true
   229      scoring-parameters:
   230        peer-scoring:
   231          internal:
   232            # The weight for app-specific scores.
   233            # It is used to scale the app-specific scores to the same range as the other scores.
   234            # At the current version, we don't distinguish between the app-specific scores
   235            # and the other scores, so we set it to 1.
   236            app-specific-score-weight: 1
   237            # The default decay interval for the overall score of a peer at the GossipSub scoring
   238            # system. We set it to 1 minute so that it is not so short that a malicious node can quickly recover from a penalty,
   239            # and not so long that a well-behaved node can't recover from a penalty.
   240            decay-interval: 1m
   241            # The default decay to zero for the overall score of a peer at the GossipSub scoring system.
   242            # It defines the maximum value below which a peer scoring counter is reset to zero.
   243            # This is to prevent the counter from decaying to a very small value.
   244            # The default value is 0.01, which means that a counter will be reset to zero if it decays to 0.01.
   245            # When a counter hits the DecayToZero threshold, it means that the peer did not exhibit the behavior
   246            # for a long time, and we can reset the counter.
   247            decay-to-zero: 0.01
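                  # For illustration (using the 0.5 invalid-message-deliveries decay configured further below): a counter of 1.0
                  # decays to 0.5^7 ≈ 0.0078 after 7 decay intervals, which is below this 0.01 threshold, so the counter is then
                  # reset to zero.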
   248            topic:
   249              # This is the default value for the skip atomic validation flag for topics.
   250              # We set it to true, which means gossipsub parameter validation will not fail if we leave some of the
   251              # topic parameters at their default values, i.e., zero. This is because we are not setting all
   252              # topic parameters at the current implementation.
   253              skip-atomic-validation: true
   254              # This value is applied to the square of the number of invalid message deliveries on a topic.
   255              # It is used to penalize peers that send invalid messages. By an invalid message, we mean a message that is not signed by the
   256              # publisher, or a message that is not signed by the peer that sent it.
   257              # An invalid message also can be a self-origin message, i.e., the peer sees its own message bounced back to it.
   258              # GossipSub has an edge-case that a peer may inadvertently request a self-origin message from a peer that it is connected to, through iHave-iWant messages, which is a
   259              # false-positive edge-case.
   260              # We set it to -10e-4, which means that with around 1414 invalid
   261              # message deliveries within a gossipsub heartbeat interval, the peer will be disconnected.
   262              # Note that we intentionally set this threshold high to avoid false-positively penalizing nodes due to self-origin message requests by iHave-iWants (a known issue in gossipsub).
   263              # The supporting math is as follows:
   264              # - each staked (i.e., authorized) peer is rewarded by the fixed reward of 100 (i.e., DefaultStakedIdentityReward).
   265              # - x invalid message deliveries will result in a penalty of x^2 * DefaultTopicInvalidMessageDeliveriesWeight, i.e., -x^2 * 10e-4.
   266              # - the peer will be disconnected when its penalty reaches -100 (i.e., MaxAppSpecificPenalty).
   267              # - so, the maximum number of invalid message deliveries that a peer can have before being disconnected is sqrt(200/10e-4) ~ 1414.
   268              invalid-message-deliveries-weight: -10e-4
   269              # The decay factor used to decay the number of invalid message deliveries.
   270              # The total number of invalid message deliveries is multiplied by this factor at each heartbeat interval to
   271              # decay the number of invalid message deliveries, and prevent the peer from being disconnected if it stops
   272              # sending invalid messages. We set it to 0.5, which means that the number of invalid message deliveries will
   273              # decay by 50% at each heartbeat interval.
   274              # The decay heartbeats are defined by the heartbeat interval of the gossipsub scoring system, which is 1 Minute (defaultDecayInterval).
   275              # Note that we set the decay factor low so that the invalid message deliveries will be decayed fast enough to prevent the peer from being disconnected on mediocre loads.
   276              # This is to address the false-positive disconnections that we observed in the network due to the self-origin message requests by iHave-iWants (a known issue in gossipsub).
   277              invalid-message-deliveries-decay: 0.5
   278              # The default time in mesh quantum for the GossipSub scoring system. It is used to gauge
   279              # a discrete time interval for the time in mesh counter. We set it to 1 hour, which means that for every complete hour a peer is
   280              # in a topic mesh, the time in mesh counter will be incremented by 1 and is counted towards the availability score of the peer in that topic mesh.
   281              # The reason for setting it to 1 hour is that we want to reward peers that are in a topic mesh for a long time, and we want to avoid rewarding peers that
   282              # are churners, i.e., peers that join and leave a topic mesh frequently.
   283              time-in-mesh-quantum: 1h
   284              # The default weight of a topic in the GossipSub scoring system.
   285              # The overall score of a peer in a topic mesh is multiplied by the weight of the topic when calculating the overall score of the peer.
   286              # We set it to 1.0, which means that the overall score of a peer in a topic mesh is not affected by the weight of the topic.
   287              topic-weight: 1.0
   288              # This is applied to the number of actual message deliveries in a topic mesh
   289              # at each decay interval (i.e., defaultDecayInterval).
   290              # It is used to decay the number of actual message deliveries, and prevents past message
   291              # deliveries from affecting the current score of the peer.
   292              # As the decay interval is 1 minute, we set it to 0.5, which means that the number of actual message
   293              # deliveries will decay by 50% at each decay interval.
   294              mesh-message-deliveries-decay: 0.5
   295              # The maximum number of actual message deliveries in a topic
   296              # mesh that is used to calculate the score of a peer in that topic mesh.
   297              # We set it to 1000, which means that the maximum number of actual message deliveries in a
   298              # topic mesh that is used to calculate the score of a peer in that topic mesh is 1000.
   299              # This is to prevent the score of a peer in a topic mesh from being affected by a large number of actual
   300              # message deliveries, which would also affect the score of the peer in other topic meshes.
   301              # When the total delivered messages in a topic mesh exceeds this value, the score of the peer in that topic
   302              # mesh will not be affected by the actual message deliveries in that topic mesh.
   303              # Moreover, this does not allow the peer to accumulate a large number of actual message deliveries in a topic mesh
   304              # and then start under-performing in that topic mesh without being penalized.
   305              mesh-message-deliveries-cap: 1000
   306              # The threshold for the number of actual message deliveries in a
   307              # topic mesh that is used to calculate the score of a peer in that topic mesh.
   308              # If the number of actual message deliveries in a topic mesh is less than this value,
   309              # the peer will be penalized by the square of the difference between the actual message deliveries and the threshold,
   310              # i.e., -w * (actual - threshold)^2 where `actual` and `threshold` are the actual message deliveries and the
   311              # threshold, respectively, and `w` is the weight (i.e., defaultTopicMeshMessageDeliveriesWeight).
   312              # We set it to 0.1 * defaultTopicMeshMessageDeliveriesCap, which means that if a peer delivers less than 10% of the
   313              # maximum number of actual message deliveries in a topic mesh, it will be considered as an under-performing peer
   314              # in that topic mesh.
   315              mesh-message-deliveries-threshold: 100
   316              # The weight for applying penalty when a peer is under-performing in a topic mesh.
   317              # Upon every decay interval, if the number of actual message deliveries is less than the topic mesh message deliveries threshold
   318              # (i.e., defaultTopicMeshMessageDeliveriesThreshold), the peer will be penalized by the square of the difference between the actual
   319              # message deliveries and the threshold, multiplied by this weight, i.e., -w * (actual - threshold)^2 where w is the weight, and
   320              # `actual` and `threshold` are the actual message deliveries and the threshold, respectively.
   321              # We set this value to be - 0.05 MaxAppSpecificReward / (defaultTopicMeshMessageDeliveriesThreshold^2). This guarantees that even if a peer
   322              # is not delivering any message in a topic mesh, it will not be disconnected.
   323              # Rather, part of the MaxAppSpecificReward that is awarded by our app-specific scoring function to all staked
   324              # nodes by default will be withdrawn, and the peer will be slightly penalized. In other words, under-performing in a topic mesh
   325              # will drop the overall score of a peer by 5% of the MaxAppSpecificReward that is awarded by our app-specific scoring function.
   326              # It means that under-performing in a topic mesh will not cause a peer to be disconnected, but it will cause the peer to lose
   327              # part of the MaxAppSpecificReward that is awarded by our app-specific scoring function.
   328              # At this point, we do not want to disconnect a peer only because it is under-performing in a topic mesh as it might be
   329              # causing a false positive network partition.
   330              mesh-deliveries-weight: -0.0005
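                    # Worked example of the formula above: a peer that delivers no messages in a topic mesh (actual = 0,
                    # threshold = 100) is penalized by -0.0005 * (0 - 100)^2 = -5 per decay interval, i.e., 5% of the
                    # MaxAppSpecificReward of 100, consistent with the 5% figure mentioned above.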
   331              # The window size is the time interval during which we count a delivery of an already
   332              # seen message towards the score of a peer in a topic mesh. The delivery is counted
   333              # by GossipSub only if the previous sender of the message is different from the current sender.
   334              # We set it to the decay interval of the GossipSub scoring system, which is 1 minute.
   335              # It means that if a peer delivers a message that it has already seen less than one minute ago,
   336              # the delivery will be counted towards the score of the peer in a topic mesh only if the previous sender of the message is different from the current sender.
   337              # This also prevents replay attacks of messages that are older than one minute, as replayed messages will not
   338              # be counted towards the actual message deliveries of a peer in a topic mesh.
   339              mesh-message-deliveries-window: 1m
   340              # The time interval that we wait after a new peer joins a topic mesh
   341              # before we start counting the number of actual message deliveries of that peer in that topic mesh.
   342              # We set it to 2 * defaultDecayInterval, which means that we wait for 2 decay intervals before we start counting
   343              # the number of actual message deliveries of a peer in a topic mesh.
   344              # With a default decay interval of 1 minute, it means that we wait for 2 minutes before we start counting the
   345              # number of actual message deliveries of a peer in a topic mesh. This is to account for
   346              # the time that it takes for a peer to start up and receive messages from other peers in the topic mesh.
   347              mesh-message-delivery-activation: 2m
   348            thresholds:
   349              # When a peer's penalty drops below this threshold, no gossip
   350              # is emitted towards that peer and gossip from that peer is ignored.
   351              # Validation Constraint: GossipThreshold >= PublishThreshold && GossipThreshold < 0
   352              # How we use it: As the current max penalty is -100, we set the threshold to -99
   353              # so that all gossips to and from peers with penalty -100 are ignored.
   354              gossip: -99
   355              # When a peer's penalty drops below this threshold,
   356              # self-published messages are not propagated towards this peer.
   357              # Validation Constraint:
   358              # PublishThreshold >= GraylistThreshold && PublishThreshold <= GossipThreshold && PublishThreshold < 0.
   359              # How we use it: As the current max penalty is -100, we set the threshold to -99
   360              # so that all penalized peers are deprived of receiving any published messages.
   361              publish: -99
   362              # When a peer's penalty drops below this threshold,
   363              # the peer is graylisted, i.e., incoming RPCs from the peer are ignored.
   364              # Validation Constraint:
   365              # GraylistThreshold <= PublishThreshold && GraylistThreshold <= GossipThreshold && GraylistThreshold < 0
   366              # How we use it: As the current max penalty is -100, we set the threshold to -99
   367              # so that all penalized peers are graylisted.
   368              graylist: -99
   369              # When a peer sends us PX (peer exchange) information with a PRUNE,
   370              # we only accept it and connect to the supplied peers if the originating peer's
   371              # penalty exceeds this threshold.
   372              # Validation Constraint: must be non-negative.
   373              # How we use it: As the current max reward is 100, we set the threshold to 99
   374              # so that we only receive supplied peers from well-behaved peers.
   375              accept-px: 99
   376              # When the median peer penalty in the mesh drops
   377              # below this value, the node may select more peers with penalty above the median
   378              # to opportunistically graft on the mesh.
   379              # Validation Constraint: must be non-negative.
   380              # How we use it: We set it to 100 + 1 (i.e., MaxAppSpecificReward + 1) so that we only
   381              # opportunistically graft peers that are not access nodes (i.e., with -1),
   382              # or penalized peers (i.e., with -100).
   383              opportunistic-graft: 101
   384            behaviour:
   385              # The threshold when the behavior of a peer is considered as bad by GossipSub.
   386              # Currently, the misbehavior is defined as advertising an iHave without responding to the iWants (iHave broken promises), as well as attempting
   387              # to GRAFT when the peer is considered for a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh
   388              # for a while, and the remote peer keeps attempting to GRAFT (aka GRAFT flood).
   389              # When the misbehavior counter of a peer goes beyond this threshold, the peer is penalized by defaultBehaviorPenaltyWeight (see below) for the excess misbehavior.
   390              #
   391              # An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
   392              # For iHave broken promises, the gossipsub scoring works as follows:
   393              # It samples ONLY A SINGLE iHave out of the entire RPC.
   394              # If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
   395              #
   396              # We set it to 10, meaning that we at most tolerate 10 of such RPCs containing iHave broken promises. After that, the peer is penalized for every
   397              # excess RPC containing iHave broken promises.
   398              # The counter is also decayed by (0.99) every decay interval (defaultDecayInterval) i.e., every minute.
   399              # Note that misbehaviors are counted by GossipSub across all topics (and is different from the Application Layer Misbehaviors that we count through
   400              # the ALSP system).
   401              penalty-threshold: 1000
   402              # The weight for applying penalty when a peer misbehavior goes beyond the threshold.
   403              # Misbehavior of a peer at the gossipsub layer is defined as advertising an iHave without responding to the iWants (broken promises), as well as attempting
   404              # to GRAFT when the peer is considered for a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh for a while (GRAFT flooding).
   405              # This is detected by the GossipSub scoring system, and the peer is penalized by defaultBehaviorPenaltyWeight.
   406              #
   407              # An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
   408              # For iHave broken promises, the gossipsub scoring works as follows:
   409              # It samples ONLY A SINGLE iHave out of the entire RPC.
   410              # If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
   411              #
   412              # The penalty is applied to the square of the difference between the misbehavior counter and the threshold, i.e., -|w| * (misbehavior counter - threshold)^2.
   413              # We set it to 0.01 * MaxAppSpecificPenalty, which means that misbehaving 10 times more than the threshold (i.e., 10 + 10) will cause the peer to lose
   414              # its entire AppSpecificReward that is awarded by our app-specific scoring function to all staked (i.e., authorized) nodes by default.
   415              # Moreover, as the MaxAppSpecificPenalty is -MaxAppSpecificReward, misbehaving sqrt(2) * 10 times more than the threshold will cause the peer score
   416              # to be dropped below the MaxAppSpecificPenalty, which is also below the GraylistThreshold, and the peer will be graylisted (i.e., disconnected).
   417              #
   418              # The math is as follows: -|w| * (misbehavior - threshold)^2 = 0.01 * MaxAppSpecificPenalty * (misbehavior - threshold)^2 < 2 * MaxAppSpecificPenalty
   419              # if misbehavior > threshold + sqrt(2) * 10.
   420              # As shown above, with this choice of defaultBehaviorPenaltyWeight, misbehaving sqrt(2) * 10 times more than the threshold will cause the peer score
   421              # to be dropped below the MaxAppSpecificPenalty, which is also below the GraylistThreshold, and the peer will be graylisted (i.e., disconnected). This weight
   422              # is chosen in a way that with almost a few misbehaviors more than the threshold, the peer will be graylisted. The rationale relies on the fact that
   423              # the misbehavior counter is incremented by 1 for each RPC containing one or more broken promises. Hence, it is per RPC, and not per broken promise.
   424              # Having sqrt(2) * 10 broken promises RPC is a blatant misbehavior, and the peer should be graylisted. With decay interval of 1 minute, and decay value of
   425              # 0.99, we expect a graylisted node due to broken promises to get back in about 527 minutes, i.e., (0.99)^x * (sqrt(2) * 10)^2 * MaxAppSpecificPenalty > GraylistThreshold
   426              # where x is the number of decay intervals that the peer is graylisted. As MaxAppSpecificPenalty and GraylistThresholds are close, we can simplify the inequality
   427              # to (0.99)^x * (sqrt(2) * 10)^2 > 1 --> (0.99)^x * 200 > 1 --> (0.99)^x > 1/200 --> x > log(1/200) / log(0.99) --> x > 527.17 decay intervals, i.e., 527 minutes.
   428              # Note that misbehaviors are counted by GossipSub across all topics (and is different from the Application Layer Misbehaviors that we count through
   429              # the ALSP system that are reported by the engines).
   430              penalty-weight: -0.01
   431              # The decay interval for the misbehavior counter of a peer. The misbehavior counter is
   432              # incremented by GossipSub for iHave broken promises or the GRAFT flooding attacks (i.e., each GRAFT received from a remote peer while that peer is on a PRUNE backoff).
   433              #
   434              # An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
   435              # For iHave broken promises, the gossipsub scoring works as follows:
   436              # It samples ONLY A SINGLE iHave out of the entire RPC.
   437              # If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
   438              # This means that regardless of how many iHave broken promises an RPC contains, the misbehavior counter is incremented by 1.
   439              # That is why we decay the misbehavior counter very slowly, as this counter indicates a severe misbehavior.
   440              #
   441              # The misbehavior counter is decayed per decay interval (i.e., defaultDecayInterval = 1 minute) by GossipSub.
   442              # We set it to 0.99, which means that the misbehavior counter is decayed by 1% per decay interval.
   443              # With the generous threshold that we set (i.e., defaultBehaviourPenaltyThreshold = 10), we treat peers going beyond the threshold as persistent misbehavers.
   444              # We expect honest peers never to go beyond the threshold, and if they do, we expect them to go back below the threshold quickly.
   445              #
   446              # Note that misbehaviors are counted by GossipSub across all topics (and is different from the Application Layer Misbehaviors that we count through
   447              # the ALSP system that is based on the engines report).
   448              penalty-decay: 0.5
   449          protocol:
   450            # The max number of debug/trace log events per second.
   451            # Logs emitted above this threshold are dropped.
   452            max-debug-logs: 50
   453            application-specific:
   454              # This is the maximum penalty for severe offenses that we apply
   455              # to a remote node score. The score mechanism of GossipSub in Flow is designed
   456              # in a way that all other infractions are penalized with a fraction of this value.
   457              # We have also set the other parameters such as GraylistThreshold,
   458              # GossipThreshold, and PublishThreshold to be a bit higher than this,
   459              # i.e., -100 + 1. This ensures that a node with a score of
   460              # -100 will be graylisted (i.e., all incoming and outgoing RPCs
   461              # are rejected) and will not be able to publish or gossip any messages.
   462              max-app-specific-penalty: -100
   463              min-app-specific-penalty: -1
   464              # This is the penalty for unknown identity. It is
   465              # applied to the peer's score when the peer is not in the identity list.
   466              unknown-identity-penalty: -100
   467              # This is the penalty for invalid subscription.
   468              # It is applied to the peer's score when the peer subscribes to a topic that it is
   469              # not authorized to subscribe to.
   470              invalid-subscription-penalty: -100
   471              # This is the reward for well-behaving staked peers.
   472              # If a peer does not have any misbehavior record, e.g., invalid subscription,
   473              # invalid message, etc., it will be rewarded with this score.
   474              max-app-specific-reward: 100
   475              # This is the reward for staking peers. It is applied
   476              # to the peer's score when the peer does not have any misbehavior record, e.g.,
   477              # invalid subscription, invalid message, etc. The purpose is to reward the staking
   478              # peers for their contribution to the network and prioritize them in neighbor selection.
   479              staked-identity-reward: 100
   480        scoring-registry:
   481          # Defines the duration of time, after the node startup,
   482          # during which the scoring registry remains inactive before penalizing nodes.
   483          # Throughout this startup silence period, the application-specific penalty
   484          # returned for all nodes will be 0, and any invalid control message notifications
   485          # will be ignored. This configuration allows nodes to stabilize and initialize before
   486          # applying penalties or processing invalid control message notifications.
   487          startup-silence-duration: 1h
   488          app-specific-score:
   489            # number of workers that asynchronously update app-specific scores when they expire.
   490            score-update-worker-num: 5
   491            # size of the queue used by the worker pool for the app specific score update requests. The queue is used to buffer the app specific score update requests
   492            # before they are processed by the worker pool. The queue size must be larger than the total number of peers in the network.
   493            # The queue is deduplicated based on the peer ids ensuring that there is only one app specific score update request per peer in the queue.
   494            score-update-request-queue-size: 10_000
   495            # score ttl is the time to live for the app specific score. Once the score expires, a new request will be sent to the app specific score provider to update the score.
   496            # until the score is updated, the previous score will be used.
   497            score-ttl: 1m
   498          spam-record-cache:
   499            # size of cache used to track spam records at gossipsub. Each peer id is mapped to a spam record that keeps track of the spam score for that peer.
   500            # cache should be big enough to keep track of the entire network's size. Otherwise, the local node's view of the network will be incomplete due to cache eviction.
   501            cache-size: 10_000
   502            decay:
   503              # Threshold level for spam record penalty.
   504              # At each evaluation period, when a node's penalty is below this value, the decay rate slows down, ensuring longer decay periods for malicious nodes and quicker decay for honest ones.
   505              penalty-decay-slowdown-threshold: -99
   506              # This setting adjusts the decay rate when a node's penalty falls below the threshold.
   507              # The decay rate, ranging between 0 and 1, dictates how quickly penalties decrease: a higher rate results in slower decay.
   508              # The decay calculation is multiplicative (newPenalty = decayRate * oldPenalty).
   509              # The reduction factor increases the decay rate, thus decelerating the penalty reduction. For instance, with a 0.01 reduction factor,
   510              # the decay rate increases by 0.01 at each evaluation interval when the penalty is below the threshold.
   511              # Consequently, a decay rate of `x` diminishes the penalty to zero more rapidly than a rate of `x+0.01`.
   512              penalty-decay-rate-reduction-factor: 0.01
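                    # For illustration of the multiplicative decay above: with a decay rate of 0.8, a penalty of -100 becomes
                    # -80 after one evaluation and -64 after two, whereas with a rate of 0.99 it is still about -98 after two
                    # evaluations; a higher rate therefore means a slower recovery.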
   513              # Defines the frequency for evaluating and potentially adjusting the decay process of a spam record.
   514              # At each interval, the system assesses the current penalty of a node.
   515              # If this penalty is below the defined threshold, the decay rate is modified according to the reduction factor, slowing down the penalty reduction process.
   516              # This reassessment at regular intervals ensures that the decay rate is dynamically adjusted to reflect the node's ongoing behavior,
   517              # maintaining a balance between penalizing malicious activity and allowing recovery for honest nodes.
   518              penalty-decay-evaluation-period: 10m
   519              # The minimum speed at which the spam penalty value of a peer is decayed.
   520              # A spam record is initialized with a decay value between 0.5 and 0.7, and this value is then adjusted up to 0.99 on consecutive misbehaviors.
   521              # The maximum decay value decays the penalty by 1% every second. The decay is applied geometrically, i.e., `newPenalty = oldPenalty * decay`, hence, the higher decay value
   522              # indicates a lower decay speed, i.e., it takes more heartbeat intervals to decay a penalty back to zero when the decay value is high.
   523              # assume:
   524              #     penalty = -100 (the maximum application specific penalty is -100)
   525              #     skipDecayThreshold = -0.1
   526              # it takes around 688 seconds for the penalty to decay back above -0.1, at which point it is set to 0:
   527              #     x * 0.99 ^ n > -0.1 (assuming negative x).
   528              #     0.99 ^ n < -0.1 / x
   529              # Now we can take the logarithm of both sides (with any base, but let's use base 10 for simplicity).
   530              #     log( 0.99 ^ n ) < log( -0.1 / x )
   531              # Using the properties of logarithms, we can bring down the exponent:
   532              #     n * log( 0.99 ) < log( -0.1 / x )
   533              # Since log( 0.99 ) is negative, dividing by it flips the inequality:
   534              #     n > log( -0.1 / x ) / log( 0.99 )
   535              # We can plug in x = -100:
   536              #     n > log( -0.1 / -100 ) / log( 0.99 )
   537              #     n > log( 0.001 ) / log( 0.99 )
   538              #     n > -3 / log( 0.99 )
   539              #     n > 687.3
   540              minimum-spam-penalty-decay-factor: 0.99
   541              # The maximum rate at which the spam penalty value of a peer decays (i.e., the fastest decay). The decay factor increases
   542              # during sustained malicious activity, leading to a slower recovery of the app-specific score for the penalized node. Conversely,
   543              # the decay factor stays lower, allowing faster recoveries, when nodes exhibit only fleeting misbehavior.
   544              maximum-spam-penalty-decay-factor: 0.8
   545              # When the negative penalty of a peer is above this value (i.e., closer to zero), the decay function will not be called;
   546              # instead, the penalty will be set to 0. This is to prevent the penalty from keeping a small negative value for a long time.
   547              skip-decay-threshold: -0.1
   548          misbehaviour-penalties:
   549            # The penalty applied to the application specific penalty when a peer conducts a graft misbehaviour.
   550            graft: -10
   551            # The penalty applied to the application specific penalty when a peer conducts a prune misbehaviour.
   552            prune: -10
   553            # The penalty applied to the application specific penalty when a peer conducts an iHave misbehaviour.
   554            ihave: -10
   555            # The penalty applied to the application specific penalty when a peer conducts an iWant misbehaviour.
   556            iwant: -10
   557            # The penalty applied to the application specific penalty when a peer conducts an rpc publish message misbehaviour.
   558            publish: -10
   559            # The factor used to reduce the penalty for control message misbehaviours on cluster prefixed topics. This allows a more lenient punishment for nodes
   560            # that fall behind and may need to request old data.
   561            cluster-prefixed-reduction-factor: 0.2
   562      subscription-provider:
   563        # The interval for updating the list of subscribed peers to all topics in gossipsub. This is used to keep track of subscriptions
   564        # violations and penalize peers accordingly. Recommended value is on the order of a few minutes to avoid contention, as the operation
   565        # reads all topics and all peers subscribed to each topic.
   566        update-interval: 10m
   567        # The size of cache for keeping the list of all peers subscribed to each topic (same as the local node). This cache is the local node's
   568        # view of the network and is used to detect subscription violations and penalize peers accordingly. Recommended to be big enough to
   569        # hold the entire network's peer set. Otherwise, the local node's view of the network will be incomplete due to cache eviction.
   570        # Recommended size is 10x the number of peers in the network.
   571        cache-size: 10000
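              # For illustration: with the ~500-node network assumed in the connection-manager section above, 10x the number
              # of peers is 5000, so the configured 10000 leaves ample headroom.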
   572    # Application layer spam prevention
   573    alsp-spam-record-cache-size: 1000
   574    alsp-spam-report-queue-size: 10_000
   575    alsp-disable-penalty: false
   576    alsp-heart-beat-interval: 1s
   577    # Base probability in [0,1] that's used in creating the final probability of creating a
   578    # misbehavior report for a BatchRequest message. This is why the word "base" is used in the name of this field,
   579    # since it's not the final probability and there are other factors that determine the final probability.
   580    # The reason for this is that we want to increase the probability of creating a misbehavior report for a large batch.
   581    # Create misbehavior report for about 0.2% of BatchRequest messages for normal batch requests (i.e. not too large) and about 15% for very large batch requests
   582    # The final batch request probability is calculated as follows:
   583    # batchRequestBaseProb * (len(batchRequest.BlockIDs) + 1) / synccore.DefaultConfig().MaxSize
   584    # Example 1 (small batch of block IDs) if the batch request is for 10 blocks IDs and batchRequestBaseProb is 0.01, then the probability of
   585    # creating a misbehavior report is:
   586    # batchRequestBaseProb * (10+1) / synccore.DefaultConfig().MaxSize
   587    # = 0.01 * 11 / 64 = 0.00171875 = 0.171875%
   588    # Example 2 (large batch of block IDs) if the batch request is for 1000 block IDs and batchRequestBaseProb is 0.01, then the probability of
   589    # creating a misbehavior report is:
   590    # batchRequestBaseProb * (1000+1) / synccore.DefaultConfig().MaxSize
   591    # = 0.01 * 1001 / 64 = 0.15640625 = 15.640625%
   592    alsp-sync-engine-batch-request-base-prob: 0.01
   593    # Base probability in [0,1] that's used in creating the final probability of creating a
   594    # misbehavior report for a RangeRequest message. This is why the word "base" is used in the name of this field,
   595    # since it's not the final probability and there are other factors that determine the final probability.
   596    # The reason for this is that we want to increase the probability of creating a misbehavior report for a large range.
   597    # Create misbehavior report for about 0.2% of RangeRequest messages for normal range requests (i.e. not too large)
   598    # and about 15% of RangeRequest messages for very large range requests.
   599    # The final probability is calculated as follows:
   600    # rangeRequestBaseProb * ((rangeRequest.ToHeight-rangeRequest.FromHeight) + 1) / synccore.DefaultConfig().MaxSize
   601    # Example 1 (small range) if the range request is for 10 blocks and rangeRequestBaseProb is 0.01, then the probability of
   602    # creating a misbehavior report is:
   603    # rangeRequestBaseProb * (10+1) / synccore.DefaultConfig().MaxSize
   604    # = 0.01 * 11 / 64 = 0.00171875 = 0.171875%
   605    # Example 2 (large range) if the range request is for 1000 blocks and rangeRequestBaseProb is 0.01, then the probability of
   606    # creating a misbehavior report is:
   607    # rangeRequestBaseProb * (1000+1) / synccore.DefaultConfig().MaxSize
   608    # = 0.01 * 1001 / 64 = 0.15640625 = 15.640625%
   609    alsp-sync-engine-range-request-base-prob: 0.01
   610    # Probability in [0,1] of creating a misbehavior report for a SyncRequest message.
   611    # create misbehavior report for 1% of SyncRequest messages
   612    alsp-sync-engine-sync-request-prob: 0.01