github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/config/default-config.yml

     1  config-file: "./default-config.yml"
     2  # WARNING: Only modify the network configurations below if you fully understand their implications.
     3  # Incorrect settings may lead to system instability, security vulnerabilities, or degraded performance.
     4  # Make changes with caution and refer to the documentation for guidance.
     5  # Network configuration.
     6  network-config:
     7    # Network Configuration
     8    # Connection pruning determines whether connections to nodes
     9    # that are not part of protocol state should be trimmed
    10    networking-connection-pruning: true
    11    # Preferred unicast protocols: a list of unicast protocols, in preferred order
    12    preferred-unicast-protocols: [ ]
    13    received-message-cache-size: 10_000
    14    peerupdate-interval: 10m
    15  
    16    dns-cache-ttl: 5m
    17    # The size of the queue for notifications about new peers in the disallow list.
    18    disallow-list-notification-cache-size: 100
    19    unicast:
    20      rate-limiter:
    21        # Setting this to true will disable connection disconnects and gating when unicast rate limiters are configured
    22        dry-run: true
    23        # The number of seconds a peer will be forced to wait before being allowed to successfully reconnect to the node after being rate limited
    24        lockout-duration: 10s
    25        # Amount of unicast messages that can be sent by a peer per second
    26        message-rate-limit: 0
    27        # Bandwidth size in bytes a peer is allowed to send via unicast streams per second
    28        bandwidth-rate-limit: 0
    29        # Bandwidth size in bytes a peer is allowed to send via unicast streams at once
    30        bandwidth-burst-limit: 1e9
    31      manager:
    32        # The minimum number of consecutive successful streams to reset the unicast stream creation retry budget from zero to the maximum default. If it is set to 100 for example, it
    33        # means that if a peer has 100 consecutive successful streams to the remote peer, and the remote peer has a zero stream creation budget,
    34        # the unicast stream creation retry budget for that remote peer will be reset to the maximum default.
    35        stream-zero-retry-reset-threshold: 100
    36        # The maximum number of retry attempts for creating a unicast stream to a remote peer before giving up. If it is set to 3, for example, and a peer fails to create
    37        # a unicast stream to a remote peer 3 times, the peer will give up and will not retry creating a unicast stream to that remote peer.
    38        # When it is set to zero it means that the peer will not retry creating a unicast stream to a remote peer if it fails.
    39        max-stream-creation-retry-attempt-times: 3
    40        # The size of the dial config cache used to keep track of the dial config for each remote peer. The dial config is used to keep track of the dial retry budget for each remote peer.
    41        # Recommended to set it to the maximum number of remote peers in the network.
    42        dial-config-cache-size: 10_000
    43        # Unicast create stream retry delay is the initial delay used in the exponential backoff for create stream retries
    44        create-stream-retry-delay: 1s
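              # As an illustrative sketch only (not necessarily the exact libp2p retry schedule): with the 1s initial delay above,
              # a backoff that doubles on each attempt, and max-stream-creation-retry-attempt-times: 3, retries to a failing
              # remote peer would wait roughly 1s, 2s, and 4s before the stream creation retry budget for that peer is exhausted.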
    45      message-timeout: 5s
    46      # Enable stream protection for unicast streams; when enabled, all connections that are being established or
    47      #	have been already established for unicast streams are protected, meaning that they won't be closed by the connection manager.
    48      #	This is useful for preventing the connection manager from closing unicast streams that are being used by the application layer.
    49      #	However, it may interfere with the resource manager of libp2p, i.e., the connection manager may not be able to close connections
    50      #	that are not being used by the application layer while at the same time the node is running out of resources for new connections.
    51      enable-stream-protection: true
    52    # Resource manager config
    53    libp2p-resource-manager:
    54      # Maximum allowed fraction of memory to be allocated by the libp2p resources in [0,1]
    55      # setting it to zero means no allocation of memory by libp2p, and libp2p will run with very low limits
    56      memory-limit-ratio: 0.5 # flow default
    57      # Maximum allowed fraction of file descriptors to be allocated by the libp2p resources in [0,1]
    58      # setting it to zero means no allocation of file descriptors by libp2p, and libp2p will run with very low limits
    59      file-descriptors-ratio: 0.2 # libp2p default
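            # For illustration only (actual numbers depend on the host): on a machine with 16 GiB of RAM and a 65,536
            # file-descriptor ulimit, the ratios above would grant libp2p roughly 0.5 * 16 GiB = 8 GiB of memory and
            # 0.2 * 65,536 ~ 13,107 file descriptors; the limits-override values below then override individual default limits.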
    60      # limits override: any non-zero values for libp2p-resource-limit-override will override the default values of the libp2p resource limits.
    61      limits-override:
    62        system:
    63          # maximum number of inbound system-wide streams, across all peers and protocols
    64          # Note that streams are ephemeral and are created and destroyed intermittently.
    65          streams-inbound: 15_000 # override
    66          # maximum number of outbound system-wide streams, across all peers and protocols
    67          # Note that streams are ephemeral and are created and destroyed intermittently.
    68          streams-outbound: 15_000 # override
    69          connections-inbound: 0 # no-override, use default
    70          connections-outbound: 0 # no-override, use default
    71          fd: 0 # no-override, use default
    72          memory-bytes: 0 # no-override, use default
    73        transient:
    74          # maximum number of inbound transient streams, across all streams that are not yet fully opened and associated with a protocol.
    75          # Note that streams are ephemeral and are created and destroyed intermittently.
    76          streams-inbound: 15_000 # override
    77          # maximum number of outbound transient streams, across all streams that are not yet fully opened and associated with a protocol.
    78          # Note that streams are ephemeral and are created and destroyed intermittently.
    79          streams-outbound: 15_000 # override
    80          connections-inbound: 0 # no-override, use default
    81          connections-outbound: 0 # no-override, use default
    82          fd: 0 # no-override, use default
    83          memory-bytes: 0 # no-override, use default
    84        protocol:
    85          # maximum number of inbound streams for each protocol across all peers; this is a per-protocol limit. We expect at least
    86          # three protocols per node; gossipsub, unicast, and dht. Note that streams are ephemeral and are created and destroyed intermittently.
    87          streams-inbound: 5000 # override
    88          # maximum number of outbound streams for each protocol across all peers; this is a per-protocol limit. We expect at least
    89          # three protocols per node; gossipsub, unicast, and dht. Note that streams are ephemeral and are created and destroyed intermittently.
    90          streams-outbound: 5000 # override
    91          connections-inbound: 0 # no-override, use default
    92          connections-outbound: 0 # no-override, use default
    93          fd: 0 # no-override, use default
    94          memory-bytes: 0 # no-override, use default
    95        peer:
    96          # maximum number of inbound streams from each peer across all protocols.
    97          streams-inbound: 1000 # override
    98          # maximum number of outbound streams from each peer across all protocols.
    99          streams-outbound: 1000 # override
   100          connections-inbound: 0 # no-override, use default
   101          connections-outbound: 0 # no-override, use default
   102          fd: 0 # no-override, use default
   103          memory-bytes: 0 # no-override, use default
   104        peer-protocol:
   105          # maximum number of inbound streams from each peer for each protocol.
   106          streams-inbound: 500 # override
   107          # maximum number of outbound streams from each peer for each protocol.
   108          streams-outbound: 500 # override
   109          connections-inbound: 0 # no-override, use default
   110          connections-outbound: 0 # no-override, use default
   111          fd: 0 # no-override, use default
   112          memory-bytes: 0 # no-override, use default
   113    connection-manager:
   114      # HighWatermark and LowWatermark govern the number of connections maintained by the ConnManager.
   115      # When the peer count exceeds the HighWatermark, as many peers will be pruned (and
   116      # their connections terminated) until LowWatermark peers remain. In other words, whenever the
   117      # peer count is x > HighWatermark, the ConnManager will prune x - LowWatermark peers.
   118      # The pruning algorithm is as follows:
   119      # 1. The ConnManager will not prune any peers that have been connected for less than GracePeriod.
   120      # 2. The ConnManager will not prune any peers that are protected.
   121      # 3. The ConnManager will sort the peers based on their number of streams and direction of connections, and
   122      # prune the peers with the fewest streams. If there are ties, the peer with the incoming connection
   123      # will be pruned. If both peers have incoming connections, and there are still ties, one of the peers will be
   124      # pruned at random.
   125      # Algorithm implementation is in https://github.com/libp2p/go-libp2p/blob/master/p2p/net/connmgr/connmgr.go#L262-L318
   126      # We assume around 500 nodes in the network, and each node is allowed to make at most 8 connections to any given remote node.
   127      # We hence set the high-watermark to 500 * 8 = 4000 and the low-watermark to 500 * (0.5 * 4) = 1000. This means that when the
   128      # number of peers exceeds 4000, the connection manager will prune the peers with the fewest streams until the number of
   129      # peers is reduced to 1000, assuming an average of 2 connections per peer.
   130      high-watermark: 4000
   131      low-watermark: 1000
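            # Worked example of the pruning rule above: if the peer count reaches x = 4200 (> high-watermark of 4000), the
            # ConnManager prunes x - low-watermark = 4200 - 1000 = 3200 peers (skipping protected peers and peers within the
            # grace period), leaving 1000 peers connected.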
   132      # The silence period is the regular interval at which the connection manager checks whether peers should be pruned when the number of peers exceeds the high-watermark.
   133      # 10s is the default libp2p value.
   134      silence-period: 10s
   135      # The time to wait before a new connection is considered for pruning.
   136      grace-period: 1m
   137    # Gossipsub config
   138    gossipsub:
   139      rpc-inspector:
   140        # The size of the queue for notifications about invalid RPC messages
   141        notification-cache-size: 10_000
   142        validation: # RPC control message validation inspector configs
   143          inspection-queue:
   144            # Number of pool workers for the RPC validation inspector
   145            workers: 5
   146            # The size of the queue used by worker pool for the control message validation inspector
   147            queue-size: 100
   148          publish-messages:
   149            # The maximum number of messages in a single RPC message that are randomly sampled for async inspection.
   150            #	When the size of a single RPC message exceeds this threshold, a random sample is taken for inspection, but the RPC message is not truncated.
   151            max-sample-size: 1000
   152            # The threshold at which an error will be returned if the number of invalid RPC messages exceeds this value
   153            error-threshold: 500
   154          graft-and-prune:
   155            # The maximum number of GRAFT or PRUNE messages in a single RPC message.
   156            # When the total number of GRAFT or PRUNE messages in a single RPC message exceeds this threshold,
   157            #	a random sample of GRAFT or PRUNE messages will be taken and the RPC message will be truncated to this sample size.
   158            message-count-threshold: 1000
   159            # Maximum number of total duplicate topic ids in a single GRAFT or PRUNE message, ideally this should be 0 but we allow for some tolerance
   160            # to avoid penalizing peers that are not malicious but are misbehaving due to bugs or other issues.
   161            # A topic id is considered duplicate if it appears more than once in a single GRAFT or PRUNE message.
   162            duplicate-topic-id-threshold: 50
   163            # Maximum number of total invalid topic ids in GRAFTs/PRUNEs of a single RPC, ideally this should be 0 but we allow for some tolerance
   164            # to avoid penalizing peers that are not malicious but are misbehaving due to bugs or other issues. Exceeding this threshold causes RPC inspection failure with an invalid control message notification (penalty).
   165            invalid-topic-id-threshold: 50
   166          ihave:
   167            # The maximum allowed number of iHave messages in a single RPC message.
   168            #	Each iHave message represents the list of message ids. When the total number of iHave messages
   169            #	in a single RPC message exceeds this threshold, a random sample of iHave messages will be taken and the RPC message will be truncated to this sample size.
   170            #	The sample size is equal to the configured message-count-threshold.
   171            message-count-threshold: 1000
   172            # The maximum allowed number of message ids in a single iHave message.
   173            #	Each iHave message represents the list of message ids for a specific topic, and this parameter controls the maximum number of message ids
   174            #	that can be included in a single iHave message. When the total number of message ids in a single iHave message exceeds this threshold,
   175            #	a random sample of message ids will be taken and the iHave message will be truncated to this sample size.
   176            #	The sample size is equal to the configured message-id-count-threshold.
   177            message-id-count-threshold: 1000
   178            # The tolerance threshold for having duplicate topics in an iHave message under inspection.
   179            # When the total number of duplicate topic ids in a single iHave message exceeds this threshold, the inspection of the message will fail.
   180            #	Note that a topic ID is counted as a duplicate only if it is repeated more than once.
   181            duplicate-topic-id-threshold: 50
   182            # The tolerance threshold for duplicate message IDs in a single iHave message under inspection.
   183            # Ideally, an iHave message should not have any duplicate message IDs; a message id is considered duplicate when it is repeated more than once
   184            # within the same iHave message. When the total number of duplicate message ids in a single iHave message exceeds this threshold,
   185            # the inspection of the message will fail.
   186            duplicate-message-id-threshold: 100
   187            # Maximum number of total invalid topic ids in an IHAVE message on a single RPC, ideally this should be 0 but we allow for some tolerance
   188            # to avoid penalizing peers that are not malicious but are misbehaving due to bugs or other issues.  Exceeding this threshold causes RPC inspection failure with an invalid control message notification (penalty).
   189            invalid-topic-id-threshold: 50
   190          iwant:
   191            # The maximum allowed number of iWant messages in a single RPC message.
   192            #	Each iWant message represents the list of message ids. When the total number of iWant messages
   193            #	in a single RPC message exceeds this threshold, a random sample of iWant messages will be taken and the RPC message will be truncated to this sample size.
   194            #	The sample size is equal to the configured message-count-threshold.
   195            message-count-threshold: 1000
   196            # The maximum allowed number of message ids in a single iWant message.
   197            #	Each iWant message represents the list of message ids for a specific topic, and this parameter controls the maximum number of message ids
   198            #	that can be included in a single iWant message. When the total number of message ids in a single iWant message exceeds this threshold,
   199            #	a random sample of message ids will be taken and the iWant message will be truncated to this sample size.
   200            #	The sample size is equal to the configured message-id-count-threshold.
   201            message-id-count-threshold: 1000
   202            # The allowed threshold of iWant messages received without a corresponding tracked iHave message that was sent.
   203            # If the cache miss threshold is exceeded an invalid control message notification is disseminated and the sender will be penalized.
   204            cache-miss-threshold: 500
   205            # The max allowed number of duplicate message ids in a single iwant message.
   206            # Note that ideally there should be no duplicate message ids in a single iwant message but
   207            # we allow for some tolerance to avoid penalizing peers that are not malicious
   208            duplicate-message-id-threshold: 100
   209          cluster-prefixed-messages:
   210            # Cluster prefixed control message validation configs
   211            # The size of the cache used to track the number of cluster prefixed topics received by peers
   212            tracker-cache-size: 100
   213            # The decay value used for the geometric decay of cache counters used to keep track of cluster prefixed topics received by peers
   214            tracker-cache-decay: 0.99
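                  # Illustrative decay check: with a geometric decay of 0.99 per application, a tracked counter of 100 cluster
                  # prefixed topics shrinks to about 100 * 0.99^100 ~ 36.6 after 100 applications, so short bursts are forgiven
                  # over time while sustained traffic keeps the counter elevated.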
   215            # The upper bound on the number of cluster prefixed control messages that will be processed
   216            hard-threshold: 100
   217          process:
   218            inspection:
   219              # Serves as a fail-safe mechanism to globally deactivate inspection logic. When this fail-safe is activated it disables all
   220              # aspects of the inspection logic, irrespective of individual configurations like inspection.enable-graft, inspection.enable-prune, etc.
   221              # Consequently, all metrics collection and logging related to the rpc and inspection will also be disabled.
   222              # It is important to note that activating this fail-safe results in a comprehensive deactivation of all inspection features.
   223              # Please use this setting judiciously, considering its broad impact on the behavior of control message handling.
   224              disabled: false
   225              # Enables graft control message inspection.
   226              enable-graft: true
   227              # Enables prune control message inspection.
   228              enable-prune: true
   229              # Enables ihave control message inspection.
   230              enable-ihave: true
   231              # Enables iwant control message inspection.
   232              enable-iwant: true
   233              # Enables publish message inspection.
   234              enable-publish: true
   235              # When set to true, RPCs from unstaked peers will be rejected
   236              reject-unstaked-peers: true
   237            truncation:
   238              # Serves as a fail-safe mechanism to globally deactivate truncation logic. When this fail-safe is activated it disables all
   239              # aspects of the truncation logic, irrespective of individual configurations like truncation.enable-graft, truncation.enable-prune, etc.
   240              # Consequently, all metrics collection and logging related to the rpc and truncation will also be disabled.
   241              # It is important to note that activating this fail-safe results in a comprehensive deactivation of all truncation features.
   242              # Please use this setting judiciously, considering its broad impact on the behavior of control message handling.
   243              disabled: false
   244              # Enables graft control message truncation.
   245              enable-graft: true
   246              # Enables prune control message truncation.
   247              enable-prune: true
   248              # Enables ihave control message truncation.
   249              enable-ihave: true
   250              # Enables ihave message id truncation.
   251              enable-ihave-message-id: true
   252              # Enables iwant control message truncation.
   253              enable-iwant: true
   254              # Enables iwant message id truncation.
   255              enable-iwant-message-id: true
   256      rpc-tracer:
   257        # The default interval at which the mesh tracer logs the mesh topology. This is used for debugging and forensics purposes.
   258        #	Note that we purposefully choose this logging interval high enough to avoid spamming the logs. Moreover, the
   259        #	mesh updates will be logged individually and separately. The logging interval is only used to log the mesh
   260        # topology as a whole, especially when there are no updates to the mesh topology for a long time.
   261        local-mesh-logging-interval: 1m
   262        # The default interval at which the gossipsub score tracer logs the peer scores. This is used for debugging and forensics purposes.
   263        #	Note that we purposefully choose this logging interval high enough to avoid spamming the logs.
   264        score-tracer-interval: 1m
   265        # The default RPC sent tracker cache size. The RPC sent tracker is used to track RPC control messages sent from the local node.
   266        # Note: this cache size must be large enough to keep a history of sent messages over a reasonable time window.
   267        rpc-sent-tracker-cache-size: 1_000_000
   268        # Cache size of the rpc sent tracker queue used for async tracking.
   269        rpc-sent-tracker-queue-cache-size: 100_000
   270        # Number of workers for rpc sent tracker worker pool.
   271        rpc-sent-tracker-workers: 5
   272        # The gossipsub duplicate message tracker configuration.
   273        duplicate-message-tracker:
   274           cache-size: 10_000
   275           decay: .5
   276           # The threshold below which the decay function will not be called;
   277           # instead, the counter will be set to 0. This is to prevent the counter from lingering at a negligible value over time.
   278           skip-decay-threshold: 0.1
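                 # Illustrative check of the decay and skip threshold above: a duplicate message counter of 100, decayed
                 # geometrically by 0.5 per application, drops to 100 * 0.5^10 ~ 0.098 after 10 applications; since that is below
                 # the 0.1 skip-decay-threshold, the counter is then reset to 0.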
   279      # Peer scoring is the default value for enabling peer scoring
   280      peer-scoring-enabled: true
   281      scoring-parameters:
   282        peer-scoring:
   283          internal:
   284            # The weight for app-specific scores.
   285            # It is used to scale the app-specific scores to the same range as the other scores.
   286            # At the current version, we don't distinguish between the app-specific scores
   287            # and the other scores, so we set it to 1.
   288            app-specific-score-weight: 1
   289            # The default decay interval for the overall score of a peer at the GossipSub scoring
   290            # system. We set it to 1 minute: not so short that a malicious node can quickly recover from a penalty,
   291            # and not so long that a well-behaved node cannot recover from a penalty in a reasonable time.
   292            decay-interval: 1m
   293            # The default decay to zero for the overall score of a peer at the GossipSub scoring system.
   294            # It defines the maximum value below which a peer scoring counter is reset to zero.
   295            # This is to prevent the counter from decaying to a very small value.
   296            # The default value is 0.01, which means that a counter will be reset to zero if it decays to 0.01.
   297            # When a counter hits the DecayToZero threshold, it means that the peer did not exhibit the behavior
   298            # for a long time, and we can reset the counter.
   299            decay-to-zero: 0.01
   300            topic:
   301              # This is the default value for the skip atomic validation flag for topics.
   302              # We set it to true, which means gossipsub parameter validation will not fail if we leave some of the
   303              # topic parameters at their default values, i.e., zero. This is because we are not setting all
   304              # topic parameters at the current implementation.
   305              skip-atomic-validation: true
   306              # This value is applied to the square of the number of invalid message deliveries on a topic.
   307              # It is used to penalize peers that send invalid messages. By an invalid message, we mean a message that is not signed by the
   308              # publisher, or a message that is not signed by the peer that sent it.
   309              # An invalid message also can be a self-origin message, i.e., the peer sees its own message bounced back to it.
   310              # GossipSub has an edge-case that a peer may inadvertently request a self-origin message from a peer that it is connected to, through iHave-iWant messages, which is a
   311              # false-positive edge-case.
   312              # We set it to -10e-4, which means that with around 1414 invalid
   313              # message deliveries within a gossipsub heartbeat interval, the peer will be disconnected.
   314              # Note that we intentionally set this threshold high to avoid false-positively penalizing nodes due to self-origin message requests by iHave-iWants (a known issue in gossipsub).
   315              # The supporting math is as follows:
   316              # - each staked (i.e., authorized) peer is rewarded by the fixed reward of 100 (i.e., DefaultStakedIdentityReward).
   317              # - x invalid message deliveries will result in a penalty of x^2 * DefaultTopicInvalidMessageDeliveriesWeight, i.e., -x^2 * 10e-4.
   318              # - the peer will be disconnected when its penalty reaches -100 (i.e., MaxAppSpecificPenalty).
   319              # - so, the maximum number of invalid message deliveries that a peer can have before being disconnected is sqrt(200/10e-4) ~ 1414.
   320              invalid-message-deliveries-weight: -10e-4
   321              # The decay factor used to decay the number of invalid message deliveries.
   322              # The total number of invalid message deliveries is multiplied by this factor at each heartbeat interval to
   323              # decay the number of invalid message deliveries, and prevent the peer from being disconnected if it stops
   324              # sending invalid messages. We set it to 0.5, which means that the number of invalid message deliveries will
   325              # decay by 50% at each heartbeat interval.
   326              # The decay heartbeats are defined by the heartbeat interval of the gossipsub scoring system, which is 1 Minute (defaultDecayInterval).
   327              # Note that we set the decay factor low so that the invalid message deliveries will be decayed fast enough to prevent the peer from being disconnected on mediocre loads.
   328              # This is to address the false-positive disconnections that we observed in the network due to the self-origin message requests by iHave-iWants (a known issue in gossipsub).
   329              invalid-message-deliveries-decay: 0.5
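                    # Illustrative decay check: with this 0.5 decay applied at each decay interval (1 minute), a counter of 1000
                    # invalid message deliveries falls to 1000 * 0.5^10 ~ 0.98 (i.e., below 1) after about 10 minutes, provided
                    # the peer stops sending invalid messages.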
   330              # The default time in mesh quantum for the GossipSub scoring system. It is used to gauge
   331              # a discrete time interval for the time in mesh counter. We set it to 1 hour, which means that for every complete hour a peer is
   332              # in a topic mesh, the time in mesh counter will be incremented by 1 and is counted towards the availability score of the peer in that topic mesh.
   333              # The reason for setting it to 1 hour is that we want to reward peers that are in a topic mesh for a long time, and we want to avoid rewarding peers that
   334              # are churners, i.e., peers that join and leave a topic mesh frequently.
   335              time-in-mesh-quantum: 1h
   336              # The default weight of a topic in the GossipSub scoring system.
   337              # The overall score of a peer in a topic mesh is multiplied by the weight of the topic when calculating the overall score of the peer.
   338              # We set it to 1.0, which means that the overall score of a peer in a topic mesh is not affected by the weight of the topic.
   339              topic-weight: 1.0
   340              # This is applied to the number of actual message deliveries in a topic mesh
   341              # at each decay interval (i.e., defaultDecayInterval).
   342              # It is used to decay the number of actual message deliveries, and prevents past message
   343              # deliveries from affecting the current score of the peer.
   344              # As the decay interval is 1 minute, we set it to 0.5, which means that the number of actual message
   345              # deliveries will decay by 50% at each decay interval.
   346              mesh-message-deliveries-decay: 0.5
   347              # The maximum number of actual message deliveries in a topic
   348              # mesh that is used to calculate the score of a peer in that topic mesh.
   349              # We set it to 1000, which means that the maximum number of actual message deliveries in a
   350              # topic mesh that is used to calculate the score of a peer in that topic mesh is 1000.
   351              # This is to prevent the score of a peer in a topic mesh from being affected by a large number of actual
   352              # message deliveries and also affect the score of the peer in other topic meshes.
   353              # When the total delivered messages in a topic mesh exceeds this value, the score of the peer in that topic
   354              # mesh will not be affected by the actual message deliveries in that topic mesh.
   355              # Moreover, this does not allow the peer to accumulate a large number of actual message deliveries in a topic mesh
   356              # and then start under-performing in that topic mesh without being penalized.
   357              mesh-message-deliveries-cap: 1000
   358              # The threshold for the number of actual message deliveries in a
   359              # topic mesh that is used to calculate the score of a peer in that topic mesh.
   360              # If the number of actual message deliveries in a topic mesh is less than this value,
   361              # the peer will be penalized by square of the difference between the actual message deliveries and the threshold,
   362              # i.e., -w * (actual - threshold)^2 where `actual` and `threshold` are the actual message deliveries and the
   363              # threshold, respectively, and `w` is the weight (i.e., defaultTopicMeshMessageDeliveriesWeight).
   364              # We set it to 0.1 * defaultTopicMeshMessageDeliveriesCap, which means that if a peer delivers less than 10% of the
   365              # maximum number of actual message deliveries in a topic mesh, it will be considered as an under-performing peer
   366              # in that topic mesh.
   367              mesh-message-deliveries-threshold: 100
   368              # The weight for applying penalty when a peer is under-performing in a topic mesh.
   369              # Upon every decay interval, if the number of actual message deliveries is less than the topic mesh message deliveries threshold
   370              # (i.e., defaultTopicMeshMessageDeliveriesThreshold), the peer will be penalized by square of the difference between the actual
   371              # message deliveries and the threshold, multiplied by this weight, i.e., -w * (actual - threshold)^2 where w is the weight, and
   372              # `actual` and `threshold` are the actual message deliveries and the threshold, respectively.
   373              # We set this value to -0.05 * MaxAppSpecificReward / (defaultTopicMeshMessageDeliveriesThreshold^2). This guarantees that even if a peer
   374              # is not delivering any message in a topic mesh, it will not be disconnected.
   375              # Rather, part of the MaxAppSpecificReward that is awarded by our app-specific scoring function to all staked
   376              # nodes by default will be withdrawn, and the peer will be slightly penalized. In other words, under-performing in a topic mesh
   377              # will drop the overall score of a peer by 5% of the MaxAppSpecificReward that is awarded by our app-specific scoring function.
   378              # It means that under-performing in a topic mesh will not cause a peer to be disconnected, but it will cause the peer to lose
   379              # part of the MaxAppSpecificReward that is awarded by our app-specific scoring function.
   380              # At this point, we do not want to disconnect a peer only because it is under-performing in a topic mesh as it might be
   381              # causing a false positive network partition.
   382              mesh-deliveries-weight: -0.0005
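                    # Worked example of the penalty above: a peer with zero actual deliveries in a topic mesh is penalized by
                    # -0.0005 * (0 - 100)^2 = -5 at each evaluation, i.e., 5% of the MaxAppSpecificReward of 100, so chronic
                    # under-delivery lowers the peer's score without, by itself, crossing the graylist threshold.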
   383              # The window size is the time interval during which we count a delivery of an already
   384              # seen message towards the score of a peer in a topic mesh. The delivery is counted
   385              # by GossipSub only if the previous sender of the message is different from the current sender.
   386              # We set it to the decay interval of the GossipSub scoring system, which is 1 minute.
   387              # It means that if a peer delivers a message that it has already seen less than one minute ago,
   388              # the delivery will be counted towards the score of the peer in a topic mesh only if the previous sender of the message is different from the current sender.
   389              # This also prevents replay attacks of messages that are older than one minute, as replayed messages will not
   390              # be counted towards the actual message deliveries of a peer in a topic mesh.
   391              mesh-message-deliveries-window: 1m
   392              # The time interval that we wait for a new peer that joins a topic mesh
   393              # before we start counting the number of actual message deliveries of that peer in that topic mesh.
   394              # We set it to 2 * defaultDecayInterval, which means that we wait for 2 decay intervals before starting to count
   395              # the number of actual message deliveries of a peer in a topic mesh.
   396              # With a default decay interval of 1 minute, it means that we wait for 2 minutes before starting to count the
   397              # number of actual message deliveries of a peer in a topic mesh. This is to account for
   398              # the time that it takes for a peer to start up and receive messages from other peers in the topic mesh.
   399              mesh-message-delivery-activation: 2m
   400            thresholds:
   401              # When a peer's penalty drops below this threshold, no gossip
   402              # is emitted towards that peer and gossip from that peer is ignored.
   403              # Validation Constraint: GossipThreshold >= PublishThreshold && GossipThreshold < 0
   404              # How we use it: As the current max penalty is -100, we set the threshold to -99
   405              # so that all gossips to and from peers with penalty -100 are ignored.
   406              gossip: -99
   407              # When a peer's penalty drops below this threshold,
   408              # self-published messages are not propagated towards that peer.
   409              # Validation Constraint:
   410              # PublishThreshold >= GraylistThreshold && PublishThreshold <= GossipThreshold && PublishThreshold < 0.
   411              # How we use it: As the current max penalty is -100, we set the threshold to -99
   412              # so that all penalized peers are deprived of receiving any published messages.
   413              publish: -99
   414              # When a peer's penalty drops below this threshold,
   415              # the peer is graylisted, i.e., incoming RPCs from the peer are ignored.
   416              # Validation Constraint:
   417              # GraylistThreshold <= PublishThreshold && GraylistThreshold <= GossipThreshold && GraylistThreshold < 0
   418              # How we use it: As the current max penalty is -100, we set the threshold to -99
   419              # so that all penalized peers are graylisted.
   420              graylist: -99
   421              # When a peer sends us PX information with a prune,
   422              # we only accept it and connect to the supplied peers if the originating peer's
   423              # penalty exceeds this threshold.
   424              # Validation Constraint: must be non-negative.
   425              # How we use it: As the current max reward is 100, we set the threshold to 99
   426              # so that we only receive supplied peers from well-behaved peers.
   427              accept-px: 99
   428              # When the median peer penalty in the mesh drops
   429              # below this value, the node may select more peers with penalty above the median
   430              # to opportunistically graft onto the mesh.
   431              # Validation Constraint: must be non-negative.
   432              # How we use it: We set it to 100 + 1 so that we only
   433              # opportunistically graft peers that are neither access nodes (i.e., scored -1)
   434              # nor penalized peers (i.e., scored -100).
   435              opportunistic-graft: 101
   436            behaviour:
   437              # The threshold when the behavior of a peer is considered as bad by GossipSub.
   438              # Currently, the misbehavior is defined as advertising an iHave without responding to the iWants (iHave broken promises), as well as attempting
   439              # on GRAFT when the peer is considered for a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh
   440              # for a while, and the remote peer keeps attempting on GRAFT (aka GRAFT flood).
   441              # When the misbehavior counter of a peer goes beyond this threshold, the peer is penalized by defaultBehaviorPenaltyWeight (see below) for the excess misbehavior.
   442              #
   443              # An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
   444              # For iHave broken promises, the gossipsub scoring works as follows:
   445              # It samples ONLY A SINGLE iHave out of the entire RPC.
   446              # If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
   447              #
   448              # We set it to 10, meaning that we at most tolerate 10 of such RPCs containing iHave broken promises. After that, the peer is penalized for every
   449              # excess RPC containing iHave broken promises.
   450              # The counter is also decayed by (0.99) every decay interval (defaultDecayInterval) i.e., every minute.
   451              # Note that misbehaviors are counted by GossipSub across all topics (which is different from the Application Layer Misbehaviors that we count through
   452              # the ALSP system).
   453              penalty-threshold: 1000
   454              # The weight for applying penalty when a peer misbehavior goes beyond the threshold.
   455              # Misbehavior of a peer at gossipsub layer is defined as advertising an iHave without responding to the iWants (broken promises), as well as attempting
   456              # on GRAFT when the peer is considered for a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh for a while.
   457              # This is detected by the GossipSub scoring system, and the peer is penalized by defaultBehaviorPenaltyWeight.
   458              #
   459              # An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
   460              # For iHave broken promises, the gossipsub scoring works as follows:
   461              # It samples ONLY A SINGLE iHave out of the entire RPC.
   462              # If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
   463              #
   464              # The penalty is applied to the square of the difference between the misbehavior counter and the threshold, i.e., -|w| * (misbehavior counter - threshold)^2.
   465              # We set it to 0.01 * MaxAppSpecificPenalty, which means that misbehaving 10 times more than the threshold (i.e., 10 + 10) will cause the peer to lose
   466              # its entire AppSpecificReward that is awarded by our app-specific scoring function to all staked (i.e., authorized) nodes by default.
   467              # Moreover, as the MaxAppSpecificPenalty is -MaxAppSpecificReward, misbehaving sqrt(2) * 10 times more than the threshold will cause the peer score
   468              # to be dropped below the MaxAppSpecificPenalty, which is also below the GraylistThreshold, and the peer will be graylisted (i.e., disconnected).
   469              #
   470              # The math is as follows: -|w| * (misbehavior - threshold)^2 = 0.01 * MaxAppSpecificPenalty * (misbehavior - threshold)^2 < 2 * MaxAppSpecificPenalty
   471              # if misbehavior > threshold + sqrt(2) * 10.
   472              # As shown above, with this choice of defaultBehaviorPenaltyWeight, misbehaving sqrt(2) * 10 times more than the threshold will cause the peer score
   473              # to be dropped below the MaxAppSpecificPenalty, which is also below the GraylistThreshold, and the peer will be graylisted (i.e., disconnected). This weight
   474              # is chosen in a way that with almost a few misbehaviors more than the threshold, the peer will be graylisted. The rationale relies on the fact that
   475              # the misbehavior counter is incremented by 1 for each RPC containing one or more broken promises. Hence, it is per RPC, and not per broken promise.
   476              # Having sqrt(2) * 10 broken promises RPC is a blatant misbehavior, and the peer should be graylisted. With decay interval of 1 minute, and decay value of
   477              # 0.99 we expect a graylisted node due to broken promises to get back in about 527 minutes, i.e., (0.99)^x * (sqrt(2) * 10)^2 * MaxAppSpecificPenalty > GraylistThreshold
   478              # where x is the number of decay intervals that the peer is graylisted. As MaxAppSpecificPenalty and GraylistThresholds are close, we can simplify the inequality
   479              # to (0.99)^x * (sqrt(2) * 10)^2 > 1 --> (0.99)^x * 200 > 1 --> (0.99)^x > 1/200 --> x > log(1/200) / log(0.99) --> x > 527.17 decay intervals, i.e., 527 minutes.
   480              # Note that misbehaviors are counted by GossipSub across all topics (which is different from the Application Layer Misbehaviors that we count through
   481              # the ALSP system that are reported by the engines).
   482              penalty-weight: -0.01
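                    # Worked check with the values configured here: with penalty-threshold 1000 and penalty-weight -0.01, a
                    # misbehavior counter of 1100 yields -0.01 * (1100 - 1000)^2 = -100, i.e., the MaxAppSpecificPenalty, which is
                    # below the -99 graylist threshold, so such a peer would be graylisted until the counter decays.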
   483              # The decay interval for the misbehavior counter of a peer. The misbehavior counter is
   484              # incremented by GossipSub for iHave broken promises or the GRAFT flooding attacks (i.e., each GRAFT received from a remote peer while that peer is on a PRUNE backoff).
   485              #
   486              # An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
   487              # For iHave broken promises, the gossipsub scoring works as follows:
   488              # It samples ONLY A SINGLE iHave out of the entire RPC.
   489              # If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
   490              # This means that regardless of how many iHave broken promises an RPC contains, the misbehavior counter is incremented by 1.
   491              # That is why we decay the misbehavior counter very slowly, as this counter indicates a severe misbehavior.
   492              #
   493              # The misbehavior counter is decayed per decay interval (i.e., defaultDecayInterval = 1 minute) by GossipSub.
   494              # We set it to 0.99, which means that the misbehavior counter is decayed by 1% per decay interval.
   495              # With the generous threshold that we set (i.e., defaultBehaviourPenaltyThreshold = 10), we treat peers going beyond the threshold as persistent misbehavers.
   496              # We expect honest peers never to go beyond the threshold, and if they do, we expect them to go back below the threshold quickly.
   497              #
   498              # Note that misbehaviors are counted by GossipSub across all topics (which is different from the Application Layer Misbehaviors that we count through
   499              # the ALSP system that is based on the engines report).
   500              penalty-decay: 0.5
   501          protocol:
   502            # The max number of debug/trace log events per second.
   503            # Logs emitted above this threshold are dropped.
   504            max-debug-logs: 50
   505            application-specific:
   506              # This is the maximum penalty for severe offenses that we apply
   507              # to a remote node score. The score mechanism of GossipSub in Flow is designed
   508              # in a way that all other infractions are penalized with a fraction of this value.
   509              # We have also set the other parameters such as GraylistThreshold,
   510              # GossipThreshold, and PublishThreshold to be a bit higher than this,
   511              # i.e., -100 + 1. This ensures that a node with a score of
   512              # -100 will be graylisted (i.e., all incoming and outgoing RPCs
   513              # are rejected) and will not be able to publish or gossip any messages.
   514              max-app-specific-penalty: -100
   515              min-app-specific-penalty: -1
   516              # This is the penalty for unknown identity. It is
   517              # applied to the peer's score when the peer is not in the identity list.
   518              unknown-identity-penalty: -100
   519              # This is the penalty for invalid subscription.
   520              # It is applied to the peer's score when the peer subscribes to a topic that it is
   521              # not authorized to subscribe to.
   522              invalid-subscription-penalty: -100
   523              # The penalty for duplicate messages detected by the gossipsub tracer for a peer.
   524              # The penalty is multiplied by the current duplicate message count for a peer before it is applied to the application specific score.
   525              duplicate-message-penalty: -10e-4
   526              # The threshold at which the duplicate message count for a peer will result in the peer being penalized
   527              duplicate-message-threshold: 10e+4
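                    # Illustrative check (assuming the penalty is applied as duplicate-message-penalty * count, as described
                    # above): at the threshold count of 10e+4 duplicates, the applied penalty would be -10e-4 * 10e+4 = -100,
                    # i.e., the MaxAppSpecificPenalty, which alone is enough to cross the -99 graylist threshold.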
   528              # This is the reward for well-behaving staked peers.
   529              # If a peer does not have any misbehavior record, e.g., invalid subscription,
   530              # invalid message, etc., it will be rewarded with this score.
   531              max-app-specific-reward: 100
   532              # This is the reward for staking peers. It is applied
   533              # to the peer's score when the peer does not have any misbehavior record, e.g.,
   534              # invalid subscription, invalid message, etc. The purpose is to reward the staking
   535              # peers for their contribution to the network and prioritize them in neighbor selection.
   536              staked-identity-reward: 100
   537        scoring-registry:
   538          # Defines the duration of time, after the node startup,
   539          # during which the scoring registry remains inactive before penalizing nodes.
   540          # Throughout this startup silence period, the application-specific penalty
   541          # returned for all nodes will be 0, and any invalid control message notifications
   542          # will be ignored. This configuration allows nodes to stabilize and initialize before
   543          # applying penalties or processing invalid control message notifications.
   544          startup-silence-duration: 1h
   545          app-specific-score:
   546          # number of workers that asynchronously process app specific score update requests when the scores expire.
   547            score-update-worker-num: 5
   548            # size of the queue used by the worker pool for the app specific score update requests. The queue is used to buffer the app specific score update requests
   549            # before they are processed by the worker pool. The queue size must be larger than 10x total number of peers in the network.
   550            # The queue is deduplicated based on the peer ids ensuring that there is only one app specific score update request per peer in the queue.
   551            score-update-request-queue-size: 10_000
   552          # score ttl is the time to live for the app specific score. Once the score expires, a new request will be sent to the app specific score provider to update the score.
   553            # until the score is updated, the previous score will be used.
   554            score-ttl: 1m
   555            # size of the queue used by the score registry to buffer the invalid control message notifications before they are processed by the worker pool. The queue size must be larger than 10x total number of peers in the network.
   556            invalid-control-message-notification-queue-size: 10_000
   557          spam-record-cache:
   558            # size of cache used to track spam records at gossipsub. Each peer id is mapped to a spam record that keeps track of the spam score for that peer.
   559            # cache should be big enough to keep track of the entire network's size. Otherwise, the local node's view of the network will be incomplete due to cache eviction.
   560            cache-size: 10_000
   561            decay:
   562              # Threshold level for spam record penalty.
   563              # At each evaluation period, when a node's penalty is below this value, the decay rate slows down, ensuring longer decay periods for malicious nodes and quicker decay for honest ones.
   564              penalty-decay-slowdown-threshold: -99
   565              # This setting adjusts the decay rate when a node's penalty falls below the threshold.
   566              # The decay rate, ranging between 0 and 1, dictates how quickly penalties decrease: a higher rate results in slower decay.
   567              # The decay calculation is multiplicative (newPenalty = decayRate * oldPenalty).
   568              # The reduction factor increases the decay rate, thus decelerating the penalty reduction. For instance, with a 0.01 reduction factor,
   569              # the decay rate increases by 0.01 at each evaluation interval when the penalty is below the threshold.
   570              # Consequently, a decay rate of `x` diminishes the penalty to zero more rapidly than a rate of `x+0.01`.
   571              penalty-decay-rate-reduction-factor: 0.01
   572              # Defines the frequency for evaluating and potentially adjusting the decay process of a spam record.
   573              # At each interval, the system assesses the current penalty of a node.
   574              # If this penalty is below the defined threshold, the decay rate is modified according to the reduction factor, slowing down the penalty reduction process.
   575              # This reassessment at regular intervals ensures that the decay rate is dynamically adjusted to reflect the node's ongoing behavior,
   576              # maintaining a balance between penalizing malicious activity and allowing recovery for honest nodes.
   577              penalty-decay-evaluation-period: 10m
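                    # Illustrative timeline (a sketch, assuming the decay factor is capped at the 0.99 minimum decay speed below):
                    # a node penalized below the -99 threshold has its decay factor raised by 0.01 at each 10-minute evaluation,
                    # so a factor of 0.95 would reach 0.99 after four evaluations, i.e., about 40 minutes.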
   578              # The minimum speed at which the spam penalty value of a peer is decayed.
   579              # A spam record is initialized with a decay value between 0.5 and 0.7, and this value is then adjusted up to 0.99 on consecutive misbehaviors.
   580              # The maximum decay value decays the penalty by 1% every second. The decay is applied geometrically, i.e., `newPenalty = oldPenalty * decay`; hence, a higher decay value
   581              # indicates a lower decay speed, i.e., it takes more heartbeat intervals to decay a penalty back to zero when the decay value is high.
   582              # assume:
   583              #     penalty = -100 (the maximum application specific penalty is -100)
   584              #     skipDecayThreshold = -0.1
   585              # it takes around 688 seconds for the penalty to decay to a value greater than -0.1 and be reset to 0:
   586              #     x * 0.99 ^ n > -0.1 (where x is the negative penalty).
   587              # Dividing both sides by x (which is negative) flips the inequality:
   588              #     0.99 ^ n < -0.1 / x
   589              # Now we can take the logarithm of both sides (with any base, but let's use base 10 for simplicity).
   590              #     log( 0.99 ^ n ) < log( -0.1 / x )
   591              # Using the properties of logarithms, we can bring down the exponent:
   592              #     n * log( 0.99 ) < log( -0.1 / x )
   593              # And finally, we can solve for n; since log( 0.99 ) is negative, the inequality flips again:
   594              #     n > log( -0.1 / x ) / log( 0.99 )
   595              # We can plug in x = -100:
   596              #     n > log( -0.1 / -100 ) / log( 0.99 ) = log( 0.001 ) / log( 0.99 )
   597              #     n > -3 / log( 0.99 )
   598              #     n > 687.31
   599              minimum-spam-penalty-decay-factor: 0.99
   600              # The maximum rate at which the spam penalty value of a peer decays. The decay factor increases (so the decay slows down)
   601              # during sustained malicious activity, leading to a slower recovery of the app-specific score for the penalized node. Conversely,
   602              # the decay factor stays lower, allowing faster recoveries, when nodes exhibit only fleeting misbehavior.
   603              maximum-spam-penalty-decay-factor: 0.8
   604              # The threshold above which (i.e., closer to zero than) a negative penalty is no longer decayed;
   605              # instead, the penalty is set to 0. This is to prevent the penalty from keeping a small negative value for a long time.
   606              skip-decay-threshold: -0.1
   607          misbehaviour-penalties:
   608            # The penalty applied to the application specific penalty when a peer conducts a graft misbehaviour.
   609            graft: -10
   610            # The penalty applied to the application specific penalty when a peer conducts a prune misbehaviour.
   611            prune: -10
   612            # The penalty applied to the application specific penalty when a peer conducts an iHave misbehaviour.
   613            ihave: -10
   614            # The penalty applied to the application specific penalty when a peer conducts an iWant misbehaviour.
   615            iwant: -10
   616            # The penalty applied to the application specific penalty when a peer conducts an rpc publish message misbehaviour.
   617            publish: -10
   618            # The factor used to reduce the penalty for control message misbehaviours on cluster prefixed topics. This allows a more lenient punishment for nodes
   619            # that fall behind and may need to request old data.
   620            cluster-prefixed-reduction-factor: 0.2
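                  # Worked example of the reduction factor: a graft misbehaviour on a cluster prefixed topic is penalized by
                  # graft * cluster-prefixed-reduction-factor = -10 * 0.2 = -2 instead of the full -10, giving nodes that fall
                  # behind and need to request old cluster data a more lenient punishment.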
   621      subscription-provider:
   622      # The interval for updating the list of peers subscribed to each topic in gossipsub. This is used to keep track of subscription
   623      # violations and penalize peers accordingly. The recommended value is on the order of a few minutes to avoid contention, as the operation
   624      # reads all topics and all peers subscribed to each topic.
   625        update-interval: 10m
   626        # The size of cache for keeping the list of all peers subscribed to each topic (same as the local node). This cache is the local node's
   627      # view of the network and is used to detect subscription violations and penalize peers accordingly. Recommended to be big enough to
   628      # cover the entire network. Otherwise, the local node's view of the network will be incomplete due to cache eviction.
   629        # Recommended size is 10x the number of peers in the network.
   630        cache-size: 10000
   631    # Application layer spam prevention
   632    alsp-spam-record-cache-size: 1000
   633    alsp-spam-report-queue-size: 10_000
   634    alsp-disable-penalty: false
   635    alsp-heart-beat-interval: 1s
   636    # Base probability in [0,1] that's used in creating the final probability of creating a
   637    # misbehavior report for a BatchRequest message. This is why the word "base" is used in the name of this field,
   638    # since it's not the final probability and there are other factors that determine the final probability.
   639    # The reason for this is that we want to increase the probability of creating a misbehavior report for a large batch.
   640    # Create misbehavior report for about 0.2% of BatchRequest messages for normal batch requests (i.e. not too large)
   641    # The final batch request probability is calculated as follows:
   642    # batchRequestBaseProb * (len(batchRequest.BlockIDs) + 1) / synccore.DefaultConfig().MaxSize
   643    # Example 1 (small batch of block IDs) if the batch request is for 10 block IDs and batchRequestBaseProb is 0.01, then the probability of
   644    # creating a misbehavior report is:
   645    # batchRequestBaseProb * (10+1) / synccore.DefaultConfig().MaxSize
   646    # = 0.01 * 11 / 64 = 0.00171875 = 0.171875%
   647    # Example 2 (large batch of block IDs) if the batch request is for 1000 block IDs and batchRequestBaseProb is 0.01, then the probability of
   648    # creating a misbehavior report is:
   649    # batchRequestBaseProb * (1000+1) / synccore.DefaultConfig().MaxSize
   650    # = 0.01 * 1001 / 64 = 0.15640625 = 15.640625%
   651    alsp-sync-engine-batch-request-base-prob: 0.01
   652    # Base probability in [0,1] that's used in creating the final probability of creating a
   653    # misbehavior report for a RangeRequest message. This is why the word "base" is used in the name of this field,
   654    # since it's not the final probability and there are other factors that determine the final probability.
   655    # The reason for this is that we want to increase the probability of creating a misbehavior report for a large range.
   656    # Create misbehavior report for about 0.2% of RangeRequest messages for normal range requests (i.e. not too large)
   657    # and about 15% of RangeRequest messages for very large range requests.
   658    # The final probability is calculated as follows:
   659    # rangeRequestBaseProb * ((rangeRequest.ToHeight-rangeRequest.FromHeight) + 1) / synccore.DefaultConfig().MaxSize
   660    # Example 1 (small range) if the range request is for 10 blocks and rangeRequestBaseProb is 0.01, then the probability of
   661    # creating a misbehavior report is:
   662    # rangeRequestBaseProb * (10+1) / synccore.DefaultConfig().MaxSize
   663    # = 0.01 * 11 / 64 = 0.00171875 = 0.171875%
   664    # Example 2 (large range) if the range request is for 1000 blocks and rangeRequestBaseProb is 0.01, then the probability of
   665    # creating a misbehavior report is:
   666    # rangeRequestBaseProb * (1000+1) / synccore.DefaultConfig().MaxSize
   667    # = 0.01 * 1001 / 64 = 0.15640625 = 15.640625%
   668    alsp-sync-engine-range-request-base-prob: 0.01
   669    # Probability in [0,1] of creating a misbehavior report for a SyncRequest message.
   670    # create misbehavior report for 1% of SyncRequest messages
   671    alsp-sync-engine-sync-request-prob: 0.01