github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/node/libp2pNode.go

// Package p2pnode encapsulates the libp2p library
package p2pnode

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/go-playground/validator/v10"
	"github.com/hashicorp/go-multierror"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	kbucket "github.com/libp2p/go-libp2p-kbucket"
	"github.com/libp2p/go-libp2p/core/host"
	libp2pnet "github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
	"github.com/libp2p/go-libp2p/core/routing"
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/component"
	"github.com/onflow/flow-go/module/irrecoverable"
	flownet "github.com/onflow/flow-go/network"
	"github.com/onflow/flow-go/network/channels"
	"github.com/onflow/flow-go/network/internal/p2putils"
	"github.com/onflow/flow-go/network/p2p"
	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
	nodeinternal "github.com/onflow/flow-go/network/p2p/node/internal"
	"github.com/onflow/flow-go/network/p2p/unicast/protocols"
	"github.com/onflow/flow-go/utils/logging"
)

// binary size unit derived via iota bit shifting: mb is 1 MiB (1 << 20 bytes).
const (
	_ = iota
	_ = 1 << (10 * iota)
	mb
)

const (
	// DefaultMaxPubSubMsgSize defines the maximum message size in publish and multicast modes.
	DefaultMaxPubSubMsgSize = 5 * mb // 5 MiB

	// findPeerQueryTimeout is the timeout for FindPeer queries to the routing system.
	// TODO: is this a sensible value?
	findPeerQueryTimeout = 10 * time.Second
)

var _ p2p.LibP2PNode = (*Node)(nil)

// Node is a wrapper around the LibP2P host.
type Node struct {
	component.Component
	sync.RWMutex
	uniMgr      p2p.UnicastManager
	host        host.Host // reference to the libp2p host (https://godoc.org/github.com/libp2p/go-libp2p/core/host)
	pubSub      p2p.PubSubAdapter
	logger      zerolog.Logger                      // used to provide logging
	topics      map[channels.Topic]p2p.Topic        // map of a topic string to an actual topic instance
	subs        map[channels.Topic]p2p.Subscription // map of a topic string to an actual subscription
	routing     routing.Routing
	pCache      p2p.ProtocolPeerCache
	peerManager p2p.PeerManager
	// Cache of temporarily disallow-listed peers. When a peer is disallow-listed, the connections to that peer
	// are closed and further connections are not allowed until the peer is removed from the disallow-list.
	disallowListedCache p2p.DisallowListCache
	parameters          *p2p.NodeParameters
}

// NewNode creates a new libp2p node and sets its parameters.
// Args:
//   - cfg: The configuration for the libp2p node.
//
// Returns:
//   - *Node: The created libp2p node.
//   - error: An error, if any occurred during the process. This includes failure in creating the node. The
//     returned error is irrecoverable, and the node cannot be used.
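//
// Example (illustrative sketch, not taken from this repository; `h`, `logger`, `peerManager`, `params`,
// and `disallowListCacheCfg` are assumed to be provided by the caller and only the config fields used by
// NewNode below are shown):
//
//	node, err := p2pnode.NewNode(&p2p.NodeConfig{
//		Logger:               logger,               // zerolog.Logger
//		Host:                 h,                    // libp2p host.Host
//		PeerManager:          peerManager,          // may be nil if peer management is not used
//		Parameters:           params,               // *p2p.NodeParameters
//		DisallowListCacheCfg: disallowListCacheCfg, // provides MaxSize and Metrics
//	})
//	if err != nil {
//		// the error is irrecoverable; the node cannot be used
//		return err
//	}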
func NewNode(cfg *p2p.NodeConfig) (*Node, error) {
	err := validator.New().Struct(cfg)
	if err != nil {
		return nil, fmt.Errorf("invalid config: %w", err)
	}

	pCache, err := nodeinternal.NewProtocolPeerCache(cfg.Logger, cfg.Host)
	if err != nil {
		return nil, fmt.Errorf("failed to create protocol peer cache: %w", err)
	}

	return &Node{
		host:        cfg.Host,
		logger:      cfg.Logger.With().Str("component", "libp2p-node").Logger(),
		topics:      make(map[channels.Topic]p2p.Topic),
		subs:        make(map[channels.Topic]p2p.Subscription),
		pCache:      pCache,
		peerManager: cfg.PeerManager,
		parameters:  cfg.Parameters,
		disallowListedCache: nodeinternal.NewDisallowListCache(
			cfg.DisallowListCacheCfg.MaxSize,
			cfg.Logger.With().Str("module", "disallow-list-cache").Logger(),
			cfg.DisallowListCacheCfg.Metrics,
		),
	}, nil
}

// Start starts the libp2p node by starting its underlying component.
func (n *Node) Start(ctx irrecoverable.SignalerContext) {
	n.Component.Start(ctx)
}

// Stop terminates the libp2p node.
// All errors returned from this function can be considered benign.
func (n *Node) Stop() error {
	var result error

	n.logger.Debug().Msg("unsubscribing from all topics")
	for t := range n.topics {
		err := n.unsubscribeTopic(t)
		// context cancelled errors are expected while unsubscribing from topics during shutdown
		if err != nil && !errors.Is(err, context.Canceled) {
			result = multierror.Append(result, err)
		}
	}

	n.logger.Debug().Msg("stopping libp2p node")
	if err := n.host.Close(); err != nil {
		result = multierror.Append(result, err)
	}

	n.logger.Debug().Msg("closing peer store")
	// to prevent a peerstore routine leak (https://github.com/libp2p/go-libp2p/issues/718)
	if err := n.host.Peerstore().Close(); err != nil {
		n.logger.Debug().Err(err).Msg("closing peer store")
		result = multierror.Append(result, err)
	}

	if result != nil {
		return result
	}

	addrs := len(n.host.Network().ListenAddresses())
	ticker := time.NewTicker(time.Millisecond * 2)
	defer ticker.Stop()
	timeout := time.After(time.Second)
	for addrs > 0 {
		// wait for all listen addresses to have been removed
		select {
		case <-timeout:
			n.logger.Error().Int("listen_addrs", addrs).Msg("listen addresses still open")
			return nil
		case <-ticker.C:
			addrs = len(n.host.Network().ListenAddresses())
		}
	}

	n.logger.Debug().Msg("libp2p node stopped successfully")

	return nil
}

// ConnectToPeer adds the peer to this node's peerstore and connects to it.
// All errors returned from this function can be considered benign.
func (n *Node) ConnectToPeer(ctx context.Context, peerInfo peer.AddrInfo) error {
	return n.host.Connect(ctx, peerInfo)
}

// RemovePeer closes the connection with the peer.
// All errors returned from this function can be considered benign.
func (n *Node) RemovePeer(peerID peer.ID) error {
	err := n.host.Network().ClosePeer(peerID)
	if err != nil {
		return fmt.Errorf("failed to remove peer %s: %w", peerID, err)
	}
	// logging with suspicious level as we only expect to disconnect from a peer if it is not part of the
	// protocol state.
	n.logger.Warn().
		Str("peer_id", p2plogging.PeerId(peerID)).
		Bool(logging.KeySuspicious, true).
		Msg("disconnected from peer")

	return nil
}

// GetPeersForProtocol returns a slice of peer IDs for the specified protocol ID.
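//
// Example (illustrative sketch; `node` and the protocol.ID `pid` are assumed to be provided by the
// caller and are not defined in this file):
//
//	for _, p := range node.GetPeersForProtocol(pid) {
//		fmt.Println("peer supporting protocol:", p)
//	}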
func (n *Node) GetPeersForProtocol(pid protocol.ID) peer.IDSlice {
	pMap := n.pCache.GetPeers(pid)
	peers := make(peer.IDSlice, 0, len(pMap))
	for p := range pMap {
		peers = append(peers, p)
	}
	return peers
}

// OpenAndWriteOnStream opens a new stream to a peer. The stream is opened to the given peerID
// and writingLogic is executed on the stream. The created stream does not need to be reused and can be inexpensively
// created for each send. Moreover, the stream creation does not incur a round-trip time as the stream negotiation happens
// on an existing connection.
//
// Args:
//   - ctx: The context used to control the stream's lifecycle.
//   - peerID: The ID of the peer to open the stream to.
//   - protectionTag: A tag that protects the connection and ensures that the connection manager keeps it alive, and
//     won't prune the connection while the tag is active.
//   - writingLogic: A callback function that contains the logic for writing to the stream. It allows an external caller to
//     write to the stream without having to worry about stream creation and management.
//
// Returns:
//   - error: An error, if any occurred during the process. This includes failure in creating the stream, setting the write
//     deadline, executing the writing logic, resetting the stream if the writing logic fails, or closing the stream.
//     All returned errors during this process can be considered benign.
func (n *Node) OpenAndWriteOnStream(ctx context.Context, peerID peer.ID, protectionTag string, writingLogic func(stream libp2pnet.Stream) error) error {
	lg := n.logger.With().Str("remote_peer_id", p2plogging.PeerId(peerID)).Logger()
	if n.parameters.EnableProtectedStreams {
		n.host.ConnManager().Protect(peerID, protectionTag)
		defer n.host.ConnManager().Unprotect(peerID, protectionTag)
		lg = lg.With().Str("protection_tag", protectionTag).Logger()
		lg.Trace().Msg("attempting to create protected stream")
	}

	// streams don't need to be reused and are fairly inexpensive to create for each send.
	// A stream creation does NOT incur an RTT as stream negotiation happens on an existing connection.
	s, err := n.createStream(ctx, peerID)
	if err != nil {
		return fmt.Errorf("failed to create stream for %s: %w", peerID, err)
	}
	lg.Trace().Msg("successfully created stream")

	deadline, _ := ctx.Deadline()
	err = s.SetWriteDeadline(deadline)
	if err != nil {
		return fmt.Errorf("failed to set write deadline for stream: %w", err)
	}
	lg.Trace().Msg("successfully set write deadline on stream")

	err = writingLogic(s)
	if err != nil {
		// reset the stream to ensure that the next stream creation is not affected by the error.
		resetErr := s.Reset()
		if resetErr != nil {
			n.logger.Error().
				Str("target_peer_id", p2plogging.PeerId(peerID)).
				Err(resetErr).
				Msg("failed to reset stream")
		}

		return fmt.Errorf("writing logic failed for %s: %w", peerID, err)
	}
	lg.Trace().Msg("successfully wrote on stream")

	// close the stream immediately
	err = s.Close()
	if err != nil {
		return fmt.Errorf("failed to close the stream for %s: %w", peerID, err)
	}
	lg.Trace().Msg("successfully closed stream")

	return nil
}

// createStream creates a new stream to the given peer.
// Args:
//   - ctx: The context used to control the stream's lifecycle.
//   - peerID: The ID of the peer to open the stream to.
//
// Returns:
//   - libp2pnet.Stream: The created stream.
//   - error: An error, if any occurred during the process. This includes failure in creating the stream. All returned
//     errors during this process can be considered benign.
func (n *Node) createStream(ctx context.Context, peerID peer.ID) (libp2pnet.Stream, error) {
	lg := n.logger.With().Str("peer_id", p2plogging.PeerId(peerID)).Logger()

	// If we do not currently have any addresses for the given peer, stream creation will almost
	// certainly fail. If this Node was configured with a routing system, we can try to use it to
	// look up the address of the peer.
	if len(n.host.Peerstore().Addrs(peerID)) == 0 && n.routing != nil {
		lg.Debug().Msg("address not found in peer store, searching for peer in routing system")

		var err error
		func() {
			timedCtx, cancel := context.WithTimeout(ctx, findPeerQueryTimeout)
			defer cancel()
			// try to find the peer using the routing system
			_, err = n.routing.FindPeer(timedCtx, peerID)
		}()

		if err != nil {
			lg.Warn().Err(err).Msg("address not found in either peer store or routing system")
		} else {
			lg.Debug().Msg("address not found in peer store, but found in routing system search")
		}
	}

	stream, err := n.uniMgr.CreateStream(ctx, peerID)
	if err != nil {
		return nil, flownet.NewPeerUnreachableError(fmt.Errorf("could not create stream peer_id: %s: %w", peerID, err))
	}

	lg.Info().
		Str("networking_protocol_id", string(stream.Protocol())).
		Msg("stream successfully created to remote peer")
	return stream, nil
}

// ID returns the peer.ID of the node, which is the unique identifier of the node at the libp2p level.
// For other libp2p nodes, the current node is identified by this ID.
func (n *Node) ID() peer.ID {
	return n.host.ID()
}

// GetIPPort returns the IP and port the libp2p node is listening on.
// All errors returned from this function can be considered benign.
func (n *Node) GetIPPort() (string, string, error) {
	return p2putils.IPPortFromMultiAddress(n.host.Network().ListenAddresses()...)
}

// RoutingTable returns the node's routing table.
func (n *Node) RoutingTable() *kbucket.RoutingTable {
	return n.routing.(*dht.IpfsDHT).RoutingTable()
}

// ListPeers returns the list of peer IDs for peers subscribed to the topic.
func (n *Node) ListPeers(topic string) []peer.ID {
	return n.pubSub.ListPeers(topic)
}

// Subscribe subscribes the node to the given topic and returns the subscription.
// All errors returned from this function can be considered benign.
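//
// Example (illustrative sketch; `node`, `myTopic`, and `validator` are assumed to be provided by the
// caller, with `validator` satisfying p2p.TopicValidatorFunc — none of them are defined in this file):
//
//	sub, err := node.Subscribe(myTopic, validator)
//	if err != nil {
//		return fmt.Errorf("could not subscribe to %s: %w", myTopic, err)
//	}
//	defer func() {
//		_ = node.Unsubscribe(myTopic) // cancels the subscription and closes the topic
//	}()
//	// consume messages from sub (p2p.Subscription) in a separate goroutine.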
func (n *Node) Subscribe(topic channels.Topic, topicValidator p2p.TopicValidatorFunc) (p2p.Subscription, error) {
	n.Lock()
	defer n.Unlock()

	// Check if the topic has already been created and is in the cache
	tp, found := n.topics[topic]
	var err error
	if !found {
		if err := n.pubSub.RegisterTopicValidator(topic.String(), topicValidator); err != nil {
			n.logger.Err(err).Str("topic", topic.String()).Msg("failed to register topic validator, aborting subscription")
			return nil, fmt.Errorf("failed to register topic validator: %w", err)
		}

		tp, err = n.pubSub.Join(topic.String())
		if err != nil {
			if err := n.pubSub.UnregisterTopicValidator(topic.String()); err != nil {
				n.logger.Err(err).Str("topic", topic.String()).Msg("failed to unregister topic validator")
			}

			return nil, fmt.Errorf("could not join topic (%s): %w", topic, err)
		}

		n.topics[topic] = tp
	}

	// Create a new subscription
	s, err := tp.Subscribe()
	if err != nil {
		return s, fmt.Errorf("could not subscribe to topic (%s): %w", topic, err)
	}

	// Add the subscription to the cache
	n.subs[topic] = s

	n.logger.Debug().
		Str("topic", topic.String()).
		Msg("subscribed to topic")
	return s, err
}

// Unsubscribe cancels the subscriber and closes the topic.
// Args:
//   - topic: topic to unsubscribe from.
//
// Returns:
//   - error: error, if any, which means the unsubscribe failed.
//     All errors returned from this function can be considered benign.
func (n *Node) Unsubscribe(topic channels.Topic) error {
	err := n.unsubscribeTopic(topic)
	if err != nil {
		return fmt.Errorf("failed to unsubscribe from topic: %w", err)
	}

	n.RequestPeerUpdate()

	return nil
}

// unsubscribeTopic cancels the subscriber and closes the topic.
// All errors returned from this function can be considered benign.
// Args:
//   - topic: topic to unsubscribe from.
//
// Returns:
//   - error: error, if any.
func (n *Node) unsubscribeTopic(topic channels.Topic) error {
	n.Lock()
	defer n.Unlock()

	// Remove the subscriber from the cache
	if s, found := n.subs[topic]; found {
		s.Cancel()
		n.subs[topic] = nil
		delete(n.subs, topic)
	}

	tp, found := n.topics[topic]
	if !found {
		err := fmt.Errorf("could not find topic (%s)", topic)
		return err
	}

	if err := n.pubSub.UnregisterTopicValidator(topic.String()); err != nil {
		return fmt.Errorf("failed to unregister topic validator: %w", err)
	}

	// attempt to close the topic
	err := tp.Close()
	if err != nil {
		return fmt.Errorf("could not close topic (%s): %w", topic, err)
	}
	n.topics[topic] = nil
	delete(n.topics, topic)

	n.logger.Debug().
		Str("topic", topic.String()).
		Msg("unsubscribed from topic")

	return nil
}

// Publish publishes the given payload on the topic.
// All errors returned from this function can be considered benign.
func (n *Node) Publish(ctx context.Context, messageScope flownet.OutgoingMessageScope) error {
	lg := n.logger.With().
		Str("topic", messageScope.Topic().String()).
		Interface("proto_message", messageScope.Proto()).
		Str("payload_type", messageScope.PayloadType()).
436 Int("message_size", messageScope.Size()).Logger() 437 lg.Debug().Msg("received message to publish") 438 439 // convert the message to bytes to be put on the wire. 440 data, err := messageScope.Proto().Marshal() 441 if err != nil { 442 return fmt.Errorf("failed to marshal the message: %w", err) 443 } 444 445 msgSize := len(data) 446 if msgSize > DefaultMaxPubSubMsgSize { 447 // libp2p pubsub will silently drop the message if its size is greater than the configured pubsub max message size 448 // hence return an error as this message is undeliverable 449 return fmt.Errorf("message size %d exceeds configured max message size %d", msgSize, DefaultMaxPubSubMsgSize) 450 } 451 452 ps, found := n.topics[messageScope.Topic()] 453 if !found { 454 return fmt.Errorf("could not find topic (%s)", messageScope.Topic()) 455 } 456 err = ps.Publish(ctx, data) 457 if err != nil { 458 return fmt.Errorf("could not publish to topic (%s): %w", messageScope.Topic(), err) 459 } 460 461 lg.Debug().Msg("published message to topic") 462 return nil 463 } 464 465 // HasSubscription returns true if the node currently has an active subscription to the topic. 466 func (n *Node) HasSubscription(topic channels.Topic) bool { 467 n.RLock() 468 defer n.RUnlock() 469 _, ok := n.subs[topic] 470 return ok 471 } 472 473 // Host returns pointer to host object of node. 474 func (n *Node) Host() host.Host { 475 return n.host 476 } 477 478 // WithDefaultUnicastProtocol overrides the default handler of the unicast manager and registers all preferred protocols. 479 func (n *Node) WithDefaultUnicastProtocol(defaultHandler libp2pnet.StreamHandler, preferred []protocols.ProtocolName) error { 480 n.uniMgr.SetDefaultHandler(defaultHandler) 481 for _, p := range preferred { 482 err := n.uniMgr.Register(p) 483 if err != nil { 484 return fmt.Errorf("could not register unicast protocls: %w", err) 485 } 486 } 487 488 return nil 489 } 490 491 // WithPeersProvider sets the PeersProvider for the peer manager. 492 // If a peer manager factory is set, this method will set the peer manager's PeersProvider. 493 func (n *Node) WithPeersProvider(peersProvider p2p.PeersProvider) { 494 // TODO: chore: we should not allow overriding the peers provider if one is already set. 495 if n.peerManager != nil { 496 n.peerManager.SetPeersProvider( 497 func() peer.IDSlice { 498 authorizedPeersIds := peersProvider() 499 allowListedPeerIds := peer.IDSlice{} // subset of authorizedPeersIds that are not disallowed 500 for _, peerId := range authorizedPeersIds { 501 // exclude the disallowed peers from the authorized peers list 502 causes, disallowListed := n.disallowListedCache.IsDisallowListed(peerId) 503 if disallowListed { 504 n.logger.Warn(). 505 Str("peer_id", p2plogging.PeerId(peerId)). 506 Str("causes", fmt.Sprintf("%v", causes)). 507 Msg("peer is disallowed for a cause, removing from authorized peers of peer manager") 508 509 // exclude the peer from the authorized peers list 510 continue 511 } 512 allowListedPeerIds = append(allowListedPeerIds, peerId) 513 } 514 515 return allowListedPeerIds 516 }, 517 ) 518 } 519 } 520 521 // PeerManagerComponent returns the component interface of the peer manager. 522 func (n *Node) PeerManagerComponent() component.Component { 523 return n.peerManager 524 } 525 526 // RequestPeerUpdate requests an update to the peer connections of this node using the peer manager. 
func (n *Node) RequestPeerUpdate() {
	if n.peerManager != nil {
		n.peerManager.RequestPeerUpdate()
	}
}

// IsConnected returns true if the given peer is a direct peer of this node, else false.
// A peer is considered not connected if the underlying libp2p host reports it as not connected
// and there are no connections to it in the connection list.
// error returns:
//   - network.ErrIllegalConnectionState if the underlying libp2p host reports connectedness as NotConnected but the
//     connection list to the peer is not empty. This would normally indicate a bug within libp2p. Although
//     network.ErrIllegalConnectionState usually signals a bug in libp2p, there is a small chance that this error is
//     returned due to a race condition between the time we check Connectedness and ConnsToPeer: a connection could be
//     established after we check Connectedness but right before we check ConnsToPeer.
func (n *Node) IsConnected(peerID peer.ID) (bool, error) {
	isConnected := n.host.Network().Connectedness(peerID)
	numOfConns := len(n.host.Network().ConnsToPeer(peerID))
	if isConnected == libp2pnet.NotConnected && numOfConns > 0 {
		return true, flownet.NewConnectionStatusErr(peerID, numOfConns)
	}
	return isConnected == libp2pnet.Connected && numOfConns > 0, nil
}

// SetRouting sets the node's routing implementation.
// SetRouting may be called at most once.
func (n *Node) SetRouting(r routing.Routing) error {
	if n.routing != nil {
		// we should not allow overriding the routing implementation if one is already set; return an error.
		return fmt.Errorf("routing already set")
	}

	n.routing = r
	return nil
}

// Routing returns the node's routing implementation.
func (n *Node) Routing() routing.Routing {
	return n.routing
}

// PeerScoreExposer returns the node's peer score exposer implementation.
func (n *Node) PeerScoreExposer() p2p.PeerScoreExposer {
	return n.pubSub.PeerScoreExposer()
}

// SetPubSub sets the node's pubsub implementation.
// SetPubSub may be called at most once.
func (n *Node) SetPubSub(ps p2p.PubSubAdapter) {
	if n.pubSub != nil {
		n.logger.Fatal().Msg("pubSub already set")
	}

	n.pubSub = ps
}

// GetLocalMeshPeers returns the list of peers in the local mesh for the given topic.
// Args:
//   - topic: the topic.
//
// Returns:
//   - []peer.ID: the list of peers in the local mesh for the given topic.
func (n *Node) GetLocalMeshPeers(topic channels.Topic) []peer.ID {
	return n.pubSub.GetLocalMeshPeers(topic)
}

// SetComponentManager sets the component manager for the node.
// SetComponentManager may be called at most once.
func (n *Node) SetComponentManager(cm *component.ComponentManager) {
	if n.Component != nil {
		n.logger.Fatal().Msg("component already set")
	}

	n.Component = cm
}

// SetUnicastManager sets the unicast manager for the node.
// SetUnicastManager may be called at most once.
func (n *Node) SetUnicastManager(uniMgr p2p.UnicastManager) {
	if n.uniMgr != nil {
		n.logger.Fatal().Msg("unicast manager already set")
	}
	n.uniMgr = uniMgr
}

// OnDisallowListNotification is called when a new disallow-list update notification is distributed.
// Any error encountered while consuming the event must be handled internally.
// The implementation must be concurrency safe.
// Args:
//   - peerId: peer ID of the peer being disallow-listed.
//   - cause: cause of the peer being disallow-listed (only this cause is added to the peer's disallow-listed causes).
//
// Returns:
//   - none
func (n *Node) OnDisallowListNotification(peerId peer.ID, cause flownet.DisallowListedCause) {
	causes, err := n.disallowListedCache.DisallowFor(peerId, cause)
	if err != nil {
		// returned error is fatal.
		n.logger.Fatal().Err(err).Str("peer_id", p2plogging.PeerId(peerId)).Msg("failed to add peer to disallow list")
	}

	// TODO: this code should further be refactored to also log the Flow id.
	n.logger.Warn().
		Str("peer_id", p2plogging.PeerId(peerId)).
		Str("notification_cause", cause.String()).
		Str("causes", fmt.Sprintf("%v", causes)).
		Msg("peer added to disallow list cache")
}

// OnAllowListNotification is called when a new allow-list update notification is distributed.
// Any error encountered while consuming the event must be handled internally.
// The implementation must be concurrency safe.
// Args:
//   - peerId: peer ID of the peer being allow-listed.
//   - cause: cause of the peer being allow-listed (only this cause is removed from the peer's disallow-listed causes).
//
// Returns:
//   - none
func (n *Node) OnAllowListNotification(peerId peer.ID, cause flownet.DisallowListedCause) {
	remainingCauses := n.disallowListedCache.AllowFor(peerId, cause)

	n.logger.Debug().
		Str("peer_id", p2plogging.PeerId(peerId)).
		Str("cause", fmt.Sprintf("%v", cause)).
		Str("remaining_causes", fmt.Sprintf("%v", remainingCauses)).
		Msg("peer is allow-listed for cause")
}

// IsDisallowListed determines whether the given peer is disallow-listed for any reason.
// Args:
//   - peerId: the peer to check.
//
// Returns:
//   - []network.DisallowListedCause: the list of causes for which the given peer is disallow-listed. If the peer is not
//     disallow-listed for any reason, a nil slice is returned.
//   - bool: true if the peer is disallow-listed for any reason, false otherwise.
func (n *Node) IsDisallowListed(peerId peer.ID) ([]flownet.DisallowListedCause, bool) {
	return n.disallowListedCache.IsDisallowListed(peerId)
}

// ActiveClustersChanged is called when the active clusters list of the collection clusters has changed.
// The LibP2PNode implementation directly calls the ActiveClustersChanged method of the pubsub implementation, as
// the pubsub implementation is responsible for the actual handling of the event.
// Args:
//   - list: the new active clusters list.
//
// Returns:
//   - none
func (n *Node) ActiveClustersChanged(list flow.ChainIDList) {
	n.pubSub.ActiveClustersChanged(list)
}
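
// Example usage of OpenAndWriteOnStream (illustrative sketch only; `node`, `targetID`, and `payload`
// are assumed to be provided by the caller and are not defined in this file; the protection tag is an
// arbitrary label chosen by the caller):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	err := node.OpenAndWriteOnStream(ctx, targetID, "example-send", func(s libp2pnet.Stream) error {
//		// writingLogic: write the payload; the node resets the stream if this returns an error
//		// and closes the stream on success.
//		_, werr := s.Write(payload)
//		return werr
//	})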