github.com/unicornultrafoundation/go-u2u@v1.0.0-rc1.0.20240205080301-e74a83d3fadc/gossip/peer.go

package gossip

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/unicornultrafoundation/go-helios/hash"
	"github.com/unicornultrafoundation/go-helios/native/dag"
	"github.com/unicornultrafoundation/go-helios/native/idx"
	"github.com/unicornultrafoundation/go-helios/utils/datasemaphore"
	"github.com/unicornultrafoundation/go-u2u/common"
	"github.com/unicornultrafoundation/go-u2u/core/types"
	"github.com/unicornultrafoundation/go-u2u/eth/protocols/snap"
	"github.com/unicornultrafoundation/go-u2u/p2p"
	"github.com/unicornultrafoundation/go-u2u/rlp"

	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockrecords/brstream"
	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockvotes/bvstream"
	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/dag/dagstream"
	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/epochpacks/epstream"
	"github.com/unicornultrafoundation/go-u2u/native"
)

var (
	errNotRegistered = errors.New("peer is not registered")
)

const (
	handshakeTimeout = 5 * time.Second
)

// PeerInfo represents a short summary of the sub-protocol metadata known
// about a connected peer.
type PeerInfo struct {
	Version     uint      `json:"version"` // protocol version negotiated
	Epoch       idx.Epoch `json:"epoch"`
	NumOfBlocks idx.Block `json:"blocks"`
}

type broadcastItem struct {
	Code uint64
	Raw  rlp.RawValue
}

type peer struct {
	id string

	cfg PeerCacheConfig

	*p2p.Peer
	rw p2p.MsgReadWriter

	version uint // Protocol version negotiated

	knownTxs            mapset.Set         // Set of transaction hashes known by this peer
	knownEvents         mapset.Set         // Set of event hashes known by this peer
	queue               chan broadcastItem // queue of items to send
	queuedDataSemaphore *datasemaphore.DataSemaphore
	term                chan struct{} // Termination channel to stop the broadcaster

	progress PeerProgress

	snapExt  *snapPeer     // Satellite `snap` connection
	syncDrop *time.Timer   // Connection dropper if `eth` sync progress isn't validated in time
	snapWait chan struct{} // Notification channel for snap connections

	useless uint32

	sync.RWMutex
}

func (p *peer) Useless() bool {
	return atomic.LoadUint32(&p.useless) != 0
}

func (p *peer) SetUseless() {
	atomic.StoreUint32(&p.useless, 1)
}

func (p *peer) SetProgress(x PeerProgress) {
	p.Lock()
	defer p.Unlock()

	p.progress = x
}

func (p *peer) InterestedIn(h hash.Event) bool {
	e := h.Epoch()

	p.RLock()
	defer p.RUnlock()

	return e != 0 &&
		p.progress.Epoch != 0 &&
		(e == p.progress.Epoch || e == p.progress.Epoch+1) &&
		!p.knownEvents.Contains(h)
}

func (a *PeerProgress) Less(b PeerProgress) bool {
	if a.Epoch != b.Epoch {
		return a.Epoch < b.Epoch
	}
	return a.LastBlockIdx < b.LastBlockIdx
}
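// Illustrative sketch (not part of the original file): Less orders progress values by
// epoch first and by last block index second, so the most advanced peer of a set can be
// picked with a simple scan. The helper name bestProgress is hypothetical.
func bestProgress(peers []*peer) PeerProgress {
	var best PeerProgress
	for _, p := range peers {
		p.RLock()
		if best.Less(p.progress) {
			best = p.progress
		}
		p.RUnlock()
	}
	return best
}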
func newPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, cfg PeerCacheConfig) *peer {
	peer := &peer{
		cfg:                 cfg,
		Peer:                p,
		rw:                  rw,
		version:             version,
		id:                  p.ID().String(),
		knownTxs:            mapset.NewSet(),
		knownEvents:         mapset.NewSet(),
		queue:               make(chan broadcastItem, cfg.MaxQueuedItems),
		queuedDataSemaphore: datasemaphore.New(dag.Metric{cfg.MaxQueuedItems, cfg.MaxQueuedSize}, getSemaphoreWarningFn("Peers queue")),
		term:                make(chan struct{}),
	}

	go peer.broadcast(peer.queue)

	return peer
}

// broadcast is a write loop that multiplexes event propagations, announcements
// and transaction broadcasts into the remote peer. The goal is to have an async
// writer that does not lock up node internals.
func (p *peer) broadcast(queue chan broadcastItem) {
	for {
		select {
		case item := <-queue:
			_ = p2p.Send(p.rw, item.Code, item.Raw)
			p.queuedDataSemaphore.Release(memSize(item.Raw))

		case <-p.term:
			return
		}
	}
}

// Close signals the broadcast goroutine to terminate.
func (p *peer) Close() {
	p.queuedDataSemaphore.Terminate()
	close(p.term)
}

// Info gathers and returns a collection of metadata known about a peer.
func (p *peer) Info() *PeerInfo {
	return &PeerInfo{
		Version:     p.version,
		Epoch:       p.progress.Epoch,
		NumOfBlocks: p.progress.LastBlockIdx,
	}
}

// MarkEvent marks an event as known for the peer, ensuring that the event will
// never be propagated to this particular peer.
func (p *peer) MarkEvent(hash hash.Event) {
	// If we reached the memory allowance, drop a previously known event hash
	for p.knownEvents.Cardinality() >= p.cfg.MaxKnownEvents {
		p.knownEvents.Pop()
	}
	p.knownEvents.Add(hash)
}

// MarkTransaction marks a transaction as known for the peer, ensuring that it
// will never be propagated to this particular peer.
func (p *peer) MarkTransaction(hash common.Hash) {
	// If we reached the memory allowance, drop a previously known transaction hash
	for p.knownTxs.Cardinality() >= p.cfg.MaxKnownTxs {
		p.knownTxs.Pop()
	}
	p.knownTxs.Add(hash)
}

// SendTransactions sends transactions to the peer and includes the hashes
// in its transaction hash set for future reference.
func (p *peer) SendTransactions(txs types.Transactions) error {
	// Mark all the transactions as known, but ensure we don't overflow our limits
	for _, tx := range txs {
		p.knownTxs.Add(tx.Hash())
	}
	for p.knownTxs.Cardinality() >= p.cfg.MaxKnownTxs {
		p.knownTxs.Pop()
	}
	return p2p.Send(p.rw, EvmTxsMsg, txs)
}
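// Illustrative sketch (not part of the original file): the knownTxs set is what lets a
// broadcaster skip peers that have already seen a transaction instead of re-sending it.
// The helper name peersWithoutTx is hypothetical.
func peersWithoutTx(peers []*peer, txid common.Hash) []*peer {
	unaware := make([]*peer, 0, len(peers))
	for _, p := range peers {
		if !p.knownTxs.Contains(txid) {
			unaware = append(unaware, p)
		}
	}
	return unaware
}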
// SendTransactionHashes sends transaction hashes to the peer and includes the hashes
// in its transaction hash set for future reference.
func (p *peer) SendTransactionHashes(txids []common.Hash) error {
	// Mark all the transactions as known, but ensure we don't overflow our limits
	for _, txid := range txids {
		p.knownTxs.Add(txid)
	}
	for p.knownTxs.Cardinality() >= p.cfg.MaxKnownTxs {
		p.knownTxs.Pop()
	}
	return p2p.Send(p.rw, NewEvmTxHashesMsg, txids)
}

func memSize(v rlp.RawValue) dag.Metric {
	return dag.Metric{1, uint64(len(v) + 1024)}
}

func (p *peer) asyncSendEncodedItem(raw rlp.RawValue, code uint64, queue chan broadcastItem) bool {
	if !p.queuedDataSemaphore.TryAcquire(memSize(raw)) {
		return false
	}
	item := broadcastItem{
		Code: code,
		Raw:  raw,
	}
	select {
	case queue <- item:
		return true
	case <-p.term:
	default:
	}
	p.queuedDataSemaphore.Release(memSize(raw))
	return false
}

func (p *peer) asyncSendNonEncodedItem(value interface{}, code uint64, queue chan broadcastItem) bool {
	raw, err := rlp.EncodeToBytes(value)
	if err != nil {
		return false
	}
	return p.asyncSendEncodedItem(raw, code, queue)
}

func (p *peer) enqueueSendEncodedItem(raw rlp.RawValue, code uint64, queue chan broadcastItem) {
	if !p.queuedDataSemaphore.Acquire(memSize(raw), 10*time.Second) {
		return
	}
	item := broadcastItem{
		Code: code,
		Raw:  raw,
	}
	select {
	case queue <- item:
		return
	case <-p.term:
	}
	p.queuedDataSemaphore.Release(memSize(raw))
}

func (p *peer) enqueueSendNonEncodedItem(value interface{}, code uint64, queue chan broadcastItem) {
	raw, err := rlp.EncodeToBytes(value)
	if err != nil {
		return
	}
	p.enqueueSendEncodedItem(raw, code, queue)
}

func SplitTransactions(txs types.Transactions, fn func(types.Transactions)) {
	// divide big batch into smaller ones
	for len(txs) > 0 {
		batchSize := 0
		var batch types.Transactions
		for i, tx := range txs {
			batchSize += int(tx.Size()) + 1024
			batch = txs[:i+1]
			if batchSize >= softResponseLimitSize || i+1 >= softLimitItems {
				break
			}
		}
		txs = txs[len(batch):]
		fn(batch)
	}
}

// AsyncSendTransactions queues a list of transactions for propagation to a remote
// peer. If the peer's broadcast queue is full, the transactions are silently dropped.
func (p *peer) AsyncSendTransactions(txs types.Transactions, queue chan broadcastItem) {
	if p.asyncSendNonEncodedItem(txs, EvmTxsMsg, queue) {
		// Mark all the transactions as known, but ensure we don't overflow our limits
		for _, tx := range txs {
			p.knownTxs.Add(tx.Hash())
		}
		for p.knownTxs.Cardinality() >= p.cfg.MaxKnownTxs {
			p.knownTxs.Pop()
		}
	} else {
		p.Log().Debug("Dropping transactions propagation", "count", len(txs))
	}
}
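// Illustrative sketch (not part of the original file): SplitTransactions can be combined
// with SendTransactions so that each p2p message stays under the soft size/item limits.
// The helper name sendTransactionsInBatches is hypothetical.
func sendTransactionsInBatches(p *peer, txs types.Transactions) error {
	var firstErr error
	SplitTransactions(txs, func(batch types.Transactions) {
		if err := p.SendTransactions(batch); err != nil && firstErr == nil {
			firstErr = err
		}
	})
	return firstErr
}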
// AsyncSendTransactionHashes queues a list of transaction hashes for announcement to a
// remote peer. If the peer's broadcast queue is full, the hashes are silently dropped.
func (p *peer) AsyncSendTransactionHashes(txids []common.Hash, queue chan broadcastItem) {
	if p.asyncSendNonEncodedItem(txids, NewEvmTxHashesMsg, queue) {
		// Mark all the transactions as known, but ensure we don't overflow our limits
		for _, tx := range txids {
			p.knownTxs.Add(tx)
		}
		for p.knownTxs.Cardinality() >= p.cfg.MaxKnownTxs {
			p.knownTxs.Pop()
		}
	} else {
		p.Log().Debug("Dropping tx announcement", "count", len(txids))
	}
}

// EnqueueSendTransactions queues a list of transactions for propagation to a remote
// peer.
// The method blocks if the peer's broadcast queue is full.
func (p *peer) EnqueueSendTransactions(txs types.Transactions, queue chan broadcastItem) {
	p.enqueueSendNonEncodedItem(txs, EvmTxsMsg, queue)
	// Mark all the transactions as known, but ensure we don't overflow our limits
	for _, tx := range txs {
		p.knownTxs.Add(tx.Hash())
	}
	for p.knownTxs.Cardinality() >= p.cfg.MaxKnownTxs {
		p.knownTxs.Pop()
	}
}

// SendEventIDs announces the availability of a number of events through
// a hash notification.
func (p *peer) SendEventIDs(hashes []hash.Event) error {
	// Mark all the event hashes as known, but ensure we don't overflow our limits
	for _, hash := range hashes {
		p.knownEvents.Add(hash)
	}
	for p.knownEvents.Cardinality() >= p.cfg.MaxKnownEvents {
		p.knownEvents.Pop()
	}
	return p2p.Send(p.rw, NewEventIDsMsg, hashes)
}

// AsyncSendEventIDs queues the availability of an event for propagation to a
// remote peer. If the peer's broadcast queue is full, the event is silently
// dropped.
func (p *peer) AsyncSendEventIDs(ids hash.Events, queue chan broadcastItem) {
	if p.asyncSendNonEncodedItem(ids, NewEventIDsMsg, queue) {
		// Mark all the event hashes as known, but ensure we don't overflow our limits
		for _, id := range ids {
			p.knownEvents.Add(id)
		}
		for p.knownEvents.Cardinality() >= p.cfg.MaxKnownEvents {
			p.knownEvents.Pop()
		}
	} else {
		p.Log().Debug("Dropping event announcement", "count", len(ids))
	}
}

// SendEvents propagates a batch of events to a remote peer.
func (p *peer) SendEvents(events native.EventPayloads) error {
	// Mark all the event hashes as known, but ensure we don't overflow our limits
	for _, event := range events {
		p.knownEvents.Add(event.ID())
		for p.knownEvents.Cardinality() >= p.cfg.MaxKnownEvents {
			p.knownEvents.Pop()
		}
	}
	return p2p.Send(p.rw, EventsMsg, events)
}

// SendEventsRLP propagates a batch of RLP-encoded events to a remote peer.
func (p *peer) SendEventsRLP(events []rlp.RawValue, ids []hash.Event) error {
	// Mark all the event hashes as known, but ensure we don't overflow our limits
	for _, id := range ids {
		p.knownEvents.Add(id)
		for p.knownEvents.Cardinality() >= p.cfg.MaxKnownEvents {
			p.knownEvents.Pop()
		}
	}
	return p2p.Send(p.rw, EventsMsg, events)
}

// AsyncSendEvents queues a batch of events for propagation to a remote peer.
// If the peer's broadcast queue is full, the events are silently dropped.
func (p *peer) AsyncSendEvents(events native.EventPayloads, queue chan broadcastItem) bool {
	if p.asyncSendNonEncodedItem(events, EventsMsg, queue) {
		// Mark all the event hashes as known, but ensure we don't overflow our limits
		for _, event := range events {
			p.knownEvents.Add(event.ID())
		}
		for p.knownEvents.Cardinality() >= p.cfg.MaxKnownEvents {
			p.knownEvents.Pop()
		}
		return true
	}
	p.Log().Debug("Dropping event propagation", "count", len(events))
	return false
}
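// Illustrative sketch (not part of the original file): since AsyncSendEvents reports
// whether the payloads were accepted, a broadcaster may fall back to a lighter-weight
// ID announcement when the peer's queue is too full for full events. The helper name
// propagateOrAnnounce is hypothetical.
func propagateOrAnnounce(p *peer, events native.EventPayloads) {
	if p.AsyncSendEvents(events, p.queue) {
		return
	}
	// Full payloads were dropped; try to announce just the hashes instead.
	ids := make(hash.Events, 0, len(events))
	for _, e := range events {
		ids = append(ids, e.ID())
	}
	p.AsyncSendEventIDs(ids, p.queue)
}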
// EnqueueSendEventsRLP queues a batch of RLP-encoded events for propagation to a remote peer.
// The method blocks if the peer's broadcast queue is full.
func (p *peer) EnqueueSendEventsRLP(events []rlp.RawValue, ids []hash.Event, queue chan broadcastItem) {
	p.enqueueSendNonEncodedItem(events, EventsMsg, queue)
	// Mark all the event hashes as known, but ensure we don't overflow our limits
	for _, id := range ids {
		p.knownEvents.Add(id)
	}
	for p.knownEvents.Cardinality() >= p.cfg.MaxKnownEvents {
		p.knownEvents.Pop()
	}
}

// AsyncSendProgress queues a progress propagation to a remote peer.
// If the peer's broadcast queue is full, the progress is silently dropped.
func (p *peer) AsyncSendProgress(progress PeerProgress, queue chan broadcastItem) {
	if !p.asyncSendNonEncodedItem(progress, ProgressMsg, queue) {
		p.Log().Debug("Dropping peer progress propagation")
	}
}

func (p *peer) RequestEvents(ids hash.Events) error {
	// divide big batch into smaller ones
	for start := 0; start < len(ids); start += softLimitItems {
		end := len(ids)
		if end > start+softLimitItems {
			end = start + softLimitItems
		}
		p.Log().Debug("Fetching batch of events", "count", len(ids[start:end]))
		err := p2p.Send(p.rw, GetEventsMsg, ids[start:end])
		if err != nil {
			return err
		}
	}
	return nil
}

func (p *peer) RequestTransactions(txids []common.Hash) error {
	// divide big batch into smaller ones
	for start := 0; start < len(txids); start += softLimitItems {
		end := len(txids)
		if end > start+softLimitItems {
			end = start + softLimitItems
		}
		p.Log().Debug("Fetching batch of transactions", "count", len(txids[start:end]))
		err := p2p.Send(p.rw, GetEvmTxsMsg, txids[start:end])
		if err != nil {
			return err
		}
	}
	return nil
}

func (p *peer) SendBVsStream(r bvstream.Response) error {
	return p2p.Send(p.rw, BVsStreamResponse, r)
}

func (p *peer) RequestBVsStream(r bvstream.Request) error {
	return p2p.Send(p.rw, RequestBVsStream, r)
}

func (p *peer) SendBRsStream(r brstream.Response) error {
	return p2p.Send(p.rw, BRsStreamResponse, r)
}

func (p *peer) RequestBRsStream(r brstream.Request) error {
	return p2p.Send(p.rw, RequestBRsStream, r)
}

func (p *peer) SendEPsStream(r epstream.Response) error {
	return p2p.Send(p.rw, EPsStreamResponse, r)
}

func (p *peer) RequestEPsStream(r epstream.Request) error {
	return p2p.Send(p.rw, RequestEPsStream, r)
}

func (p *peer) SendEventsStream(r dagstream.Response, ids hash.Events) error {
	// Mark all the event hashes as known, but ensure we don't overflow our limits
	for _, id := range ids {
		p.knownEvents.Add(id)
		for p.knownEvents.Cardinality() >= p.cfg.MaxKnownEvents {
			p.knownEvents.Pop()
		}
	}
	return p2p.Send(p.rw, EventsStreamResponse, r)
}
func (p *peer) RequestEventsStream(r dagstream.Request) error {
	return p2p.Send(p.rw, RequestEventsStream, r)
}

// Handshake executes the protocol handshake, negotiating version number,
// network ID and genesis object.
func (p *peer) Handshake(network uint64, progress PeerProgress, genesis common.Hash) error {
	// Send our own handshake in a new goroutine
	errc := make(chan error, 2)
	var handshake handshakeData // safe to read after two values have been received from errc

	go func() {
		// send both HandshakeMsg and ProgressMsg
		err := p2p.Send(p.rw, HandshakeMsg, &handshakeData{
			ProtocolVersion: uint32(p.version),
			NetworkID:       0, // TODO: set to `network` after all nodes updated to #184
			Genesis:         genesis,
		})
		if err != nil {
			errc <- err
		}
		errc <- p.SendProgress(progress)
	}()
	go func() {
		errc <- p.readStatus(network, &handshake, genesis)
		// do not expect ProgressMsg here, because eth62 clients won't send it
	}()
	timeout := time.NewTimer(handshakeTimeout)
	defer timeout.Stop()
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				return err
			}
		case <-timeout.C:
			return p2p.DiscReadTimeout
		}
	}
	return nil
}

func (p *peer) SendProgress(progress PeerProgress) error {
	return p2p.Send(p.rw, ProgressMsg, progress)
}

func (p *peer) readStatus(network uint64, handshake *handshakeData, genesis common.Hash) (err error) {
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Code != HandshakeMsg {
		return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, HandshakeMsg)
	}
	if msg.Size > protocolMaxMsgSize {
		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
	}
	// Decode the handshake and make sure everything matches
	if err := msg.Decode(&handshake); err != nil {
		return errResp(ErrDecode, "msg %v: %v", msg, err)
	}

	// TODO: rm after all the nodes updated to #184
	if handshake.NetworkID == 0 {
		handshake.NetworkID = network
	}

	if handshake.Genesis != genesis {
		return errResp(ErrGenesisMismatch, "%x (!= %x)", handshake.Genesis[:8], genesis[:8])
	}
	if handshake.NetworkID != network {
		return errResp(ErrNetworkIDMismatch, "%d (!= %d)", handshake.NetworkID, network)
	}
	if uint(handshake.ProtocolVersion) != p.version {
		return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", handshake.ProtocolVersion, p.version)
	}
	return nil
}

// String implements fmt.Stringer.
func (p *peer) String() string {
	return fmt.Sprintf("Peer %s [%s]", p.id,
		fmt.Sprintf("u2u/%2d", p.version),
	)
}

// snapPeerInfo represents a short summary of the `snap` sub-protocol metadata known
// about a connected peer.
type snapPeerInfo struct {
	Version uint `json:"version"` // Snapshot protocol version negotiated
}

// snapPeer is a wrapper around snap.Peer to maintain some extra metadata.
type snapPeer struct {
	*snap.Peer
}

// info gathers and returns some `snap` protocol metadata known about a peer.
func (p *snapPeer) info() *snapPeerInfo {
	return &snapPeerInfo{
		Version: p.Version(),
	}
}
// eligibleForSnap checks the eligibility of a peer for the snap protocol. A peer is
// eligible for snap if it advertises the `snap` satellite protocol along with the `u2u` protocol.
func eligibleForSnap(p *p2p.Peer) bool {
	return p.RunningCap(ProtocolName, []uint{UP01}) && p.RunningCap(snap.ProtocolName, snap.ProtocolVersions)
}
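// Illustrative sketch (not part of the original file): how a handler might wire a freshly
// accepted devp2p connection into this file's API. The function name handleConn and its
// parameters are hypothetical; the real wiring lives elsewhere in the gossip package.
func handleConn(pp *p2p.Peer, rw p2p.MsgReadWriter, cfg PeerCacheConfig, networkID uint64, progress PeerProgress, genesis common.Hash) error {
	p := newPeer(UP01, pp, rw, cfg)
	defer p.Close()
	// Negotiate version, network ID and genesis before exchanging any other messages.
	if err := p.Handshake(networkID, progress, genesis); err != nil {
		return err
	}
	// ... the protocol message loop would run here ...
	return nil
}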