// github.com/MagHErmit/tendermint@v0.282.1/blockchain/v2/reactor.go

package v2

import (
	"errors"
	"fmt"
	"time"

	"github.com/MagHErmit/tendermint/behaviour"
	bc "github.com/MagHErmit/tendermint/blockchain"
	"github.com/MagHErmit/tendermint/libs/log"
	tmsync "github.com/MagHErmit/tendermint/libs/sync"
	"github.com/MagHErmit/tendermint/p2p"
	bcproto "github.com/MagHErmit/tendermint/proto/tendermint/blockchain"
	"github.com/MagHErmit/tendermint/state"
	"github.com/MagHErmit/tendermint/types"
)

const (
	// chBufferSize is the buffer size of all event channels.
	chBufferSize int = 1000
)

type blockStore interface {
	LoadBlock(height int64) *types.Block
	SaveBlock(*types.Block, *types.PartSet, *types.Commit)
	Base() int64
	Height() int64
}

// BlockchainReactor handles fast sync protocol.
type BlockchainReactor struct {
	p2p.BaseReactor

	fastSync    bool // if true, enable fast sync on start
	stateSynced bool // set to true when SwitchToFastSync is called by state sync
	scheduler   *Routine
	processor   *Routine
	logger      log.Logger

	mtx           tmsync.RWMutex
	maxPeerHeight int64
	syncHeight    int64
	events        chan Event // non-nil during a fast sync

	reporter behaviour.Reporter
	io       iIO
	store    blockStore
}

//nolint:unused,deadcode
type blockVerifier interface {
	VerifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
}

type blockApplier interface {
	ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
}

// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behaviour.Reporter,
	blockApplier blockApplier, fastSync bool) *BlockchainReactor {
	initHeight := state.LastBlockHeight + 1
	if initHeight == 1 {
		initHeight = state.InitialHeight
	}
	scheduler := newScheduler(initHeight, time.Now())
	pContext := newProcessorContext(store, blockApplier, state)
	// TODO: Fix naming to just newProcessor
	// newPcState requires a processorContext
	processor := newPcState(pContext)

	return &BlockchainReactor{
		scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
		processor: newRoutine("processor", processor.handle, chBufferSize),
		store:     store,
		reporter:  reporter,
		logger:    log.NewNopLogger(),
		fastSync:  fastSync,
	}
}

// NewBlockchainReactor creates a new reactor instance.
func NewBlockchainReactor(
	state state.State,
	blockApplier blockApplier,
	store blockStore,
	fastSync bool) *BlockchainReactor {
	reporter := behaviour.NewMockReporter()
	return newReactor(state, store, reporter, blockApplier, fastSync)
}
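
// A minimal wiring sketch (illustrative only; state, applier, store, sw and
// logger are assumed to come from the node's setup code):
//
//	r := NewBlockchainReactor(state, applier, store, true /* fastSync */)
//	r.SetLogger(logger)
//	r.SetSwitch(sw) // installs the switch-backed IO used to talk to peers
//	if err := r.Start(); err != nil {
//		panic(err)
//	}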

// SetSwitch implements Reactor interface.
func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) {
	r.Switch = sw
	if sw != nil {
		r.io = newSwitchIo(sw)
	} else {
		r.io = nil
	}
}

func (r *BlockchainReactor) setMaxPeerHeight(height int64) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	if height > r.maxPeerHeight {
		r.maxPeerHeight = height
	}
}

func (r *BlockchainReactor) setSyncHeight(height int64) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.syncHeight = height
}

// SyncHeight returns the height to which the BlockchainReactor has synced.
func (r *BlockchainReactor) SyncHeight() int64 {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	return r.syncHeight
}

// SetLogger sets the logger of the reactor.
func (r *BlockchainReactor) SetLogger(logger log.Logger) {
	r.logger = logger
	r.scheduler.setLogger(logger)
	r.processor.setLogger(logger)
}

// Start implements cmn.Service interface.
func (r *BlockchainReactor) Start() error {
	r.reporter = behaviour.NewSwitchReporter(r.BaseReactor.Switch)
	if r.fastSync {
		err := r.startSync(nil)
		if err != nil {
			return fmt.Errorf("failed to start fast sync: %w", err)
		}
	}
	return nil
}

// startSync begins a fast sync, signalled by r.events being non-nil. If state is non-nil,
// the scheduler and processor are updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	if r.events != nil {
		return errors.New("fast sync already in progress")
	}
	r.events = make(chan Event, chBufferSize)
	go r.scheduler.start()
	go r.processor.start()
	if state != nil {
		<-r.scheduler.ready()
		<-r.processor.ready()
		r.scheduler.send(bcResetState{state: *state})
		r.processor.send(bcResetState{state: *state})
	}
	go r.demux(r.events)
	return nil
}

// endSync ends a fast sync.
func (r *BlockchainReactor) endSync() {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	if r.events != nil {
		close(r.events)
	}
	r.events = nil
	r.scheduler.stop()
	r.processor.stop()
}

// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
	r.stateSynced = true
	state = state.Copy()
	return r.startSync(&state)
}

// reactor generated ticker events:
// ticker for cleaning peers
type rTryPrunePeer struct {
	priorityHigh
	time time.Time
}

func (e rTryPrunePeer) String() string {
	return fmt.Sprintf("rTryPrunePeer{%v}", e.time)
}

// ticker event for scheduling block requests
type rTrySchedule struct {
	priorityHigh
	time time.Time
}

func (e rTrySchedule) String() string {
	return fmt.Sprintf("rTrySchedule{%v}", e.time)
}

// ticker for block processing
type rProcessBlock struct {
	priorityNormal
}

func (e rProcessBlock) String() string {
	return "rProcessBlock"
}

// reactor generated events based on blockchain related messages from peers:
// blockResponse message received from a peer
type bcBlockResponse struct {
	priorityNormal
	time   time.Time
	peerID p2p.ID
	size   int64
	block  *types.Block
}

func (resp bcBlockResponse) String() string {
	return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}",
		resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time)
}

// blockNoResponse message received from a peer
type bcNoBlockResponse struct {
	priorityNormal
	time   time.Time
	peerID p2p.ID
	height int64
}

func (resp bcNoBlockResponse) String() string {
	return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}",
		resp.peerID, resp.height, resp.time)
}
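
// Note: each event type embeds priorityNormal or priorityHigh (types defined
// elsewhere in this package) to tag it with a queue priority; peer removal,
// state resets and the pruning/scheduling ticks are marked high priority.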

// statusResponse message received from a peer
type bcStatusResponse struct {
	priorityNormal
	time   time.Time
	peerID p2p.ID
	base   int64
	height int64
}

func (resp bcStatusResponse) String() string {
	return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}",
		resp.peerID, resp.height, resp.base, resp.time)
}

// new peer is connected
type bcAddNewPeer struct {
	priorityNormal
	peerID p2p.ID
}

func (resp bcAddNewPeer) String() string {
	return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID)
}

// existing peer is removed
type bcRemovePeer struct {
	priorityHigh
	peerID p2p.ID
	reason interface{}
}

func (resp bcRemovePeer) String() string {
	return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason)
}

// resets the scheduler and processor state, e.g. following a switch from state syncing
type bcResetState struct {
	priorityHigh
	state state.State
}

func (e bcResetState) String() string {
	return fmt.Sprintf("bcResetState{%v}", e.state)
}

// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
	var lastRate = 0.0
	var lastHundred = time.Now()

	var (
		processBlockFreq = 20 * time.Millisecond
		doProcessBlockCh = make(chan struct{}, 1)
		doProcessBlockTk = time.NewTicker(processBlockFreq)
	)
	defer doProcessBlockTk.Stop()

	var (
		prunePeerFreq = 1 * time.Second
		doPrunePeerCh = make(chan struct{}, 1)
		doPrunePeerTk = time.NewTicker(prunePeerFreq)
	)
	defer doPrunePeerTk.Stop()

	var (
		scheduleFreq = 20 * time.Millisecond
		doScheduleCh = make(chan struct{}, 1)
		doScheduleTk = time.NewTicker(scheduleFreq)
	)
	defer doScheduleTk.Stop()

	var (
		statusFreq = 10 * time.Second
		doStatusCh = make(chan struct{}, 1)
		doStatusTk = time.NewTicker(statusFreq)
	)
	defer doStatusTk.Stop()
	doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers

	// XXX: Extract timers to make testing atemporal
	for {
		select {
		// Pacers: send at most per frequency but don't saturate
		case <-doProcessBlockTk.C:
			select {
			case doProcessBlockCh <- struct{}{}:
			default:
			}
		case <-doPrunePeerTk.C:
			select {
			case doPrunePeerCh <- struct{}{}:
			default:
			}
		case <-doScheduleTk.C:
			select {
			case doScheduleCh <- struct{}{}:
			default:
			}
		case <-doStatusTk.C:
			select {
			case doStatusCh <- struct{}{}:
			default:
			}

		// Tickers: perform tasks periodically
		case <-doScheduleCh:
			r.scheduler.send(rTrySchedule{time: time.Now()})
		case <-doPrunePeerCh:
			r.scheduler.send(rTryPrunePeer{time: time.Now()})
		case <-doProcessBlockCh:
			r.processor.send(rProcessBlock{})
		case <-doStatusCh:
			if err := r.io.broadcastStatusRequest(); err != nil {
				r.logger.Error("Error broadcasting status request", "err", err)
			}
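
		// The pacer/ticker split above decouples tick production from
		// consumption: each ticker does a non-blocking send into a channel
		// with a buffer of one, so at most one tick is ever pending and a
		// slow scheduler or processor never backs up the timers.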

		// Events from peers. Closing the channel signals event loop termination.
		case event, ok := <-events:
			if !ok {
				r.logger.Info("Stopping event processing")
				return
			}
			switch event := event.(type) {
			case bcStatusResponse:
				r.setMaxPeerHeight(event.height)
				r.scheduler.send(event)
			case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse:
				r.scheduler.send(event)
			default:
				r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event))
			}

		// Incremental events from scheduler
		case event := <-r.scheduler.next():
			switch event := event.(type) {
			case scBlockReceived:
				r.processor.send(event)
			case scPeerError:
				r.processor.send(event)
				if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil {
					r.logger.Error("Error reporting peer", "err", err)
				}
			case scBlockRequest:
				if err := r.io.sendBlockRequest(event.peerID, event.height); err != nil {
					r.logger.Error("Error sending block request", "err", err)
				}
			case scFinishedEv:
				r.processor.send(event)
				r.scheduler.stop()
			case scSchedulerFail:
				r.logger.Error("Scheduler failure", "err", event.reason.Error())
			case scPeersPruned:
				// Remove peers from the processor.
				for _, peerID := range event.peers {
					r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")})
				}
				r.logger.Debug("Pruned peers", "count", len(event.peers))
			case noOpEvent:
			default:
				r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event))
			}

		// Incremental events from processor
		case event := <-r.processor.next():
			switch event := event.(type) {
			case pcBlockProcessed:
				r.setSyncHeight(event.height)
				if r.syncHeight%100 == 0 {
					lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
					r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
						"max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
					lastHundred = time.Now()
				}
				r.scheduler.send(event)
			case pcBlockVerificationFailure:
				r.scheduler.send(event)
			case pcFinished:
				r.logger.Info("Fast sync complete, switching to consensus")
				if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
					r.logger.Error("Failed to switch to consensus reactor")
				}
				r.endSync()
				return
			case noOpEvent:
			default:
				r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event))
			}

		// Terminal event from scheduler
		case err := <-r.scheduler.final():
			switch err {
			case nil:
				r.logger.Info("Scheduler stopped")
			default:
				r.logger.Error("Scheduler aborted with error", "err", err)
			}

		// Terminal event from processor
		case err := <-r.processor.final():
			switch err {
			case nil:
				r.logger.Info("Processor stopped")
			default:
				r.logger.Error("Processor aborted with error", "err", err)
			}
		}
	}
}

// Stop implements cmn.Service interface.
func (r *BlockchainReactor) Stop() error {
	r.logger.Info("reactor stopping")
	r.endSync()
	r.logger.Info("reactor stopped")
	return nil
}
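
// Receive, AddPeer and RemovePeer below all deliver events to the demux loop
// with the same guarded pattern: take the read lock, check that a sync is in
// progress (r.events != nil), and only then send. endSync closes the channel
// under the write lock, so this check prevents a send on a closed channel.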

// Receive implements Reactor by handling different message types.
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := bc.DecodeMsg(msgBytes)
	if err != nil {
		r.logger.Error("error decoding message",
			"src", src.ID(), "chID", chID, "msg", msg, "err", err)
		_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	if err = bc.ValidateMsg(msg); err != nil {
		r.logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
		_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg)

	switch msg := msg.(type) {
	case *bcproto.StatusRequest:
		if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil {
			r.logger.Error("Could not send status message to peer", "src", src, "err", err)
		}

	case *bcproto.BlockRequest:
		block := r.store.LoadBlock(msg.Height)
		if block != nil {
			if err = r.io.sendBlockToPeer(block, src.ID()); err != nil {
				r.logger.Error("Could not send block message to peer", "err", err)
			}
		} else {
			r.logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
			peerID := src.ID()
			if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
				r.logger.Error("Couldn't send block not found", "err", err)
			}
		}

	case *bcproto.StatusResponse:
		r.mtx.RLock()
		if r.events != nil {
			r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height}
		}
		r.mtx.RUnlock()

	case *bcproto.BlockResponse:
		bi, err := types.BlockFromProto(msg.Block)
		if err != nil {
			r.logger.Error("error transitioning block from protobuf", "err", err)
			return
		}
		r.mtx.RLock()
		if r.events != nil {
			r.events <- bcBlockResponse{
				peerID: src.ID(),
				block:  bi,
				size:   int64(len(msgBytes)),
				time:   time.Now(),
			}
		}
		r.mtx.RUnlock()

	case *bcproto.NoBlockResponse:
		r.mtx.RLock()
		if r.events != nil {
			r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()}
		}
		r.mtx.RUnlock()
	}
}

// AddPeer implements Reactor interface.
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
	err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID())
	if err != nil {
		r.logger.Error("Could not send status message to new peer",
			"src", peer.ID(), "height", r.SyncHeight(), "err", err)
	}
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	if r.events != nil {
		r.events <- bcAddNewPeer{peerID: peer.ID()}
	}
}

// RemovePeer implements Reactor interface.
func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	if r.events != nil {
		r.events <- bcRemovePeer{
			peerID: peer.ID(),
			reason: reason,
		}
	}
}

// GetChannels implements Reactor.
func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:                  BlockchainChannel,
			Priority:            5,
			SendQueueCapacity:   2000,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: bc.MaxMsgSize,
		},
	}
}