github.com/noirx94/tendermintmp@v0.0.1/blockchain/v0/reactor.go

package v0

import (
	"fmt"
	"reflect"
	"time"

	bc "github.com/tendermint/tendermint/blockchain"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

const (
	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
	BlockchainChannel = byte(0x40)

	trySyncIntervalMS = 10

	// stop syncing when last block's time is
	// within this much of the system time.
	// stopSyncingDurationMinutes = 10

	// ask for best height every 10s
	statusUpdateIntervalSeconds = 10
	// check if we should switch to consensus reactor
	switchToConsensusIntervalSeconds = 1
)

type consensusReactor interface {
	// for when we switch from blockchain reactor and fast sync to
	// the consensus machine
	SwitchToConsensus(state sm.State, skipWAL bool)
}

type peerError struct {
	err    error
	peerID p2p.ID
}

func (e peerError) Error() string {
	return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
}

// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
	p2p.BaseReactor

	// immutable
	initialState sm.State

	blockExec *sm.BlockExecutor
	store     *store.BlockStore
	pool      *BlockPool
	fastSync  bool

	requestsCh <-chan BlockRequest
	errorsCh   <-chan peerError
}

// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
	fastSync bool) *BlockchainReactor {

	if state.LastBlockHeight != store.Height() {
		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
			store.Height()))
	}

	requestsCh := make(chan BlockRequest, maxTotalRequesters)

	const capacity = 1000                      // must be bigger than peers count
	errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock

	startHeight := store.Height() + 1
	if startHeight == 1 {
		startHeight = state.InitialHeight
	}
	pool := NewBlockPool(startHeight, requestsCh, errorsCh)

	bcR := &BlockchainReactor{
		initialState: state,
		blockExec:    blockExec,
		store:        store,
		pool:         pool,
		fastSync:     fastSync,
		requestsCh:   requestsCh,
		errorsCh:     errorsCh,
	}
	bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
	return bcR
}

// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
	bcR.BaseService.Logger = l
	bcR.pool.Logger = l
}

// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
	if bcR.fastSync {
		err := bcR.pool.Start()
		if err != nil {
			return err
		}
		go bcR.poolRoutine(false)
	}
	return nil
}

// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
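// It resets the pool's start height to the height after the state-synced block and launches poolRoutine.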
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
	bcR.fastSync = true
	bcR.initialState = state

	bcR.pool.height = state.LastBlockHeight + 1
	err := bcR.pool.Start()
	if err != nil {
		return err
	}
	go bcR.poolRoutine(true)
	return nil
}

// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
	if bcR.fastSync {
		if err := bcR.pool.Stop(); err != nil {
			bcR.Logger.Error("Error stopping pool", "err", err)
		}
	}
}

// GetChannels implements Reactor.
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:                  BlockchainChannel,
			Priority:            5,
			SendQueueCapacity:   1000,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: bc.MaxMsgSize,
		},
	}
}

// AddPeer implements Reactor by sending our state to the peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
	msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
		Base:   bcR.store.Base(),
		Height: bcR.store.Height()})
	if err != nil {
		bcR.Logger.Error("could not convert msg to protobuf", "err", err)
		return
	}

	peer.Send(BlockchainChannel, msgBytes)
	// it's OK if send fails. will try later in poolRoutine

	// peer is added to the pool once we receive the first
	// bcStatusResponseMessage from the peer and call pool.SetPeerRange
}

// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	bcR.pool.RemovePeer(peer.ID())
}

// respondToPeer loads a block and sends it to the requesting peer,
// if we have it. Otherwise, we'll respond saying we don't have it.
func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest,
	src p2p.Peer) (queued bool) {

	block := bcR.store.LoadBlock(msg.Height)
	if block != nil {
		bl, err := block.ToProto()
		if err != nil {
			bcR.Logger.Error("could not convert block to protobuf", "err", err)
			return false
		}

		msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl})
		if err != nil {
			bcR.Logger.Error("could not marshal msg", "err", err)
			return false
		}

		return src.TrySend(BlockchainChannel, msgBytes)
	}

	bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)

	msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
	if err != nil {
		bcR.Logger.Error("could not convert msg to protobuf", "err", err)
		return false
	}

	return src.TrySend(BlockchainChannel, msgBytes)
}

// Receive implements Reactor by handling 5 types of messages (see the switch below).
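// Messages that fail to decode or validate cause the sending peer to be stopped for error.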
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := bc.DecodeMsg(msgBytes)
	if err != nil {
		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		bcR.Switch.StopPeerForError(src, err)
		return
	}

	if err = bc.ValidateMsg(msg); err != nil {
		bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
		bcR.Switch.StopPeerForError(src, err)
		return
	}

	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)

	switch msg := msg.(type) {
	case *bcproto.BlockRequest:
		bcR.respondToPeer(msg, src)
	case *bcproto.BlockResponse:
		bi, err := types.BlockFromProto(msg.Block)
		if err != nil {
			bcR.Logger.Error("Block content is invalid", "err", err)
			return
		}
		bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
	case *bcproto.StatusRequest:
		// Send peer our state.
		msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
			Height: bcR.store.Height(),
			Base:   bcR.store.Base(),
		})
		if err != nil {
			bcR.Logger.Error("could not convert msg to protobuf", "err", err)
			return
		}
		src.TrySend(BlockchainChannel, msgBytes)
	case *bcproto.StatusResponse:
		// Got a peer status. Unverified.
		bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
	case *bcproto.NoBlockResponse:
		bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
	default:
		bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
	}
}

// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {

	trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
	defer trySyncTicker.Stop()

	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
	defer statusUpdateTicker.Stop()

	switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
	defer switchToConsensusTicker.Stop()

	blocksSynced := uint64(0)

	chainID := bcR.initialState.ChainID
	state := bcR.initialState

	lastHundred := time.Now()
	lastRate := 0.0

	didProcessCh := make(chan struct{}, 1)

	go func() {
		for {
			select {
			case <-bcR.Quit():
				return
			case <-bcR.pool.Quit():
				return
			case request := <-bcR.requestsCh:
				peer := bcR.Switch.Peers().Get(request.PeerID)
				if peer == nil {
					continue
				}
				msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
				if err != nil {
					bcR.Logger.Error("could not convert msg to proto", "err", err)
					continue
				}

				queued := peer.TrySend(BlockchainChannel, msgBytes)
				if !queued {
					bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
				}
			case err := <-bcR.errorsCh:
				peer := bcR.Switch.Peers().Get(err.peerID)
				if peer != nil {
					bcR.Switch.StopPeerForError(peer, err)
				}

			case <-statusUpdateTicker.C:
				// ask for status updates
				go bcR.BroadcastStatusRequest() // nolint: errcheck

			}
		}
	}()

FOR_LOOP:
	for {
		select {
		case <-switchToConsensusTicker.C:
			height, numPending, lenRequesters := bcR.pool.GetStatus()
			outbound, inbound, _ := bcR.Switch.NumPeers()
			bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
				"outbound", outbound, "inbound", inbound)
			if bcR.pool.IsCaughtUp() {
				bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
				if err := bcR.pool.Stop(); err != nil {
					bcR.Logger.Error("Error stopping pool", "err", err)
				}
				conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
				if ok {
					conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
				}
				// else {
				// should only happen during testing
				// }

				break FOR_LOOP
			}

		case <-trySyncTicker.C: // chan time
			select {
			case didProcessCh <- struct{}{}:
			default:
			}

		case <-didProcessCh:
			// NOTE: It is a subtle mistake to process more than a single block
			// at a time (e.g. 10) here, because we only TrySend 1 request per
			// loop. The ratio mismatch can result in starving of blocks, a
			// sudden burst of requests and responses, and repeat.
			// Consequently, it is better to split these routines rather than
			// coupling them as it's written here. TODO uncouple from request
			// routine.

			// See if there are any blocks to sync.
			first, second := bcR.pool.PeekTwoBlocks()
			// bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
			if first == nil || second == nil {
				// We need both to sync the first block.
				continue FOR_LOOP
			} else {
				// Try again quickly next loop.
				didProcessCh <- struct{}{}
			}

			firstParts := first.MakePartSet(types.BlockPartSizeBytes)
			firstPartSetHeader := firstParts.Header()
			firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
			// Finally, verify the first block using the second's commit
			// NOTE: we can probably make this more efficient, but note that calling
			// first.Hash() doesn't verify the tx contents, so MakePartSet() is
			// currently necessary.
			err := state.Validators.VerifyCommitLight(
				chainID, firstID, first.Height, second.LastCommit)
			if err != nil {
				bcR.Logger.Error("Error in validation", "err", err)
				peerID := bcR.pool.RedoRequest(first.Height)
				peer := bcR.Switch.Peers().Get(peerID)
				if peer != nil {
					// NOTE: we've already removed the peer's request, but we
					// still need to clean up the rest.
					bcR.Switch.StopPeerForError(peer, fmt.Errorf("blockchainReactor validation error: %v", err))
				}
				peerID2 := bcR.pool.RedoRequest(second.Height)
				peer2 := bcR.Switch.Peers().Get(peerID2)
				if peer2 != nil && peer2 != peer {
					// NOTE: we've already removed the peer's request, but we
					// still need to clean up the rest.
					bcR.Switch.StopPeerForError(peer2, fmt.Errorf("blockchainReactor validation error: %v", err))
				}
				continue FOR_LOOP
			} else {
				bcR.pool.PopRequest()

				// TODO: batch saves so we don't persist to disk every block
				bcR.store.SaveBlock(first, firstParts, second.LastCommit)

				// TODO: same thing for app - but we would need a way to
				// get the hash without persisting the state
				var err error
				state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
				if err != nil {
					// TODO This is bad, are we zombie?
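					// The block was already verified against its child's commit and saved
					// to the store, so a failed ApplyBlock leaves the node in an
					// inconsistent state; crash rather than continue.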
					panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
				}
				blocksSynced++

				if blocksSynced%100 == 0 {
					lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
					bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
						"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
					lastHundred = time.Now()
				}
			}
			continue FOR_LOOP

		case <-bcR.Quit():
			break FOR_LOOP
		}
	}
}

// BroadcastStatusRequest broadcasts `BlockStore` base and height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
	bm, err := bc.EncodeMsg(&bcproto.StatusRequest{})
	if err != nil {
		bcR.Logger.Error("could not convert msg to proto", "err", err)
		return fmt.Errorf("could not convert msg to proto: %w", err)
	}

	bcR.Switch.Broadcast(BlockchainChannel, bm)

	return nil
}