github.com/franono/tendermint@v0.32.2-0.20200527150959-749313264ce9/blockchain/v2/scheduler.go

package v2

import (
	"bytes"
	"fmt"
	"math"
	"sort"
	"time"

	"github.com/franono/tendermint/p2p"
	"github.com/franono/tendermint/types"
)

// Events generated by the scheduler:
// all blocks have been processed
type scFinishedEv struct {
	priorityNormal
	reason string
}

// send a blockRequest message
type scBlockRequest struct {
	priorityNormal
	peerID p2p.ID
	height int64
}

// a block has been received and validated by the scheduler
type scBlockReceived struct {
	priorityNormal
	peerID p2p.ID
	block  *types.Block
}

// scheduler detected a peer error
type scPeerError struct {
	priorityHigh
	peerID p2p.ID
	reason error
}

func (e scPeerError) String() string {
	return fmt.Sprintf("scPeerError - peerID %s, err %s", e.peerID, e.reason)
}

// scheduler removed a set of peers (timed out or too slow)
type scPeersPruned struct {
	priorityHigh
	peers []p2p.ID
}

// XXX: make this fatal?
// scheduler encountered a fatal error
type scSchedulerFail struct {
	priorityHigh
	reason error
}

type blockState int

const (
	blockStateUnknown   blockState = iota + 1 // no known peer has this block
	blockStateNew                             // indicates that a peer has reported having this block
	blockStatePending                         // indicates that this block has been requested from a peer
	blockStateReceived                        // indicates that this block has been received from a peer
	blockStateProcessed                       // indicates that this block has been applied
)

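// A block normally moves through these states in order:
//
//	Unknown -> New -> Pending -> Received -> Processed
//
// A Pending or Received block falls back to New when the peer serving it is
// removed (see removePeer).
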
func (e blockState) String() string {
	switch e {
	case blockStateUnknown:
		return "Unknown"
	case blockStateNew:
		return "New"
	case blockStatePending:
		return "Pending"
	case blockStateReceived:
		return "Received"
	case blockStateProcessed:
		return "Processed"
	default:
		return fmt.Sprintf("invalid blockState: %d", e)
	}
}

type peerState int

const (
	peerStateNew peerState = iota + 1
	peerStateReady
	peerStateRemoved
)

func (e peerState) String() string {
	switch e {
	case peerStateNew:
		return "New"
	case peerStateReady:
		return "Ready"
	case peerStateRemoved:
		return "Removed"
	default:
		panic(fmt.Sprintf("unknown peerState: %d", e))
	}
}

type scPeer struct {
	peerID p2p.ID

	// initialized as New when peer is added, updated to Ready when statusUpdate is received,
	// updated to Removed when peer is removed
	state peerState

	base        int64 // updated when statusResponse is received
	height      int64 // updated when statusResponse is received
	lastTouched time.Time
	lastRate    int64 // last receive rate in bytes per nanosecond (see markReceived)
}

func (p scPeer) String() string {
	return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}",
		p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}

func newScPeer(peerID p2p.ID) *scPeer {
	return &scPeer{
		peerID:      peerID,
		state:       peerStateNew,
		base:        -1,
		height:      -1,
		lastTouched: time.Time{},
	}
}

// The scheduler keeps track of the state of each block and each peer. The
// scheduler will attempt to schedule new block requests with `trySchedule`
// events and remove slow peers with `tryPrune` events.
type scheduler struct {
	initHeight int64

	// next block that needs to be processed. All blocks with smaller height are
	// in Processed state.
	height int64

	// lastAdvance tracks the last time a block execution happened.
	// syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing.
	// This covers the cases where there are no peers or all peers have a lower height.
	lastAdvance time.Time
	syncTimeout time.Duration

	// a map of peerID to scheduler specific peer struct `scPeer` used to keep
	// track of peer specific state
	peers       map[p2p.ID]*scPeer
	peerTimeout time.Duration // maximum response time from a peer otherwise prune
	minRecvRate int64         // minimum receive rate from peer otherwise prune

	// the maximum number of blocks that should be New, Received or Pending at any point
	// in time. This is used to enforce a limit on the blockStates map.
	targetPending int
	// a map of heights to block states for blocks that are New (to be scheduled),
	// Pending or Received. Its size should be at most targetPending.
	blockStates map[int64]blockState

	// a map of heights to the peer from which we are awaiting a response
	pendingBlocks map[int64]p2p.ID

	// the time at which a block was put in blockStatePending
	pendingTime map[int64]time.Time

	// a map of heights to the peer that put the block in blockStateReceived
	receivedBlocks map[int64]p2p.ID
}

func (sc scheduler) String() string {
	return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm %v, rblks: %v",
		sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks)
}

func newScheduler(initHeight int64, startTime time.Time) *scheduler {
	sc := scheduler{
		initHeight:     initHeight,
		lastAdvance:    startTime,
		syncTimeout:    60 * time.Second,
		height:         initHeight + 1,
		blockStates:    make(map[int64]blockState),
		peers:          make(map[p2p.ID]*scPeer),
		pendingBlocks:  make(map[int64]p2p.ID),
		pendingTime:    make(map[int64]time.Time),
		receivedBlocks: make(map[int64]p2p.ID),
		targetPending:  10,               // TODO - pass as param
		peerTimeout:    15 * time.Second, // TODO - pass as param
		minRecvRate:    0,                //int64(7680), TODO - pass as param
	}

	return &sc
}

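// A minimal usage sketch (hypothetical values; in practice the fast-sync
// reactor drives the scheduler exclusively through handle()):
//
//	sc := newScheduler(initialHeight, time.Now())
//	ev, _ := sc.handle(bcAddNewPeer{peerID: "peer1"})
//	ev, _ = sc.handle(bcStatusResponse{peerID: "peer1", base: 1, height: 100})
//	ev, _ = sc.handle(rTrySchedule{time: time.Now()})
//	// ev is now an scBlockRequest for the next height to fetch.
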
func (sc *scheduler) ensurePeer(peerID p2p.ID) *scPeer {
	if _, ok := sc.peers[peerID]; !ok {
		sc.peers[peerID] = newScPeer(peerID)
	}
	return sc.peers[peerID]
}

func (sc *scheduler) touchPeer(peerID p2p.ID, now time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("couldn't find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state)
	}

	peer.lastTouched = now

	return nil
}

func (sc *scheduler) removePeer(peerID p2p.ID) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return nil
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("tried to remove peer %s in peerStateRemoved", peerID)
	}

	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.pendingTime, height)
			delete(sc.pendingBlocks, height)
		}
	}

	for height, rcvPeerID := range sc.receivedBlocks {
		if rcvPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.receivedBlocks, height)
		}
	}

	// remove the blocks from blockStates if the peer removal causes the max peer height to be lower.
	peer.state = peerStateRemoved
	maxPeerHeight := int64(0)
	for _, otherPeer := range sc.peers {
		if otherPeer.state != peerStateReady {
			continue
		}
		if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight {
			maxPeerHeight = otherPeer.height
		}
	}
	for h := range sc.blockStates {
		if h > maxPeerHeight {
			delete(sc.blockStates, h)
		}
	}

	return nil
}

// Check if the block pool is running low and add new blocks in New state to be requested.
// This function is called when there is an increase in the maximum peer height or when
// blocks are processed.
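// For example, with sc.height=10, targetPending=10 and a maximum peer height
// of 15, heights 10 through 15 move from Unknown to New.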
func (sc *scheduler) addNewBlocks() {
	if len(sc.blockStates) >= sc.targetPending {
		return
	}

	for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ {
		if i > sc.maxHeight() {
			break
		}
		if sc.getStateAtHeight(i) == blockStateUnknown {
			sc.setStateAtHeight(i, blockStateNew)
		}
	}
}

func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error {
	peer := sc.ensurePeer(peerID)

	if peer.state == peerStateRemoved {
		return fmt.Errorf("cannot set peer height for a peer in peerStateRemoved")
	}

	if height < peer.height {
		_ = sc.removePeer(peerID)
		return fmt.Errorf("cannot move peer height lower (from %d to %d)", peer.height, height)
	}

	if base > height {
		return fmt.Errorf("cannot set peer base higher than its height")
	}

	peer.base = base
	peer.height = height
	peer.state = peerStateReady

	sc.addNewBlocks()
	return nil
}

func (sc *scheduler) getStateAtHeight(height int64) blockState {
	if height < sc.height {
		return blockStateProcessed
	}
	if state, ok := sc.blockStates[height]; ok {
		return state
	}
	return blockStateUnknown
}

func (sc *scheduler) getPeersWithHeight(height int64) []p2p.ID {
	peers := make([]p2p.ID, 0)
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if peer.base <= height && peer.height >= height {
			peers = append(peers, peer.peerID)
		}
	}
	return peers
}

func (sc *scheduler) prunablePeers(peerTimeout time.Duration, minRecvRate int64, now time.Time) []p2p.ID {
	prunable := make([]p2p.ID, 0)
	for peerID, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate {
			prunable = append(prunable, peerID)
		}
	}
	// Tests for handleTryPrunePeer() may fail without sort due to range non-determinism
	sort.Sort(PeerByID(prunable))
	return prunable
}

func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
	sc.blockStates[height] = state
}

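// markReceived moves the block at the given height from Pending to Received,
// records the peer that delivered it and updates the peer's receive rate.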
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("received block from unknown peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("cannot receive blocks from not ready peer %s", peerID)
	}

	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
		return fmt.Errorf("received block %d from peer %s without being requested", height, peerID)
	}

	pendingTime, ok := sc.pendingTime[height]
	if !ok || now.Sub(pendingTime) <= 0 {
		return fmt.Errorf("clock error: block %d received at %s but requested at %s",
			height, now, pendingTime)
	}

	// receive rate in bytes per nanosecond: block size over the request-response round trip
	peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()

	sc.setStateAtHeight(height, blockStateReceived)
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)

	sc.receivedBlocks[height] = peerID

	return nil
}

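// markPending moves the block at the given height from New to Pending after
// verifying that the peer is Ready and that its [base, height] range covers it.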
func (sc *scheduler) markPending(peerID p2p.ID, height int64, now time.Time) error {
	state := sc.getStateAtHeight(height)
	if state != blockStateNew {
		return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
	}

	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("cannot find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("cannot schedule %d from %s in %s", height, peerID, peer.state)
	}

	if height > peer.height {
		return fmt.Errorf("cannot request height %d from peer %s that is at height %d",
			height, peerID, peer.height)
	}

	if height < peer.base {
		return fmt.Errorf("cannot request height %d for peer %s with base %d",
			height, peerID, peer.base)
	}

	sc.setStateAtHeight(height, blockStatePending)
	sc.pendingBlocks[height] = peerID
	sc.pendingTime[height] = now

	return nil
}

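// markProcessed advances sc.height past a Received block that the processor
// has applied, and tops up the request window with new heights.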
func (sc *scheduler) markProcessed(height int64) error {
	sc.lastAdvance = time.Now()
	state := sc.getStateAtHeight(height)
	if state != blockStateReceived {
		return fmt.Errorf("cannot mark height %d as processed from block state %s", height, state)
	}

	sc.height++
	delete(sc.receivedBlocks, height)
	delete(sc.blockStates, height)
	sc.addNewBlocks()

	return nil
}

func (sc *scheduler) allBlocksProcessed() bool {
	if len(sc.peers) == 0 {
		return false
	}
	return sc.height >= sc.maxHeight()
}

// returns the maximum height of any ready peer, or the last processed block
// (sc.height - 1) if that is higher
func (sc *scheduler) maxHeight() int64 {
	max := sc.height - 1
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if max < peer.height {
			max = peer.height
		}
	}
	return max
}

// lowest block in sc.blockStates with state == blockStateNew or -1 if no new blocks
func (sc *scheduler) nextHeightToSchedule() int64 {
	var min int64 = math.MaxInt64
	for height, state := range sc.blockStates {
		if state == blockStateNew && height < min {
			min = height
		}
	}
	if min == math.MaxInt64 {
		min = -1
	}
	return min
}

func (sc *scheduler) pendingFrom(peerID p2p.ID) []int64 {
	var heights []int64
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			heights = append(heights, height)
		}
	}
	return heights
}

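// selectPeer picks, among the peers whose [base, height] range covers the given
// height, one with the fewest pending requests, breaking ties by lowest peer ID.
// For example, with pending request counts a:2, b:1, c:1, peer "b" is selected.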
func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) {
	peers := sc.getPeersWithHeight(height)
	if len(peers) == 0 {
		return "", fmt.Errorf("cannot find peer for height %d", height)
	}

	// create a map from number of pending requests to a list
	// of peers having that number of pending requests.
	pendingFrom := make(map[int][]p2p.ID)
	for _, peerID := range peers {
		numPending := len(sc.pendingFrom(peerID))
		pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
	}

	// find the set of peers with minimum number of pending requests.
	var minPending int64 = math.MaxInt64
	for mp := range pendingFrom {
		if int64(mp) < minPending {
			minPending = int64(mp)
		}
	}

	sort.Sort(PeerByID(pendingFrom[int(minPending)]))
	return pendingFrom[int(minPending)][0], nil
}

// PeerByID is a list of peers sorted by peerID.
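// It implements sort.Interface so a slice of peer IDs can be ordered with
// sort.Sort:
//
//	peers := PeerByID{"b", "a"}
//	sort.Sort(peers) // peers == PeerByID{"a", "b"}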
type PeerByID []p2p.ID

func (peers PeerByID) Len() int {
	return len(peers)
}

func (peers PeerByID) Less(i, j int) bool {
	return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1
}

func (peers PeerByID) Swap(i, j int) {
	peers[i], peers[j] = peers[j], peers[i]
}

// Handlers

// This handler gets the block, performs some validation and then passes it on to the processor.
func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
	err := sc.touchPeer(event.peerID, event.time)
	if err != nil {
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}

	err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
	if err != nil {
		_ = sc.removePeer(event.peerID)
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}

	return scBlockReceived{peerID: event.peerID, block: event.block}, nil
}

func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) {
	if len(sc.peers) == 0 {
		return noOp, nil
	}

	peer, ok := sc.peers[event.peerID]
	if !ok || peer.state == peerStateRemoved {
		return noOp, nil
	}
	// The peer may have been just removed due to errors, low speed or timeouts.
	_ = sc.removePeer(event.peerID)

	return scPeerError{peerID: event.peerID,
		reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d",
			event.peerID, peer.base, peer.height, event.height)}, nil
}

func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) {
	if event.height != sc.height {
		panic(fmt.Sprintf("processed height %d but expected height %d", event.height, sc.height))
	}
	err := sc.markProcessed(event.height)
	if err != nil {
		// It is possible that a peer error or timeout is handled after the processor
		// has processed the block but before the scheduler received this event, so
		// when the pcBlockProcessed event is received the block had been requested again.
		return scSchedulerFail{reason: err}, nil
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "processed all blocks"}, nil
	}

	return noOp, nil
}

// Handles an error from the processor. The processor has already cleaned the blocks from
// the peers included in this event. Just attempt to remove the peers.
func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) {
	if len(sc.peers) == 0 {
		return noOp, nil
	}
	// The peers may have been just removed due to errors, low speed or timeouts.
	_ = sc.removePeer(event.firstPeerID)
	if event.firstPeerID != event.secondPeerID {
		_ = sc.removePeer(event.secondPeerID)
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "error on last block"}, nil
	}

	return noOp, nil
}

func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) {
	sc.ensurePeer(event.peerID)
	return noOp, nil
}

func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) {
	err := sc.removePeer(event.peerID)
	if err != nil {
		// XXX - It is possible that removePeer fails here for legitimate reasons,
		// for example if a peer timeout or error was handled just before this.
		return scSchedulerFail{reason: err}, nil
	}
	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "removed peer"}, nil
	}
	return noOp, nil
}

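// handleTryPrunePeer removes peers that are holding the sync back: first the
// peer responsible for the block at sc.height if it has not responded within
// peerTimeout, then every peer that is idle or below the minimum receive rate.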
func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
	// Check behavior of the peer responsible for delivering the block at sc.height.
	timeHeightAsked, ok := sc.pendingTime[sc.height]
	if ok && time.Since(timeHeightAsked) > sc.peerTimeout {
		// A request was sent to a peer for the block at sc.height but a response was not received
		// from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer
		// will be timed out even if it sends blocks at higher heights but prevents progress by
		// not sending the block at the current height.
		_ = sc.removePeer(sc.pendingBlocks[sc.height])
	}

	prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
	if len(prunablePeers) == 0 {
		return noOp, nil
	}
	for _, peerID := range prunablePeers {
		err := sc.removePeer(peerID)
		if err != nil {
			// Should never happen as prunablePeers() returns only existing peers in Ready state.
			panic("scheduler data corruption")
		}
	}

	// If all blocks are processed we should finish.
	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "after try prune"}, nil
	}

	return scPeersPruned{peers: prunablePeers}, nil
}

func (sc *scheduler) handleResetState(event bcResetState) (Event, error) {
	sc.initHeight = event.state.LastBlockHeight + 1
	sc.height = event.state.LastBlockHeight + 1
	sc.lastAdvance = time.Now()
	sc.addNewBlocks()
	return noOp, nil
}

func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) {
	if time.Since(sc.lastAdvance) > sc.syncTimeout {
		return scFinishedEv{reason: "timeout, no advance"}, nil
	}

	nextHeight := sc.nextHeightToSchedule()
	if nextHeight == -1 {
		return noOp, nil
	}

	bestPeerID, err := sc.selectPeer(nextHeight)
	if err != nil {
		return scSchedulerFail{reason: err}, nil
	}
	if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil {
		return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate
	}
	return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil
}

func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) {
	err := sc.setPeerRange(event.peerID, event.base, event.height)
	if err != nil {
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}
	return noOp, nil
}

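// handle dispatches an event to the matching handler and returns the resulting
// event.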
func (sc *scheduler) handle(event Event) (Event, error) {
	switch event := event.(type) {
	case bcResetState:
		return sc.handleResetState(event)
	case bcStatusResponse:
		return sc.handleStatusResponse(event)
	case bcBlockResponse:
		return sc.handleBlockResponse(event)
	case bcNoBlockResponse:
		return sc.handleNoBlockResponse(event)
	case rTrySchedule:
		return sc.handleTrySchedule(event)
	case bcAddNewPeer:
		return sc.handleAddNewPeer(event)
	case bcRemovePeer:
		return sc.handleRemovePeer(event)
	case rTryPrunePeer:
		return sc.handleTryPrunePeer(event)
	case pcBlockProcessed:
		return sc.handleBlockProcessed(event)
	case pcBlockVerificationFailure:
		return sc.handleBlockProcessError(event)
	default:
		return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil
	}
}