github.com/bigzoro/my_simplechain@v0.0.0-20240315012955-8ad0a2a29bb9/eth/handler.go (about)

     1  // Copyright 2015 The go-simplechain Authors
     2  // This file is part of the go-simplechain library.
     3  //
     4  // The go-simplechain library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-simplechain library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-simplechain library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package eth
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	"strings"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/bigzoro/my_simplechain/crypto"
    31  	"golang.org/x/crypto/sha3"
    32  
    33  	"github.com/bigzoro/my_simplechain/common"
    34  	"github.com/bigzoro/my_simplechain/consensus"
    35  	"github.com/bigzoro/my_simplechain/core"
    36  	"github.com/bigzoro/my_simplechain/core/forkid"
    37  	"github.com/bigzoro/my_simplechain/core/types"
    38  	"github.com/bigzoro/my_simplechain/eth/downloader"
    39  	"github.com/bigzoro/my_simplechain/eth/fetcher"
    40  	"github.com/bigzoro/my_simplechain/ethdb"
    41  	"github.com/bigzoro/my_simplechain/event"
    42  	"github.com/bigzoro/my_simplechain/log"
    43  	"github.com/bigzoro/my_simplechain/p2p"
    44  	"github.com/bigzoro/my_simplechain/p2p/enode"
    45  	"github.com/bigzoro/my_simplechain/params"
    46  	"github.com/bigzoro/my_simplechain/rlp"
    47  	"github.com/bigzoro/my_simplechain/trie"
    48  )
    49  
    50  const (
    51  	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
    52  	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
    53  
    54  	// txChanSize is the size of channel listening to NewTxsEvent.
    55  	// The number is referenced from the size of tx pool.
    56  	txChanSize = 4096
    57  
	// minimum number of peers to broadcast new blocks to
    59  	minBroadcastPeers = 4
    60  )
    61  
    62  var (
    63  	syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge
    64  )
    65  
    66  func errResp(code errCode, format string, v ...interface{}) error {
    67  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    68  }
    69  
// priorBroadcastSelector picks the node IDs that should receive a newly
// mined block with priority during broadcast (see BroadcastBlock usage of
// the pbs field on handler).
type priorBroadcastSelector interface {
	// Prior returns the prioritized recipients for the given block.
	Prior(*types.Block) []enode.ID
}
    73  
// handler manages the eth sub-protocol: it tracks connected peers,
// relays blocks and transactions, and drives both the short-range
// fetcher and the long-range downloader synchronisation.
type handler struct {
	networkID  uint64        // Network identifier advertised in the peer handshake
	forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node

	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	checkpointNumber uint64      // Block number for the sync progress validator to cross reference
	checkpointHash   common.Hash // Block hash for the sync progress validator to cross reference

	txpool     txPool           // Transaction pool; source of broadcasts and sink for remote txs
	blockchain *core.BlockChain // Canonical chain used to answer queries and import blocks
	maxPeers   int              // Maximum number of peers accepted (trusted peers exempt)

	downloader *downloader.Downloader // Long-range chain synchroniser
	fetcher    *fetcher.Fetcher       // Short-range announced-block retriever
	peers      *peerSet               // Set of currently registered peers

	eventMux      *event.TypeMux             // Event mux for mined-block notifications
	txsCh         chan core.NewTxsEvent      // Channel receiving new-transaction events from the pool
	txsSub        event.Subscription         // Subscription backing txsCh
	minedBlockSub *event.TypeMuxSubscription // Subscription for locally mined blocks (nil in raft mode)
	whitelist     map[uint64]common.Hash     // Hard-coded block number -> hash pairs peers must agree on
	pbs           priorBroadcastSelector     // Selector of peers that get blocks with priority

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh   chan *peer
	txsyncCh    chan *txsync
	quitSync    chan struct{}
	noMorePeers chan struct{}

	// wait group is used for graceful shutdowns during downloading
	// and processing
	wg           sync.WaitGroup
	raftMode     bool          // True when running under raft consensus (no fetcher/downloader)
	superManager *SuperManager // NOTE(review): purpose not visible in this file — confirm with SuperManager definition

	certificateFeed event.Feed              // Feed for certificate-related events (see CertificateRevocationListMsg handling)
	scope           event.SubscriptionScope // Tracks subscriptions on certificateFeed for teardown

	commonDb ethdb.Database // presumably the shared chain database — verify against callers setting it

	needCheckPermission bool // Whether peer enode permission checks are enforced
}
   118  
// NewHandler returns a new SimpleService sub protocol manager. The SimpleService sub protocol
// manages peers capable with the SimpleService network. The engine argument is used only to
// build the header validator handed to the block fetcher.
func NewHandler(config *params.ChainConfig, checkpoint *params.TrustedCheckpoint, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database, cacheLimit int, whitelist map[uint64]common.Hash, pbs priorBroadcastSelector) (*handler, error) {
	// Create the protocol manager with the base fields
	manager := &handler{
		networkID:    networkID,
		forkFilter:   forkid.NewFilter(blockchain),
		eventMux:     mux,
		txpool:       txpool,
		blockchain:   blockchain,
		peers:        newPeerSet(),
		whitelist:    whitelist,
		newPeerCh:    make(chan *peer),
		noMorePeers:  make(chan struct{}),
		txsyncCh:     make(chan *txsync),
		quitSync:     make(chan struct{}),
		raftMode:     config.Raft,
		superManager: NewSuperManager(),
		pbs:          pbs,
	}

	if mode == downloader.FullSync {
		// The database seems empty as the current block is the genesis. Yet the fast
		// block is ahead, so fast sync was enabled for this node at a certain point.
		// The scenarios where this can happen is
		// * if the user manually (or via a bad block) rolled back a fast sync node
		//   below the sync point.
		// * the last fast sync is not finished while user specifies a full sync this
		//   time. But we don't have any recent state for full sync.
		// In these cases however it's safe to reenable fast sync.
		fullBlock, fastBlock := blockchain.CurrentBlock(), blockchain.CurrentFastBlock()
		if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 {
			manager.fastSync = uint32(1)
			log.Warn("Switch sync mode from full sync to fast sync")
		}
	} else {
		if blockchain.CurrentBlock().NumberU64() > 0 {
			// Print warning log if database is not empty to run fast sync.
			log.Warn("Switch sync mode from fast sync to full sync")
		} else {
			// If fast sync was requested and our database is empty, grant it
			manager.fastSync = uint32(1)
		}
	}
	// If we have trusted checkpoints, enforce them on the chain
	if checkpoint != nil {
		manager.checkpointNumber = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1
		manager.checkpointHash = checkpoint.SectionHead
	}

	// Construct the downloader (long sync) and its backing state bloom if fast
	// sync is requested. The downloader is responsible for deallocating the state
	// bloom when it's done.
	var stateBloom *trie.SyncBloom
	if atomic.LoadUint32(&manager.fastSync) == 1 {
		stateBloom = trie.NewSyncBloom(uint64(cacheLimit), chaindb)
	}
	manager.downloader = downloader.New(manager.checkpointNumber, chaindb, stateBloom, manager.eventMux, blockchain, nil, manager.removePeer)

	// Construct the fetcher (short sync)
	validator := func(header *types.Header) error {
		return engine.VerifyHeader(blockchain, header, true)
	}
	heighter := func() uint64 {
		return blockchain.CurrentBlock().NumberU64()
	}
	inserter := func(blocks types.Blocks) (int, error) {
		// If sync hasn't reached the checkpoint yet, deny importing weird blocks.
		//
		// Ideally we would also compare the head block's timestamp and similarly reject
		// the propagated block if the head is too old. Unfortunately there is a corner
		// case when starting new networks, where the genesis might be ancient (0 unix)
		// which would prevent full nodes from accepting it.
		if manager.blockchain.CurrentBlock().NumberU64() < manager.checkpointNumber {
			log.Warn("Unsynced yet, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
			return 0, nil
		}
		// If fast sync is running, deny importing weird blocks. This is a problematic
		// clause when starting up a new network, because fast-syncing miners might not
		// accept each others' blocks until a restart. Unfortunately we haven't figured
		// out a way yet where nodes can decide unilaterally whether the network is new
		// or not. This should be fixed if we figure out a solution.
		if atomic.LoadUint32(&manager.fastSync) == 1 {
			log.Warn("Fast syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
			return 0, nil
		}
		n, err := manager.blockchain.InsertChain(blocks)
		if err == nil {
			atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
		}
		return n, err
	}
	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)

	return manager, nil
}
   215  
   216  func (h *handler) makeProtocol(version uint) p2p.Protocol {
   217  	length, ok := protocolLengths[version]
   218  	if !ok {
   219  		panic("makeProtocol for unknown version")
   220  	}
   221  
   222  	return p2p.Protocol{
   223  		Name:    protocolName,
   224  		Version: version,
   225  		Length:  length,
   226  		Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   227  			peer := h.newPeer(int(version), p, rw)
   228  			select {
   229  			case h.newPeerCh <- peer:
   230  				h.wg.Add(1)
   231  				defer h.wg.Done()
   232  				return h.handle(peer)
   233  			case <-h.quitSync:
   234  				return p2p.DiscQuitting
   235  			}
   236  		},
   237  		NodeInfo: func() interface{} {
   238  			return h.NodeInfo()
   239  		},
   240  		PeerInfo: func(id enode.ID) interface{} {
   241  			if p := h.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
   242  				return p.Info()
   243  			}
   244  			return nil
   245  		},
   246  	}
   247  }
   248  
   249  func (h *handler) removePeer(id string) {
   250  	// Short circuit if the peer was already removed
   251  	peer := h.peers.Peer(id)
   252  	if peer == nil {
   253  		return
   254  	}
   255  	log.Info("handler removePeer", "peer", id)
   256  
   257  	// Unregister the peer from the downloader and SimpleService peer set
   258  	err := h.downloader.UnregisterPeer(id)
   259  	if err != nil {
   260  		log.Error("handler UnregisterPeer", "err", err.Error())
   261  	}
   262  	if err := h.peers.Unregister(id); err != nil {
   263  		log.Error("Peer removal failed", "peer", id, "err", err)
   264  	}
   265  	// Hard disconnect at the networking layer
   266  	if peer != nil {
   267  		peer.Peer.Disconnect(p2p.DiscUselessPeer)
   268  	}
   269  }
   270  
   271  func (h *handler) Start(maxPeers int) {
   272  	h.maxPeers = maxPeers
   273  
   274  	// broadcast transactions
   275  	h.txsCh = make(chan core.NewTxsEvent, txChanSize)
   276  	h.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh)
   277  	go h.txBroadcastLoop()
   278  
   279  	if !h.raftMode {
   280  		// broadcast mined blocks
   281  		h.minedBlockSub = h.eventMux.Subscribe(core.NewMinedBlockEvent{})
   282  		go h.minedBroadcastLoop()
   283  	} else {
   284  		// We set this immediately in raft mode to make sure the miner never drops
   285  		// incoming txes. Raft mode doesn't use the fetcher or downloader, and so
   286  		// this would never be set otherwise.
   287  		atomic.StoreUint32(&h.acceptTxs, 1)
   288  	}
   289  
   290  	// start sync handlers
   291  	go h.syncer()
   292  	go h.txsyncLoop()
   293  }
   294  
// Stop tears down the protocol handler. The shutdown sequence is
// order-sensitive: broadcast subscriptions first, then the sync loops,
// then peer sessions, and finally a wait for all goroutines to exit.
func (h *handler) Stop() {
	log.Info("Stopping SimpleService protocol")

	h.txsSub.Unsubscribe() // quits txBroadcastLoop
	if !h.raftMode {
		// minedBlockSub is only created outside raft mode (see Start).
		h.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
	}
	// Quit the sync loop.
	// After this send has completed, no new peers will be accepted.
	h.noMorePeers <- struct{}{}

	// Quit fetcher, txsyncLoop.
	close(h.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// sessions which are already established but not added to h.peers yet
	// will exit when they try to register.
	h.peers.Close()

	// Wait for all peer handler goroutines and the loops to come down.
	h.wg.Wait()

	log.Info("SimpleService protocol stopped")
}
   320  
// newPeer wraps a raw p2p peer and its message pipe into an eth peer,
// instrumenting the writer with bandwidth metering.
func (h *handler) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
	return newPeer(pv, p, newMeteredMsgWriter(rw))
}
   324  
   325  // handle is the callback invoked to manage the life cycle of an eth peer. When
   326  // this function terminates, the peer is disconnected.
   327  func (h *handler) handle(p *peer) error {
   328  	// Ignore maxPeers if this is a trusted peer
   329  	if h.peers.Len() >= h.maxPeers && !p.Peer.Info().Network.Trusted {
   330  		return p2p.DiscTooManyPeers
   331  	}
   332  	p.Log().Debug("SimpleService peer connected", "name", p.Name())
   333  
   334  	// Execute the SimpleService handshake
   335  	var (
   336  		genesis = h.blockchain.Genesis()
   337  		head    = h.blockchain.CurrentHeader()
   338  		hash    = head.Hash()
   339  		number  = head.Number.Uint64()
   340  		td      = h.blockchain.GetTd(hash, number)
   341  	)
   342  	if err := p.Handshake(h.networkID, td, hash, genesis.Hash(), forkid.NewID(h.blockchain), h.forkFilter); err != nil {
   343  		p.Log().Debug("SimpleService handshake failed", "err", err)
   344  		return err
   345  	}
   346  	if !h.IsValid(p.Node().URLv4()) {
   347  		return errors.New(fmt.Sprintf("check if in permission list,enode %s", p.Node().URLv4()))
   348  	}
   349  
   350  	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
   351  		rw.Init(p.version)
   352  	}
   353  	// Register the peer locally
   354  	if err := h.peers.Register(p); err != nil {
   355  		p.Log().Error("SimpleService peer registration failed", "err", err)
   356  		return err
   357  	}
   358  	defer h.removePeer(p.id)
   359  
   360  	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
   361  	if err := h.downloader.RegisterPeer(p.id, p.version, p); err != nil {
   362  		return err
   363  	}
   364  	// Propagate existing transactions. new transactions appearing
   365  	// after this will be sent via broadcasts.
   366  	h.syncTransactions(p)
   367  
   368  	// If we have a trusted CHT, reject all peers below that (avoid fast sync eclipse)
   369  	if h.checkpointHash != (common.Hash{}) {
   370  		// Request the peer's checkpoint header for chain height/weight validation
   371  		if err := p.RequestHeadersByNumber(h.checkpointNumber, 1, 0, false); err != nil {
   372  			return err
   373  		}
   374  		// Start a timer to disconnect if the peer doesn't reply in time
   375  		p.syncDrop = time.AfterFunc(syncChallengeTimeout, func() {
   376  			p.Log().Warn("Checkpoint challenge timed out, dropping", "addr", p.RemoteAddr(), "type", p.Name())
   377  			h.removePeer(p.id)
   378  		})
   379  		// Make sure it's cleaned up if the peer dies off
   380  		defer func() {
   381  			if p.syncDrop != nil {
   382  				p.syncDrop.Stop()
   383  				p.syncDrop = nil
   384  			}
   385  		}()
   386  	}
   387  	// If we have any explicit whitelist block hashes, request them
   388  	for number := range h.whitelist {
   389  		if err := p.RequestHeadersByNumber(number, 1, 0, false); err != nil {
   390  			return err
   391  		}
   392  	}
   393  	// Handle incoming messages until the connection is torn down
   394  	for {
   395  		if err := h.handleMsg(p); err != nil {
   396  			p.Log().Debug("SimpleService message handling failed", "err", err)
   397  			return err
   398  		}
   399  	}
   400  }
   401  
   402  // handleMsg is invoked whenever an inbound message is received from a remote
   403  // peer. The remote connection is torn down upon returning any error.
   404  func (h *handler) handleMsg(p *peer) error {
   405  	// Read the next message from the remote peer, and ensure it's fully consumed
   406  	msg, err := p.rw.ReadMsg()
   407  	if err != nil {
   408  		return err
   409  	}
   410  	if msg.Size > protocolMaxMsgSize {
   411  		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
   412  	}
   413  	defer msg.Discard()
   414  	if h.raftMode {
   415  		if msg.Code != TxMsg &&
   416  			msg.Code != GetBlockHeadersMsg && msg.Code != BlockHeadersMsg &&
   417  			msg.Code != GetBlockBodiesMsg && msg.Code != BlockBodiesMsg {
   418  
   419  			log.Info("raft: ignoring message", "code", msg.Code)
   420  
   421  			return nil
   422  		}
   423  	}
   424  	// Handle the message depending on its contents
   425  	switch {
   426  	case msg.Code == StatusMsg:
   427  		// Status messages should never arrive after the handshake
   428  		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
   429  
   430  	// Block header query, collect the requested headers and reply
   431  	case msg.Code == GetBlockHeadersMsg:
   432  		// Decode the complex header query
   433  		var query getBlockHeadersData
   434  		if err := msg.Decode(&query); err != nil {
   435  			return errResp(ErrDecode, "%v: %v", msg, err)
   436  		}
   437  		hashMode := query.Origin.Hash != (common.Hash{})
   438  		first := true
   439  		maxNonCanonical := uint64(100)
   440  
   441  		// Gather headers until the fetch or network limits is reached
   442  		var (
   443  			bytes   common.StorageSize
   444  			headers []*types.Header
   445  			unknown bool
   446  		)
   447  		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
   448  			// Retrieve the next header satisfying the query
   449  			var origin *types.Header
   450  			if hashMode {
   451  				if first {
   452  					first = false
   453  					origin = h.blockchain.GetHeaderByHash(query.Origin.Hash)
   454  					if origin != nil {
   455  						query.Origin.Number = origin.Number.Uint64()
   456  					}
   457  				} else {
   458  					origin = h.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
   459  				}
   460  			} else {
   461  				origin = h.blockchain.GetHeaderByNumber(query.Origin.Number)
   462  			}
   463  			if origin == nil {
   464  				break
   465  			}
   466  			headers = append(headers, origin)
   467  			bytes += estHeaderRlpSize
   468  
   469  			// Advance to the next header of the query
   470  			switch {
   471  			case hashMode && query.Reverse:
   472  				// Hash based traversal towards the genesis block
   473  				ancestor := query.Skip + 1
   474  				if ancestor == 0 {
   475  					unknown = true
   476  				} else {
   477  					query.Origin.Hash, query.Origin.Number = h.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
   478  					unknown = (query.Origin.Hash == common.Hash{})
   479  				}
   480  			case hashMode && !query.Reverse:
   481  				// Hash based traversal towards the leaf block
   482  				var (
   483  					current = origin.Number.Uint64()
   484  					next    = current + query.Skip + 1
   485  				)
   486  				if next <= current {
   487  					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
   488  					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   489  					unknown = true
   490  				} else {
   491  					if header := h.blockchain.GetHeaderByNumber(next); header != nil {
   492  						nextHash := header.Hash()
   493  						expOldHash, _ := h.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
   494  						if expOldHash == query.Origin.Hash {
   495  							query.Origin.Hash, query.Origin.Number = nextHash, next
   496  						} else {
   497  							unknown = true
   498  						}
   499  					} else {
   500  						unknown = true
   501  					}
   502  				}
   503  			case query.Reverse:
   504  				// Number based traversal towards the genesis block
   505  				if query.Origin.Number >= query.Skip+1 {
   506  					query.Origin.Number -= query.Skip + 1
   507  				} else {
   508  					unknown = true
   509  				}
   510  
   511  			case !query.Reverse:
   512  				// Number based traversal towards the leaf block
   513  				query.Origin.Number += query.Skip + 1
   514  			}
   515  		}
   516  		return p.SendBlockHeaders(headers)
   517  
   518  	case msg.Code == BlockHeadersMsg:
   519  		// A batch of headers arrived to one of our previous requests
   520  		var headers []*types.Header
   521  		if err := msg.Decode(&headers); err != nil {
   522  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   523  		}
		// If no headers were received, but we're expecting a checkpoint header, consider it that
   525  		if len(headers) == 0 && p.syncDrop != nil {
   526  			// Stop the timer either way, decide later to drop or not
   527  			p.syncDrop.Stop()
   528  			p.syncDrop = nil
   529  
   530  			// If we're doing a fast sync, we must enforce the checkpoint block to avoid
   531  			// eclipse attacks. Unsynced nodes are welcome to connect after we're done
   532  			// joining the network
   533  			if atomic.LoadUint32(&h.fastSync) == 1 {
   534  				p.Log().Warn("Dropping unsynced node during fast sync", "addr", p.RemoteAddr(), "type", p.Name())
   535  				return errors.New("unsynced node cannot serve fast sync")
   536  			}
   537  		}
   538  		// Filter out any explicitly requested headers, deliver the rest to the downloader
   539  		filter := len(headers) == 1
   540  		if filter {
   541  			// If it's a potential sync progress check, validate the content and advertised chain weight
   542  			if p.syncDrop != nil && headers[0].Number.Uint64() == h.checkpointNumber {
   543  				// Disable the sync drop timer
   544  				p.syncDrop.Stop()
   545  				p.syncDrop = nil
   546  
   547  				// Validate the header and either drop the peer or continue
   548  				if headers[0].Hash() != h.checkpointHash {
   549  					return errors.New("checkpoint hash mismatch")
   550  				}
   551  				return nil
   552  			}
   553  			// Otherwise if it's a whitelisted block, validate against the set
   554  			if want, ok := h.whitelist[headers[0].Number.Uint64()]; ok {
   555  				if hash := headers[0].Hash(); want != hash {
   556  					p.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want)
   557  					return errors.New("whitelist block mismatch")
   558  				}
   559  				p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
   560  			}
   561  			// Irrelevant of the fork checks, send the header to the fetcher just in case
   562  			headers = h.fetcher.FilterHeaders(p.id, headers, time.Now())
   563  		}
   564  		if len(headers) > 0 || !filter {
   565  			err := h.downloader.DeliverHeaders(p.id, headers)
   566  			if err != nil {
   567  				log.Debug("Failed to deliver headers", "err", err)
   568  			}
   569  		}
   570  
   571  	case msg.Code == GetBlockBodiesMsg:
   572  		// Decode the retrieval message
   573  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   574  		if _, err := msgStream.List(); err != nil {
   575  			return err
   576  		}
   577  		// Gather blocks until the fetch or network limits is reached
   578  		var (
   579  			hash   common.Hash
   580  			bytes  int
   581  			bodies []rlp.RawValue
   582  		)
   583  		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
   584  			// Retrieve the hash of the next block
   585  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   586  				break
   587  			} else if err != nil {
   588  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   589  			}
   590  			// Retrieve the requested block body, stopping if enough was found
   591  			if data := h.blockchain.GetBodyRLP(hash); len(data) != 0 {
   592  				bodies = append(bodies, data)
   593  				bytes += len(data)
   594  			}
   595  		}
   596  		log.Debug("send block bodies", "count", len(bodies), "id", p.id)
   597  		return p.SendBlockBodiesRLP(bodies)
   598  
   599  	case msg.Code == BlockBodiesMsg:
   600  		// A batch of block bodies arrived to one of our previous requests
   601  		var request blockBodiesData
   602  		if err := msg.Decode(&request); err != nil {
   603  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   604  		}
   605  		// Deliver them all to the downloader for queuing
   606  		transactions := make([][]*types.Transaction, len(request))
   607  		uncles := make([][]*types.Header, len(request))
   608  
   609  		for i, body := range request {
   610  			transactions[i] = body.Transactions
   611  			uncles[i] = body.Uncles
   612  		}
   613  		log.Debug("receive block bodies", "count", len(transactions), "id", p.id)
   614  
   615  		// Filter out any explicitly requested bodies, deliver the rest to the downloader
   616  		filter := len(transactions) > 0 || len(uncles) > 0
   617  		if filter {
   618  			transactions, uncles = h.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
   619  		}
   620  		if len(transactions) > 0 || len(uncles) > 0 || !filter {
   621  			err := h.downloader.DeliverBodies(p.id, transactions, uncles)
   622  			if err != nil {
   623  				log.Debug("Failed to deliver bodies", "err", err)
   624  			}
   625  		}
   626  
   627  	case p.version >= eth63 && msg.Code == GetNodeDataMsg:
   628  		// Decode the retrieval message
   629  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   630  		if _, err := msgStream.List(); err != nil {
   631  			return err
   632  		}
   633  		// Gather state data until the fetch or network limits is reached
   634  		var (
   635  			hash  common.Hash
   636  			bytes int
   637  			data  [][]byte
   638  		)
   639  		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
   640  			// Retrieve the hash of the next state entry
   641  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   642  				break
   643  			} else if err != nil {
   644  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   645  			}
   646  			// Retrieve the requested state entry, stopping if enough was found
   647  			if entry, err := h.blockchain.TrieNode(hash); err == nil {
   648  				data = append(data, entry)
   649  				bytes += len(entry)
   650  			}
   651  		}
   652  		return p.SendNodeData(data)
   653  
   654  	case p.version >= eth63 && msg.Code == NodeDataMsg:
   655  		// A batch of node state data arrived to one of our previous requests
   656  		var data [][]byte
   657  		if err := msg.Decode(&data); err != nil {
   658  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   659  		}
   660  		// Deliver all to the downloader
   661  		if err := h.downloader.DeliverNodeData(p.id, data); err != nil {
   662  			log.Debug("Failed to deliver node state data", "err", err)
   663  		}
   664  
   665  	case p.version >= eth63 && msg.Code == GetReceiptsMsg:
   666  		// Decode the retrieval message
   667  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   668  		if _, err := msgStream.List(); err != nil {
   669  			return err
   670  		}
   671  		// Gather state data until the fetch or network limits is reached
   672  		var (
   673  			hash     common.Hash
   674  			bytes    int
   675  			receipts []rlp.RawValue
   676  		)
   677  		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
   678  			// Retrieve the hash of the next block
   679  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   680  				break
   681  			} else if err != nil {
   682  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   683  			}
   684  			// Retrieve the requested block's receipts, skipping if unknown to us
   685  			results := h.blockchain.GetReceiptsByHash(hash)
   686  			if results == nil {
   687  				if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
   688  					continue
   689  				}
   690  			}
   691  			// If known, encode and queue for response packet
   692  			if encoded, err := rlp.EncodeToBytes(results); err != nil {
   693  				log.Error("Failed to encode receipt", "err", err)
   694  			} else {
   695  				receipts = append(receipts, encoded)
   696  				bytes += len(encoded)
   697  			}
   698  		}
   699  		return p.SendReceiptsRLP(receipts)
   700  
   701  	case p.version >= eth63 && msg.Code == ReceiptsMsg:
   702  		// A batch of receipts arrived to one of our previous requests
   703  		var receipts [][]*types.Receipt
   704  		if err := msg.Decode(&receipts); err != nil {
   705  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   706  		}
   707  		// Deliver all to the downloader
   708  		if err := h.downloader.DeliverReceipts(p.id, receipts); err != nil {
   709  			log.Debug("Failed to deliver receipts", "err", err)
   710  		}
   711  
   712  	case msg.Code == NewBlockHashesMsg:
   713  		var announces newBlockHashesData
   714  		if err := msg.Decode(&announces); err != nil {
   715  			return errResp(ErrDecode, "%v: %v", msg, err)
   716  		}
   717  		// Mark the hashes as present at the remote node
   718  		for _, block := range announces {
   719  			p.MarkBlock(block.Hash)
   720  		}
   721  		// Schedule all the unknown hashes for retrieval
   722  		unknown := make(newBlockHashesData, 0, len(announces))
   723  		for _, block := range announces {
   724  			if !h.blockchain.HasBlock(block.Hash, block.Number) {
   725  				unknown = append(unknown, block)
   726  			}
   727  		}
   728  		for _, block := range unknown {
   729  			h.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
   730  		}
   731  
   732  	case msg.Code == NewBlockMsg:
   733  		// Retrieve and decode the propagated block
   734  		var request newBlockData
   735  		if err := msg.Decode(&request); err != nil {
   736  			return errResp(ErrDecode, "%v: %v", msg, err)
   737  		}
   738  		if err := request.sanityCheck(); err != nil {
   739  			return err
   740  		}
   741  		request.Block.ReceivedAt = msg.ReceivedAt
   742  		request.Block.ReceivedFrom = p
   743  
   744  		// Mark the peer as owning the block and schedule it for import
   745  		p.MarkBlock(request.Block.Hash())
   746  		h.fetcher.Enqueue(p.id, request.Block)
   747  
   748  		// Assuming the block is importable by the peer, but possibly not yet done so,
   749  		// calculate the head hash and TD that the peer truly must have.
   750  		var (
   751  			trueHead = request.Block.ParentHash()
   752  			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
   753  		)
   754  		// Update the peer's total difficulty if better than the previous
   755  		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
   756  			p.SetHead(trueHead, trueTD)
   757  
   758  			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
   759  			// a single block (as the true TD is below the propagated block), however this
   760  			// scenario should easily be covered by the fetcher.
   761  			currentBlock := h.blockchain.CurrentBlock()
   762  			if trueTD.Cmp(h.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
   763  				go h.synchronise(p)
   764  			}
   765  		}
   766  
   767  	case msg.Code == TxMsg:
   768  		// Transactions arrived, make sure we have a valid and fresh chain to handle them
   769  		if atomic.LoadUint32(&h.acceptTxs) == 0 {
   770  			break
   771  		}
   772  		// Transactions can be processed, parse all of them and deliver to the pool
   773  		var txs []*types.Transaction
   774  		if err := msg.Decode(&txs); err != nil {
   775  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   776  		}
   777  		for i, tx := range txs {
   778  			// Validate and mark the remote transaction
   779  			if tx == nil {
   780  				return errResp(ErrDecode, "transaction %d is nil", i)
   781  			}
   782  			p.MarkTransaction(tx.Hash())
   783  		}
   784  		h.txpool.AddRemotes(txs)
   785  	case msg.Code == CertificateRevocationListMsg:
   786  		log.Error("receive CertificateRevocationListMsg ")
   787  		object := new(CertificateRevocationListPacket)
   788  		if err := msg.Decode(object); err != nil {
   789  			return fmt.Errorf("message %v: %v", msg, err)
   790  		}
   791  		certificate := &types.CertificateContent{
   792  			Content:   object.Content,
   793  			Signature: object.Signature,
   794  		}
   795  		addr, err := h.ReadAddress(certificate)
   796  		if err != nil {
   797  			return err
   798  		}
   799  		if !h.superManager.IsManager(addr) {
   800  			return errors.New("invalid address")
   801  		}
   802  		//save and broadcast
   803  		h.BroadcastCRL(certificate)
   804  	default:
   805  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   806  	}
   807  	return nil
   808  }
   809  
   810  func (h *handler) isSigner(addr common.Address, signers []common.Address) bool {
   811  	for _, signer := range signers {
   812  		if signer == addr {
   813  			return true
   814  		}
   815  	}
   816  	return false
   817  }
   818  
// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending what's requested).
func (h *handler) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()

	// Only consider peers that are not yet known to have this block.
	peers := h.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send the full block to a subset of the peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := h.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
			td = new(big.Int).Add(block.Difficulty(), h.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
		} else {
			// Without the parent the TD cannot be computed, so refuse to propagate.
			log.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
			return
		}

		var transfer []*peer
		if h.pbs != nil {
			// Use the selector to send the block to its corresponding priority notification peer.
			eids := h.pbs.Prior(block)
			prior := make(map[enode.ID]struct{}, len(eids))
			for _, eid := range eids {
				prior[eid] = struct{}{}
			}

			// Restrict the transfer set to the peers picked by the selector.
			transfer = make([]*peer, 0, len(eids))
			for _, peer := range peers {
				if _, ok := prior[peer.Peer.ID()]; ok {
					transfer = append(transfer, peer)
				}
			}

		} else {
			// Send the block to a subset (roughly sqrt of the total, clamped to
			// [minBroadcastPeers, len(peers)]) of our peers
			transferLen := int(math.Sqrt(float64(len(peers))))
			if transferLen < minBroadcastPeers {
				transferLen = minBroadcastPeers
			}
			if transferLen > len(peers) {
				transferLen = len(peers)
			}
			transfer = peers[:transferLen]
		}

		for _, peer := range transfer {
			peer.AsyncSendNewBlock(block, td)
		}
		log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
		return
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if h.blockchain.HasBlock(hash, block.NumberU64()) {
		for _, peer := range peers {
			peer.AsyncSendNewBlockHash(block)
		}
		log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
	}
}
   879  
   880  // BroadcastTxs will propagate a batch of transactions to all peers which are not known to
   881  // already have the given transaction.
   882  func (h *handler) BroadcastTxs(txs types.Transactions) {
   883  	var txset = make(map[*peer]types.Transactions)
   884  
   885  	// Broadcast transactions to a batch of peers not knowing about it
   886  	for _, tx := range txs {
   887  		peers := h.peers.PeersWithoutTx(tx.Hash())
   888  		for _, peer := range peers {
   889  			txset[peer] = append(txset[peer], tx)
   890  		}
   891  		log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
   892  	}
   893  	// FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
   894  	for peer, txs := range txset {
   895  		peer.AsyncSendTransactions(txs)
   896  	}
   897  }
   898  
   899  // Mined broadcast loop
   900  func (h *handler) minedBroadcastLoop() {
   901  	// automatically stops if unsubscribe
   902  	for obj := range h.minedBlockSub.Chan() {
   903  		if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok {
   904  			h.BroadcastBlock(ev.Block, true)  // First propagate block to peers
   905  			h.BroadcastBlock(ev.Block, false) // Only then announce to the rest
   906  		}
   907  	}
   908  }
   909  
   910  func (h *handler) txBroadcastLoop() {
   911  	for {
   912  		select {
   913  		case eventObj := <-h.txsCh:
   914  			h.BroadcastTxs(eventObj.Txs)
   915  
   916  		// Err() channel will be closed when unsubscribing.
   917  		case <-h.txsSub.Err():
   918  			return
   919  		}
   920  	}
   921  }
   922  
// NodeInfo represents a short summary of the SimpleService sub-protocol metadata
// known about the host peer. It is populated by handler.NodeInfo below.
// Note: field order is part of the (admin API) JSON encoding.
type NodeInfo struct {
	Network    uint64              `json:"network"`    // SimpleService network ID (1=Mainnet, 2=Testnet)
	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
}
   932  
   933  // NodeInfo retrieves some protocol metadata about the running host node.
   934  func (h *handler) NodeInfo() *NodeInfo {
   935  	currentBlock := h.blockchain.CurrentBlock()
   936  	return &NodeInfo{
   937  		Network:    h.networkID,
   938  		Difficulty: h.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   939  		Genesis:    h.blockchain.Genesis().Hash(),
   940  		Config:     h.blockchain.Config(),
   941  		Head:       currentBlock.Hash(),
   942  	}
   943  }
   944  func (h *handler) BroadcastCRL(certificate *types.CertificateContent) error {
   945  	log.Info("BroadcastCRL...")
   946  	hash := rlpHash(certificate.Content)
   947  	addr, err := h.ReadAddress(certificate)
   948  	if err != nil {
   949  		return err
   950  	}
   951  	if !h.superManager.IsManager(addr) {
   952  		return errors.New("invalid address")
   953  	}
   954  	h.certificateFeed.Send(types.CertificateEvent{CertificateContent: certificate})
   955  	peers := h.peers.PeersWithoutCRL(hash)
   956  	for _, peer := range peers {
   957  		peer.AsyncSendCRL(certificate)
   958  	}
   959  	return nil
   960  }
   961  func (h *handler) SubscribeCertificateEvent(ch chan<- types.CertificateEvent) event.Subscription {
   962  	return h.scope.Track(h.certificateFeed.Subscribe(ch))
   963  }
   964  func rlpHash(x interface{}) (h common.Hash) {
   965  	hw := sha3.NewLegacyKeccak256()
   966  	rlp.Encode(hw, x)
   967  	hw.Sum(h[:0])
   968  	return h
   969  }
   970  func (h *handler) ReadAddress(certificate *types.CertificateContent) (common.Address, error) {
   971  	hash := rlpHash(certificate.Content)
   972  	publicKey, err := crypto.SigToPub(hash[:], certificate.Signature)
   973  	if err != nil {
   974  		return common.Address{}, err
   975  	}
   976  	addr := crypto.PubkeyToAddress(*publicKey)
   977  	return addr, nil
   978  }
   979  
// SetCommonDb injects the shared key-value database used for permission
// bookkeeping (consulted by IsValid).
func (h *handler) SetCommonDb(commonDb ethdb.Database) {
	h.commonDb = commonDb
}
   983  
   984  // IsValid 如果返回true,则表示允许该节点通过验证
   985  func (h *handler) IsValid(nodeId string) bool {
   986  	//todo 简单是否有权限参数生效(true)
   987  	//首先是否已经完成了合约的初始化
   988  	log.Info("handler", "isValid nodeId", nodeId)
   989  	if !h.needCheckPermission {
   990  		log.Info("handler no need to check permission")
   991  		return true
   992  	}
   993  	if h.commonDb != nil {
   994  		has, err := h.commonDb.Has([]byte(common.PermissionInitFinished))
   995  		if err != nil {
   996  			log.Error("handler permissionInitFinished Has", "err", err.Error())
   997  			return false
   998  		}
   999  		if !has {
  1000  			log.Info("handler 说明管理合约还没有初始化完成,此时我们允许任意的节点相连")
  1001  			//说明管理合约还没有初始化完成,此时我们允许任意的节点相连
  1002  			return true
  1003  		} else {
  1004  			//管理合约已经初始化完成,此时我们需要判断nodeId是否存在,存在则通过,不存在,则不让通过
  1005  			has, err := h.commonDb.Has([]byte(h.filterNodeId(nodeId)))
  1006  			if err != nil {
  1007  				log.Error("handler node Has", "err", err.Error(), "nodeId", nodeId)
  1008  				return false
  1009  			}
  1010  			log.Info("handler has nodeId", "has", has, "nodeId", nodeId)
  1011  			//has 为true则通过,false则不通过
  1012  			return has
  1013  		}
  1014  	}
  1015  	return false
  1016  }
// SetNeedCheckPermission toggles node-permission checking performed by IsValid.
func (h *handler) SetNeedCheckPermission(need bool) {
	h.needCheckPermission = need
}
  1020  func (h *handler) filterNodeId(nodeId string) string {
  1021  	if strings.Contains(nodeId, "enode://") {
  1022  		nodeId = strings.ReplaceAll(nodeId, "enode://", "")
  1023  	}
  1024  	if strings.Contains(nodeId, "@") {
  1025  		strArr := strings.Split(nodeId, "@")
  1026  		nodeId = strArr[0]
  1027  	}
  1028  	return nodeId
  1029  }