github.com/vantum/vantum@v0.0.0-20180815184342-fe37d5f7a990/swarm/network/protocol.go (about)

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package network
    18  
    19  /*
    20  bzz implements the swarm wire protocol [bzz] (sister of eth and shh)
    21  the protocol instance is launched on each peer by the network layer if the
    22  bzz protocol handler is registered on the p2p server.
    23  
    24  The bzz protocol component speaks the bzz protocol
    25  * handle the protocol handshake
    26  * register peers in the KΛÐΞMLIΛ table via the hive logistic manager
    27  * dispatch to hive for handling the DHT logic
    28  * encode and decode requests for storage and retrieval
    29  * handle sync protocol messages via the syncer
    30  * talks the SWAP payment protocol (swap accounting is done within NetStore)
    31  */
    32  
    33  import (
    34  	"errors"
    35  	"fmt"
    36  	"net"
    37  	"strconv"
    38  	"time"
    39  
    40  	"github.com/vantum/vantum/contracts/chequebook"
    41  	"github.com/vantum/vantum/log"
    42  	"github.com/vantum/vantum/p2p"
    43  	bzzswap "github.com/vantum/vantum/swarm/services/swap"
    44  	"github.com/vantum/vantum/swarm/services/swap/swap"
    45  	"github.com/vantum/vantum/swarm/storage"
    46  )
    47  
const (
	Version            = 0                // bzz protocol version, exchanged and checked in the status handshake
	ProtocolLength     = uint64(8)        // number of message codes reserved for the bzz protocol
	ProtocolMaxMsgSize = 10 * 1024 * 1024 // hard cap on incoming message size (bytes); larger messages are rejected
	NetworkId          = 3                // default network id, applied when the caller passes 0 to Bzz
)
    54  
// bzz represents the swarm wire protocol
// an instance is running on each peer connection
type bzz struct {
	storage    StorageHandler       // handler for storage/retrieval related requests coming via the bzz wire protocol
	hive       *Hive                // the logistic manager, peerPool, routing service and peer handler
	dbAccess   *DbAccess            // access to db storage counter and iterator for syncing
	requestDb  *storage.LDBDatabase // db to persist backlog of deliveries to aid syncing (shared across peers)
	remoteAddr *peerAddr            // remote peer's address, set from the status handshake (see handleStatus)
	peer       *p2p.Peer            // the p2p peer object
	rw         p2p.MsgReadWriter    // messageReadWriter to send messages to
	backend    chequebook.Backend   // blockchain backend used by SWAP accounting
	lastActive time.Time            // updated when chunks or sync messages are received
	NetworkId  uint64

	swap        *swap.Swap          // swap instance for the peer connection
	swapParams  *bzzswap.SwapParams // swap settings both local and remote
	swapEnabled bool                // flag to enable SWAP (will be set via Caps in handshake)
	syncEnabled bool                // flag to enable SYNC (will be set via Caps in handshake)
	syncer      *syncer             // syncer instance for the peer connection
	syncParams  *SyncParams         // syncer params
	syncState   *syncState          // outgoing syncronisation state (contains reference to remote peers db counter)
}
    77  
// StorageHandler is the interface for handlers of storage/retrieval related
// requests coming via the bzz wire protocol.
// messages: UnsyncedKeys, DeliveryRequest, StoreRequest, RetrieveRequest
// Note: only the two sync-related handlers return an error; store/retrieve
// handlers are fire-and-forget from the protocol loop's perspective.
type StorageHandler interface {
	HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error
	HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error
	HandleStoreRequestMsg(req *storeRequestMsgData, p *peer)
	HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer)
}
    87  
    88  /*
    89  main entrypoint, wrappers starting a server that will run the bzz protocol
    90  use this constructor to attach the protocol ("class") to server caps
    91  This is done by node.Node#Register(func(node.ServiceContext) (Service, error))
    92  Service implements Protocols() which is an array of protocol constructors
    93  at node startup the protocols are initialised
    94  the Dev p2p layer then calls Run(p *p2p.Peer, rw p2p.MsgReadWriter) error
    95  on each peer connection
    96  The Run function of the Bzz protocol class creates a bzz instance
    97  which will represent the peer for the swarm hive and all peer-aware components
    98  */
    99  func Bzz(cloud StorageHandler, backend chequebook.Backend, hive *Hive, dbaccess *DbAccess, sp *bzzswap.SwapParams, sy *SyncParams, networkId uint64) (p2p.Protocol, error) {
   100  
   101  	// a single global request db is created for all peer connections
   102  	// this is to persist delivery backlog and aid syncronisation
   103  	requestDb, err := storage.NewLDBDatabase(sy.RequestDbPath)
   104  	if err != nil {
   105  		return p2p.Protocol{}, fmt.Errorf("error setting up request db: %v", err)
   106  	}
   107  	if networkId == 0 {
   108  		networkId = NetworkId
   109  	}
   110  	return p2p.Protocol{
   111  		Name:    "bzz",
   112  		Version: Version,
   113  		Length:  ProtocolLength,
   114  		Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   115  			return run(requestDb, cloud, backend, hive, dbaccess, sp, sy, networkId, p, rw)
   116  		},
   117  	}, nil
   118  }
   119  
   120  /*
   121  the main protocol loop that
   122   * does the handshake by exchanging statusMsg
   123   * if peer is valid and accepted, registers with the hive
   124   * then enters into a forever loop handling incoming messages
   125   * storage and retrieval related queries coming via bzz are dispatched to StorageHandler
   126   * peer-related messages are dispatched to the hive
   127   * payment related messages are relayed to SWAP service
   128   * on disconnect, unregister the peer in the hive (note RemovePeer in the post-disconnect hook)
   129   * whenever the loop terminates, the peer will disconnect with Subprotocol error
   130   * whenever handlers return an error the loop terminates
   131  */
   132  func run(requestDb *storage.LDBDatabase, depo StorageHandler, backend chequebook.Backend, hive *Hive, dbaccess *DbAccess, sp *bzzswap.SwapParams, sy *SyncParams, networkId uint64, p *p2p.Peer, rw p2p.MsgReadWriter) (err error) {
   133  
   134  	self := &bzz{
   135  		storage:     depo,
   136  		backend:     backend,
   137  		hive:        hive,
   138  		dbAccess:    dbaccess,
   139  		requestDb:   requestDb,
   140  		peer:        p,
   141  		rw:          rw,
   142  		swapParams:  sp,
   143  		syncParams:  sy,
   144  		swapEnabled: hive.swapEnabled,
   145  		syncEnabled: true,
   146  		NetworkId:   networkId,
   147  	}
   148  
   149  	// handle handshake
   150  	err = self.handleStatus()
   151  	if err != nil {
   152  		return err
   153  	}
   154  	defer func() {
   155  		// if the handler loop exits, the peer is disconnecting
   156  		// deregister the peer in the hive
   157  		self.hive.removePeer(&peer{bzz: self})
   158  		if self.syncer != nil {
   159  			self.syncer.stop() // quits request db and delivery loops, save requests
   160  		}
   161  		if self.swap != nil {
   162  			self.swap.Stop() // quits chequebox autocash etc
   163  		}
   164  	}()
   165  
   166  	// the main forever loop that handles incoming requests
   167  	for {
   168  		if self.hive.blockRead {
   169  			log.Warn(fmt.Sprintf("Cannot read network"))
   170  			time.Sleep(100 * time.Millisecond)
   171  			continue
   172  		}
   173  		err = self.handle()
   174  		if err != nil {
   175  			return
   176  		}
   177  	}
   178  }
   179  
// Drop disconnects the underlying p2p peer with a subprotocol error.
// TODO: may need to implement protocol drop only? don't want to kick off the peer
// if they are useful for other protocols
func (self *bzz) Drop() {
	self.peer.Disconnect(p2p.DiscSubprotocolError)
}
   185  
   186  // one cycle of the main forever loop that handles and dispatches incoming messages
   187  func (self *bzz) handle() error {
   188  	msg, err := self.rw.ReadMsg()
   189  	log.Debug(fmt.Sprintf("<- %v", msg))
   190  	if err != nil {
   191  		return err
   192  	}
   193  	if msg.Size > ProtocolMaxMsgSize {
   194  		return fmt.Errorf("message too long: %v > %v", msg.Size, ProtocolMaxMsgSize)
   195  	}
   196  	// make sure that the payload has been fully consumed
   197  	defer msg.Discard()
   198  
   199  	switch msg.Code {
   200  
   201  	case statusMsg:
   202  		// no extra status message allowed. The one needed already handled by
   203  		// handleStatus
   204  		log.Debug(fmt.Sprintf("Status message: %v", msg))
   205  		return errors.New("extra status message")
   206  
   207  	case storeRequestMsg:
   208  		// store requests are dispatched to netStore
   209  		var req storeRequestMsgData
   210  		if err := msg.Decode(&req); err != nil {
   211  			return fmt.Errorf("<- %v: %v", msg, err)
   212  		}
   213  		if n := len(req.SData); n < 9 {
   214  			return fmt.Errorf("<- %v: Data too short (%v)", msg, n)
   215  		}
   216  		// last Active time is set only when receiving chunks
   217  		self.lastActive = time.Now()
   218  		log.Trace(fmt.Sprintf("incoming store request: %s", req.String()))
   219  		// swap accounting is done within forwarding
   220  		self.storage.HandleStoreRequestMsg(&req, &peer{bzz: self})
   221  
   222  	case retrieveRequestMsg:
   223  		// retrieve Requests are dispatched to netStore
   224  		var req retrieveRequestMsgData
   225  		if err := msg.Decode(&req); err != nil {
   226  			return fmt.Errorf("<- %v: %v", msg, err)
   227  		}
   228  		req.from = &peer{bzz: self}
   229  		// if request is lookup and not to be delivered
   230  		if req.isLookup() {
   231  			log.Trace(fmt.Sprintf("self lookup for %v: responding with peers only...", req.from))
   232  		} else if req.Key == nil {
   233  			return fmt.Errorf("protocol handler: req.Key == nil || req.Timeout == nil")
   234  		} else {
   235  			// swap accounting is done within netStore
   236  			self.storage.HandleRetrieveRequestMsg(&req, &peer{bzz: self})
   237  		}
   238  		// direct response with peers, TODO: sort this out
   239  		self.hive.peers(&req)
   240  
   241  	case peersMsg:
   242  		// response to lookups and immediate response to retrieve requests
   243  		// dispatches new peer data to the hive that adds them to KADDB
   244  		var req peersMsgData
   245  		if err := msg.Decode(&req); err != nil {
   246  			return fmt.Errorf("<- %v: %v", msg, err)
   247  		}
   248  		req.from = &peer{bzz: self}
   249  		log.Trace(fmt.Sprintf("<- peer addresses: %v", req))
   250  		self.hive.HandlePeersMsg(&req, &peer{bzz: self})
   251  
   252  	case syncRequestMsg:
   253  		var req syncRequestMsgData
   254  		if err := msg.Decode(&req); err != nil {
   255  			return fmt.Errorf("<- %v: %v", msg, err)
   256  		}
   257  		log.Debug(fmt.Sprintf("<- sync request: %v", req))
   258  		self.lastActive = time.Now()
   259  		self.sync(req.SyncState)
   260  
   261  	case unsyncedKeysMsg:
   262  		// coming from parent node offering
   263  		var req unsyncedKeysMsgData
   264  		if err := msg.Decode(&req); err != nil {
   265  			return fmt.Errorf("<- %v: %v", msg, err)
   266  		}
   267  		log.Debug(fmt.Sprintf("<- unsynced keys : %s", req.String()))
   268  		err := self.storage.HandleUnsyncedKeysMsg(&req, &peer{bzz: self})
   269  		self.lastActive = time.Now()
   270  		if err != nil {
   271  			return fmt.Errorf("<- %v: %v", msg, err)
   272  		}
   273  
   274  	case deliveryRequestMsg:
   275  		// response to syncKeysMsg hashes filtered not existing in db
   276  		// also relays the last synced state to the source
   277  		var req deliveryRequestMsgData
   278  		if err := msg.Decode(&req); err != nil {
   279  			return fmt.Errorf("<-msg %v: %v", msg, err)
   280  		}
   281  		log.Debug(fmt.Sprintf("<- delivery request: %s", req.String()))
   282  		err := self.storage.HandleDeliveryRequestMsg(&req, &peer{bzz: self})
   283  		self.lastActive = time.Now()
   284  		if err != nil {
   285  			return fmt.Errorf("<- %v: %v", msg, err)
   286  		}
   287  
   288  	case paymentMsg:
   289  		// swap protocol message for payment, Units paid for, Cheque paid with
   290  		if self.swapEnabled {
   291  			var req paymentMsgData
   292  			if err := msg.Decode(&req); err != nil {
   293  				return fmt.Errorf("<- %v: %v", msg, err)
   294  			}
   295  			log.Debug(fmt.Sprintf("<- payment: %s", req.String()))
   296  			self.swap.Receive(int(req.Units), req.Promise)
   297  		}
   298  
   299  	default:
   300  		// no other message is allowed
   301  		return fmt.Errorf("invalid message code: %v", msg.Code)
   302  	}
   303  	return nil
   304  }
   305  
// handleStatus performs the bzz status handshake: it sends the local status
// message, reads and validates the remote one (code, size, network id,
// protocol version), records the remote address, optionally sets up SWAP
// accounting, registers the peer with the hive, and finally kicks off
// syncronisation. Any mismatch aborts the connection with an error.
func (self *bzz) handleStatus() (err error) {

	handshake := &statusMsgData{
		Version:   uint64(Version),
		ID:        "honey",
		Addr:      self.selfAddr(),
		NetworkId: self.NetworkId,
		Swap: &bzzswap.SwapProfile{
			Profile:    self.swapParams.Profile,
			PayProfile: self.swapParams.PayProfile,
		},
	}

	// send our status first; both sides do this symmetrically
	err = p2p.Send(self.rw, statusMsg, handshake)
	if err != nil {
		return err
	}

	// read and handle remote status
	var msg p2p.Msg
	msg, err = self.rw.ReadMsg()
	if err != nil {
		return err
	}

	// the status message must be the very first message on the connection
	if msg.Code != statusMsg {
		return fmt.Errorf("first msg has code %x (!= %x)", msg.Code, statusMsg)
	}

	if msg.Size > ProtocolMaxMsgSize {
		return fmt.Errorf("message too long: %v > %v", msg.Size, ProtocolMaxMsgSize)
	}

	var status statusMsgData
	if err := msg.Decode(&status); err != nil {
		return fmt.Errorf("<- %v: %v", msg, err)
	}

	if status.NetworkId != self.NetworkId {
		return fmt.Errorf("network id mismatch: %d (!= %d)", status.NetworkId, self.NetworkId)
	}

	if Version != status.Version {
		return fmt.Errorf("protocol version mismatch: %d (!= %d)", status.Version, Version)
	}

	// record the remote address, repairing a missing IP from the transport
	self.remoteAddr = self.peerAddr(status.Addr)
	log.Trace(fmt.Sprintf("self: advertised IP: %v, peer advertised: %v, local address: %v\npeer: advertised IP: %v, remote address: %v\n", self.selfAddr(), self.remoteAddr, self.peer.LocalAddr(), status.Addr.IP, self.peer.RemoteAddr()))

	if self.swapEnabled {
		// set remote profile for accounting
		self.swap, err = bzzswap.NewSwap(self.swapParams, status.Swap, self.backend, self)
		if err != nil {
			return err
		}
	}

	log.Info(fmt.Sprintf("Peer %08x is capable (%d/%d)", self.remoteAddr.Addr[:4], status.Version, status.NetworkId))
	err = self.hive.addPeer(&peer{bzz: self})
	if err != nil {
		return err
	}

	// hive sets syncstate so sync should start after node added
	log.Info(fmt.Sprintf("syncronisation request sent with %v", self.syncState))
	self.syncRequest()

	return nil
}
   375  
   376  func (self *bzz) sync(state *syncState) error {
   377  	// syncer setup
   378  	if self.syncer != nil {
   379  		return errors.New("sync request can only be sent once")
   380  	}
   381  
   382  	cnt := self.dbAccess.counter()
   383  	remoteaddr := self.remoteAddr.Addr
   384  	start, stop := self.hive.kad.KeyRange(remoteaddr)
   385  
   386  	// an explicitly received nil syncstate disables syncronisation
   387  	if state == nil {
   388  		self.syncEnabled = false
   389  		log.Warn(fmt.Sprintf("syncronisation disabled for peer %v", self))
   390  		state = &syncState{DbSyncState: &storage.DbSyncState{}, Synced: true}
   391  	} else {
   392  		state.synced = make(chan bool)
   393  		state.SessionAt = cnt
   394  		if storage.IsZeroKey(state.Stop) && state.Synced {
   395  			state.Start = storage.Key(start[:])
   396  			state.Stop = storage.Key(stop[:])
   397  		}
   398  		log.Debug(fmt.Sprintf("syncronisation requested by peer %v at state %v", self, state))
   399  	}
   400  	var err error
   401  	self.syncer, err = newSyncer(
   402  		self.requestDb,
   403  		storage.Key(remoteaddr[:]),
   404  		self.dbAccess,
   405  		self.unsyncedKeys, self.store,
   406  		self.syncParams, state, func() bool { return self.syncEnabled },
   407  	)
   408  	if err != nil {
   409  		return nil
   410  	}
   411  	log.Trace(fmt.Sprintf("syncer set for peer %v", self))
   412  	return nil
   413  }
   414  
// String returns the remote peer's address for logging.
// NOTE(review): relies on remoteAddr having been set by handleStatus;
// calling it before the handshake completes would dereference nil.
func (self *bzz) String() string {
	return self.remoteAddr.String()
}
   418  
   419  // repair reported address if IP missing
   420  func (self *bzz) peerAddr(base *peerAddr) *peerAddr {
   421  	if base.IP.IsUnspecified() {
   422  		host, _, _ := net.SplitHostPort(self.peer.RemoteAddr().String())
   423  		base.IP = net.ParseIP(host)
   424  	}
   425  	return base
   426  }
   427  
   428  // returns self advertised node connection info (listening address w enodes)
   429  // IP will get repaired on the other end if missing
   430  // or resolved via ID by discovery at dialout
   431  func (self *bzz) selfAddr() *peerAddr {
   432  	id := self.hive.id
   433  	host, port, _ := net.SplitHostPort(self.hive.listenAddr())
   434  	intport, _ := strconv.Atoi(port)
   435  	addr := &peerAddr{
   436  		Addr: self.hive.addr,
   437  		ID:   id[:],
   438  		IP:   net.ParseIP(host),
   439  		Port: uint16(intport),
   440  	}
   441  	return addr
   442  }
   443  
// outgoing messages
// retrieve sends a retrieveRequestMsg to the peer.
func (self *bzz) retrieve(req *retrieveRequestMsgData) error {
	return self.send(retrieveRequestMsg, req)
}
   449  
// store sends a storeRequestMsg to the peer.
func (self *bzz) store(req *storeRequestMsgData) error {
	return self.send(storeRequestMsg, req)
}
   454  
   455  func (self *bzz) syncRequest() error {
   456  	req := &syncRequestMsgData{}
   457  	if self.hive.syncEnabled {
   458  		log.Debug(fmt.Sprintf("syncronisation request to peer %v at state %v", self, self.syncState))
   459  		req.SyncState = self.syncState
   460  	}
   461  	if self.syncState == nil {
   462  		log.Warn(fmt.Sprintf("syncronisation disabled for peer %v at state %v", self, self.syncState))
   463  	}
   464  	return self.send(syncRequestMsg, req)
   465  }
   466  
   467  // queue storeRequestMsg in request db
   468  func (self *bzz) deliveryRequest(reqs []*syncRequest) error {
   469  	req := &deliveryRequestMsgData{
   470  		Deliver: reqs,
   471  	}
   472  	return self.send(deliveryRequestMsg, req)
   473  }
   474  
   475  // batch of syncRequests to send off
   476  func (self *bzz) unsyncedKeys(reqs []*syncRequest, state *syncState) error {
   477  	req := &unsyncedKeysMsgData{
   478  		Unsynced: reqs,
   479  		State:    state,
   480  	}
   481  	return self.send(unsyncedKeysMsg, req)
   482  }
   483  
   484  // send paymentMsg
   485  func (self *bzz) Pay(units int, promise swap.Promise) {
   486  	req := &paymentMsgData{uint(units), promise.(*chequebook.Cheque)}
   487  	self.payment(req)
   488  }
   489  
// payment sends a paymentMsg to the peer.
func (self *bzz) payment(req *paymentMsgData) error {
	return self.send(paymentMsg, req)
}
   494  
// peers sends a peersMsg to the peer.
func (self *bzz) peers(req *peersMsgData) error {
	return self.send(peersMsg, req)
}
   499  
   500  func (self *bzz) send(msg uint64, data interface{}) error {
   501  	if self.hive.blockWrite {
   502  		return fmt.Errorf("network write blocked")
   503  	}
   504  	log.Trace(fmt.Sprintf("-> %v: %v (%T) to %v", msg, data, data, self))
   505  	err := p2p.Send(self.rw, msg, data)
   506  	if err != nil {
   507  		self.Drop()
   508  	}
   509  	return err
   510  }