github.com/jincm/wesharechain@v0.0.0-20210122032815-1537409ce26a/chain/swarm/pss/pss.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package pss
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"crypto/ecdsa"
    23  	"crypto/rand"
    24  	"errors"
    25  	"fmt"
    26  	"hash"
    27  	"sync"
    28  	"time"
    29  
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/crypto"
    32  	"github.com/ethereum/go-ethereum/metrics"
    33  	"github.com/ethereum/go-ethereum/p2p"
    34  	"github.com/ethereum/go-ethereum/p2p/enode"
    35  	"github.com/ethereum/go-ethereum/p2p/protocols"
    36  	"github.com/ethereum/go-ethereum/rpc"
    37  	"github.com/ethereum/go-ethereum/swarm/log"
    38  	"github.com/ethereum/go-ethereum/swarm/network"
    39  	"github.com/ethereum/go-ethereum/swarm/pot"
    40  	"github.com/ethereum/go-ethereum/swarm/storage"
    41  	whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
    42  	"golang.org/x/crypto/sha3"
    43  )
    44  
    45  const (
    46  	defaultPaddingByteSize     = 16
    47  	DefaultMsgTTL              = time.Second * 120
    48  	defaultDigestCacheTTL      = time.Second * 10
    49  	defaultSymKeyCacheCapacity = 512
    50  	digestLength               = 32 // byte length of digest used for pss cache (currently same as swarm chunk hash)
    51  	defaultWhisperWorkTime     = 3
    52  	defaultWhisperPoW          = 0.0000000001
    53  	defaultMaxMsgSize          = 1024 * 1024
    54  	defaultCleanInterval       = time.Second * 60 * 10
    55  	defaultOutboxCapacity      = 100000
    56  	pssProtocolName            = "pss"
    57  	pssVersion                 = 2
    58  	hasherCount                = 8
    59  )
    60  
    61  var (
    62  	addressLength = len(pot.Address{})
    63  )
    64  
    65  // the cache is used to prevent backwards routing;
    66  // it will also be instrumental in the flood guard mechanism
    67  // and in the mailbox implementation
    68  type pssCacheEntry struct {
    69  	expiresAt time.Time
    70  }
    71  
    72  // abstraction to enable access to p2p.protocols.Peer.Send
    73  type senderPeer interface {
    74  	Info() *p2p.PeerInfo
    75  	ID() enode.ID
    76  	Address() []byte
    77  	Send(context.Context, interface{}) error
    78  }
    79  
    80  // per-key peer-related information
    81  // the `protected` member prevents garbage collection of the instance
    82  type pssPeer struct {
    83  	lastSeen  time.Time
    84  	address   PssAddress
    85  	protected bool
    86  }
    87  
    88  // Pss configuration parameters
    89  type PssParams struct {
    90  	MsgTTL              time.Duration
    91  	CacheTTL            time.Duration
    92  	privateKey          *ecdsa.PrivateKey
    93  	SymKeyCacheCapacity int
    94  	AllowRaw            bool // If true, enables sending and receiving messages without builtin pss encryption
    95  }
    96  
    97  // NewPssParams returns sane defaults for Pss
    98  func NewPssParams() *PssParams {
    99  	return &PssParams{
   100  		MsgTTL:              DefaultMsgTTL,
   101  		CacheTTL:            defaultDigestCacheTTL,
   102  		SymKeyCacheCapacity: defaultSymKeyCacheCapacity,
   103  	}
   104  }
   105  
   106  func (params *PssParams) WithPrivateKey(privatekey *ecdsa.PrivateKey) *PssParams {
   107  	params.privateKey = privatekey
   108  	return params
   109  }
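
// Example (editor's sketch, not part of the original source): building Pss
// parameters with a freshly generated key. crypto.GenerateKey is the standard
// go-ethereum key generator already imported by this file.
//
//	privkey, err := crypto.GenerateKey()
//	if err != nil {
//		// handle key generation failure
//	}
//	params := NewPssParams().WithPrivateKey(privkey)
//	params.AllowRaw = true // optionally allow unencrypted (raw) messages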
   110  
   111  // Pss is the top-level pss object. It takes care of message sending and receiving, encryption and decryption, message handler dispatch and message forwarding.
   112  //
   113  // Implements node.Service
   114  type Pss struct {
   115  	*network.Kademlia // we can get the Kademlia address from this
   116  	*KeyStore
   117  
   118  	privateKey *ecdsa.PrivateKey // pss can have its own independent key
   119  	auxAPIs    []rpc.API         // builtins (handshake, test) can add APIs
   120  
   121  	// sending and forwarding
   122  	fwdPool         map[string]*protocols.Peer // keep track of all peers sitting on the pssmsg routing layer
   123  	fwdPoolMu       sync.RWMutex
   124  	fwdCache        map[pssDigest]pssCacheEntry // checksum of unique fields from pssmsg mapped to expiry, cache to determine whether to drop msg
   125  	fwdCacheMu      sync.RWMutex
   126  	cacheTTL        time.Duration // how long to keep messages in fwdCache (not implemented)
   127  	msgTTL          time.Duration
   128  	paddingByteSize int
   129  	capstring       string
   130  	outbox          chan *PssMsg
   131  
   132  	// message handling
   133  	handlers           map[Topic]map[*handler]bool // topic and version based pss payload handlers. See pss.Handle()
   134  	handlersMu         sync.RWMutex
   135  	hashPool           sync.Pool
   136  	topicHandlerCaps   map[Topic]*handlerCaps // caches capabilities of each topic's handlers
   137  	topicHandlerCapsMu sync.RWMutex
   138  
   139  	// process
   140  	quitC chan struct{}
   141  }
   142  
   143  func (p *Pss) String() string {
   144  	return fmt.Sprintf("pss: addr %x, pubkey %v", p.BaseAddr(), common.ToHex(crypto.FromECDSAPub(&p.privateKey.PublicKey)))
   145  }
   146  
   147  // NewPss creates a new Pss instance.
   148  //
   149  // In addition to the params, it takes a swarm network Kademlia,
   150  // which provides the node's base address and the peer connections used for routing.
   151  func NewPss(k *network.Kademlia, params *PssParams) (*Pss, error) {
   152  	if params.privateKey == nil {
   153  		return nil, errors.New("missing private key for pss")
   154  	}
   155  	cap := p2p.Cap{
   156  		Name:    pssProtocolName,
   157  		Version: pssVersion,
   158  	}
   159  	ps := &Pss{
   160  		Kademlia: k,
   161  		KeyStore: loadKeyStore(),
   162  
   163  		privateKey: params.privateKey,
   164  		quitC:      make(chan struct{}),
   165  
   166  		fwdPool:         make(map[string]*protocols.Peer),
   167  		fwdCache:        make(map[pssDigest]pssCacheEntry),
   168  		cacheTTL:        params.CacheTTL,
   169  		msgTTL:          params.MsgTTL,
   170  		paddingByteSize: defaultPaddingByteSize,
   171  		capstring:       cap.String(),
   172  		outbox:          make(chan *PssMsg, defaultOutboxCapacity),
   173  
   174  		handlers:         make(map[Topic]map[*handler]bool),
   175  		topicHandlerCaps: make(map[Topic]*handlerCaps),
   176  
   177  		hashPool: sync.Pool{
   178  			New: func() interface{} {
   179  				return sha3.NewLegacyKeccak256()
   180  			},
   181  		},
   182  	}
   183  
   184  	for i := 0; i < hasherCount; i++ {
   185  		hashfunc := storage.MakeHashFunc(storage.DefaultHash)()
   186  		ps.hashPool.Put(hashfunc)
   187  	}
   188  
   189  	return ps, nil
   190  }
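
// Example (editor's sketch, not part of the original source): wiring a Pss
// instance to a swarm Kademlia. network.NewKadParams and network.NewKademlia
// are assumed to have the usual swarm/network signatures; baseAddr is this
// node's 32-byte overlay address.
//
//	privkey, _ := crypto.GenerateKey()
//	var baseAddr []byte // 32-byte Kademlia overlay address of this node
//	kad := network.NewKademlia(baseAddr, network.NewKadParams())
//	ps, err := NewPss(kad, NewPssParams().WithPrivateKey(privkey))
//	if err != nil {
//		// a nil private key is the only construction error
//	}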
   191  
   192  /////////////////////////////////////////////////////////////////////
   193  // SECTION: node.Service interface
   194  /////////////////////////////////////////////////////////////////////
   195  
   196  func (p *Pss) Start(srv *p2p.Server) error {
   197  	go func() {
   198  		ticker := time.NewTicker(defaultCleanInterval)
   199  		cacheTicker := time.NewTicker(p.cacheTTL)
   200  		defer ticker.Stop()
   201  		defer cacheTicker.Stop()
   202  		for {
   203  			select {
   204  			case <-cacheTicker.C:
   205  				p.cleanFwdCache()
   206  			case <-ticker.C:
   207  				p.cleanKeys()
   208  			case <-p.quitC:
   209  				return
   210  			}
   211  		}
   212  	}()
   213  	go func() {
   214  		for {
   215  			select {
   216  			case msg := <-p.outbox:
   217  				err := p.forward(msg)
   218  				if err != nil {
   219  					log.Error(err.Error())
   220  					metrics.GetOrRegisterCounter("pss.forward.err", nil).Inc(1)
   221  				}
   222  			case <-p.quitC:
   223  				return
   224  			}
   225  		}
   226  	}()
   227  	log.Info("Started Pss")
   228  	log.Info("Loaded EC keys", "pubkey", common.ToHex(crypto.FromECDSAPub(p.PublicKey())), "secp256", common.ToHex(crypto.CompressPubkey(p.PublicKey())))
   229  	return nil
   230  }
   231  
   232  func (p *Pss) Stop() error {
   233  	log.Info("Pss shutting down")
   234  	close(p.quitC)
   235  	return nil
   236  }
   237  
   238  var pssSpec = &protocols.Spec{
   239  	Name:       pssProtocolName,
   240  	Version:    pssVersion,
   241  	MaxMsgSize: defaultMaxMsgSize,
   242  	Messages: []interface{}{
   243  		PssMsg{},
   244  	},
   245  }
   246  
   247  func (p *Pss) Protocols() []p2p.Protocol {
   248  	return []p2p.Protocol{
   249  		{
   250  			Name:    pssSpec.Name,
   251  			Version: pssSpec.Version,
   252  			Length:  pssSpec.Length(),
   253  			Run:     p.Run,
   254  		},
   255  	}
   256  }
   257  
   258  func (p *Pss) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
   259  	pp := protocols.NewPeer(peer, rw, pssSpec)
   260  	p.fwdPoolMu.Lock()
   261  	p.fwdPool[peer.Info().ID] = pp
   262  	p.fwdPoolMu.Unlock()
   263  	return pp.Run(p.handlePssMsg)
   264  }
   265  
   266  func (p *Pss) APIs() []rpc.API {
   267  	apis := []rpc.API{
   268  		{
   269  			Namespace: "pss",
   270  			Version:   "1.0",
   271  			Service:   NewAPI(p),
   272  			Public:    true,
   273  		},
   274  	}
   275  	apis = append(apis, p.auxAPIs...)
   276  	return apis
   277  }
   278  
   279  // addAPI adds API methods to the pss API.
   280  // Must be run before the node is started.
   281  func (p *Pss) addAPI(api rpc.API) {
   282  	p.auxAPIs = append(p.auxAPIs, api)
   283  }
   284  
   285  // Returns the swarm Kademlia address of the pss node
   286  func (p *Pss) BaseAddr() []byte {
   287  	return p.Kademlia.BaseAddr()
   288  }
   289  
   290  // Returns the pss node's public key
   291  func (p *Pss) PublicKey() *ecdsa.PublicKey {
   292  	return &p.privateKey.PublicKey
   293  }
   294  
   295  /////////////////////////////////////////////////////////////////////
   296  // SECTION: Message handling
   297  /////////////////////////////////////////////////////////////////////
   298  
   299  func (p *Pss) getTopicHandlerCaps(topic Topic) (hc *handlerCaps, found bool) {
   300  	p.topicHandlerCapsMu.RLock()
   301  	defer p.topicHandlerCapsMu.RUnlock()
   302  	hc, found = p.topicHandlerCaps[topic]
   303  	return
   304  }
   305  
   306  func (p *Pss) setTopicHandlerCaps(topic Topic, hc *handlerCaps) {
   307  	p.topicHandlerCapsMu.Lock()
   308  	defer p.topicHandlerCapsMu.Unlock()
   309  	p.topicHandlerCaps[topic] = hc
   310  }
   311  
   312  // Links a handler function to a Topic
   313  //
   314  // All incoming messages with an envelope Topic matching the
   315  // topic specified will be passed to the given Handler function.
   316  //
   317  // There may be an arbitrary number of handler functions per topic.
   318  //
   319  // Returns a deregister function which needs to be called to
   320  // deregister the handler.
   321  func (p *Pss) Register(topic *Topic, hndlr *handler) func() {
   322  	p.handlersMu.Lock()
   323  	defer p.handlersMu.Unlock()
   324  	handlers := p.handlers[*topic]
   325  	if handlers == nil {
   326  		handlers = make(map[*handler]bool)
   327  		p.handlers[*topic] = handlers
   328  		log.Debug("registered handler", "capabilities", hndlr.caps)
   329  	}
   330  	if hndlr.caps == nil {
   331  		hndlr.caps = &handlerCaps{}
   332  	}
   333  	handlers[hndlr] = true
   334  
   335  	capabilities, ok := p.getTopicHandlerCaps(*topic)
   336  	if !ok {
   337  		capabilities = &handlerCaps{}
   338  		p.setTopicHandlerCaps(*topic, capabilities)
   339  	}
   340  
   341  	if hndlr.caps.raw {
   342  		capabilities.raw = true
   343  	}
   344  	if hndlr.caps.prox {
   345  		capabilities.prox = true
   346  	}
   347  	return func() { p.deregister(topic, hndlr) }
   348  }
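
// Example (editor's sketch, not part of the original source): registering and
// later deregistering a handler for a topic. NewHandler and BytesToTopic are
// assumed to be the constructors defined alongside the handler and Topic types
// in this package.
//
//	topic := BytesToTopic([]byte("my-protocol"))
//	dereg := ps.Register(&topic, NewHandler(func(msg []byte, peer *p2p.Peer, asym bool, keyid string) error {
//		log.Info("got pss message", "len", len(msg), "asym", asym, "keyid", keyid)
//		return nil
//	}))
//	defer dereg() // removes the handler again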
   349  
   350  func (p *Pss) deregister(topic *Topic, hndlr *handler) {
   351  	p.handlersMu.Lock()
   352  	defer p.handlersMu.Unlock()
   353  	handlers := p.handlers[*topic]
   354  	delete(handlers, hndlr)
   355  	if len(handlers) == 0 {
   356  		delete(p.handlers, *topic)
   357  		return
   358  	}
   359  	// topic caps might have changed now that a handler is gone
   360  	caps := &handlerCaps{}
   361  	for h := range handlers {
   362  		if h.caps.raw {
   363  			caps.raw = true
   364  		}
   365  		if h.caps.prox {
   366  			caps.prox = true
   367  		}
   368  	}
   369  	p.setTopicHandlerCaps(*topic, caps)
   370  }
   371  
   372  // handlePssMsg filters incoming messages for processing or forwarding.
   373  // It checks whether the recipient address partially matches our own address;
   374  // if it does, the message CAN be for us, and we process it.
   375  // An error is passed to the pss protocol handler only if the payload is not a valid PssMsg.
   376  func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
   377  	metrics.GetOrRegisterCounter("pss.handlepssmsg", nil).Inc(1)
   378  	pssmsg, ok := msg.(*PssMsg)
   379  	if !ok {
   380  		return fmt.Errorf("invalid message type. Expected *PssMsg, got %T ", msg)
   381  	}
   382  	log.Trace("handler", "self", label(p.Kademlia.BaseAddr()), "topic", label(pssmsg.Payload.Topic[:]))
   383  	if int64(pssmsg.Expire) < time.Now().Unix() {
   384  		metrics.GetOrRegisterCounter("pss.expire", nil).Inc(1)
   385  		log.Warn("pss filtered expired message", "from", common.ToHex(p.Kademlia.BaseAddr()), "to", common.ToHex(pssmsg.To))
   386  		return nil
   387  	}
   388  	if p.checkFwdCache(pssmsg) {
   389  		log.Trace("pss relay block-cache match (process)", "from", common.ToHex(p.Kademlia.BaseAddr()), "to", (common.ToHex(pssmsg.To)))
   390  		return nil
   391  	}
   392  	p.addFwdCache(pssmsg)
   393  
   394  	psstopic := Topic(pssmsg.Payload.Topic)
   395  
   396  	// raw is the simplest handler capability to check, so check that first
   397  	var isRaw bool
   398  	if pssmsg.isRaw() {
   399  		if capabilities, ok := p.getTopicHandlerCaps(psstopic); ok {
   400  			if !capabilities.raw {
   401  				log.Debug("No handler for raw message", "topic", psstopic)
   402  				return nil
   403  			}
   404  		}
   405  		isRaw = true
   406  	}
   407  
   408  	// check if we can be recipient:
   409  	// - no prox handler on message and partial address matches
   410  	// - prox handler on message and we are in prox regardless of partial address match
   411  	// store this result so we don't calculate again on every handler
   412  	var isProx bool
   413  	if capabilities, ok := p.getTopicHandlerCaps(psstopic); ok {
   414  		isProx = capabilities.prox
   415  	}
   416  	isRecipient := p.isSelfPossibleRecipient(pssmsg, isProx)
   417  	if !isRecipient {
   418  		log.Trace("pss was for someone else :'( ... forwarding", "pss", common.ToHex(p.BaseAddr()), "prox", isProx)
   419  		return p.enqueue(pssmsg)
   420  	}
   421  
   422  	log.Trace("pss for us, yay! ... let's process!", "pss", common.ToHex(p.BaseAddr()), "prox", isProx, "raw", isRaw, "topic", label(pssmsg.Payload.Topic[:]))
   423  	if err := p.process(pssmsg, isRaw, isProx); err != nil {
   424  		qerr := p.enqueue(pssmsg)
   425  		if qerr != nil {
   426  			return fmt.Errorf("process fail: processerr %v, queueerr: %v", err, qerr)
   427  		}
   428  	}
   429  	return nil
   430  }
   431  
   432  // Entry point to processing a message for which the current node can be the intended recipient.
   433  // Attempts symmetric and asymmetric decryption with stored keys.
   434  // Dispatches message to all handlers matching the message topic
   435  func (p *Pss) process(pssmsg *PssMsg, raw bool, prox bool) error {
   436  	metrics.GetOrRegisterCounter("pss.process", nil).Inc(1)
   437  
   438  	var err error
   439  	var recvmsg *whisper.ReceivedMessage
   440  	var payload []byte
   441  	var from PssAddress
   442  	var asymmetric bool
   443  	var keyid string
   444  	var keyFunc func(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, PssAddress, error)
   445  
   446  	envelope := pssmsg.Payload
   447  	psstopic := Topic(envelope.Topic)
   448  
   449  	if raw {
   450  		payload = pssmsg.Payload.Data
   451  	} else {
   452  		if pssmsg.isSym() {
   453  			keyFunc = p.processSym
   454  		} else {
   455  			asymmetric = true
   456  			keyFunc = p.processAsym
   457  		}
   458  
   459  		recvmsg, keyid, from, err = keyFunc(envelope)
   460  		if err != nil {
   461  			return errors.New("Decryption failed")
   462  		}
   463  		payload = recvmsg.Payload
   464  	}
   465  
   466  	if len(pssmsg.To) < addressLength {
   467  		if err := p.enqueue(pssmsg); err != nil {
   468  			return err
   469  		}
   470  	}
   471  	p.executeHandlers(psstopic, payload, from, raw, prox, asymmetric, keyid)
   472  
   473  	return nil
   474  }
   475  
   476  // getHandlers copies all registered handlers for the respective topic, in order to avoid data races or deadlocks
   477  func (p *Pss) getHandlers(topic Topic) (ret []*handler) {
   478  	p.handlersMu.RLock()
   479  	defer p.handlersMu.RUnlock()
   480  	for k := range p.handlers[topic] {
   481  		ret = append(ret, k)
   482  	}
   483  	return ret
   484  }
   485  
   486  func (p *Pss) executeHandlers(topic Topic, payload []byte, from PssAddress, raw bool, prox bool, asymmetric bool, keyid string) {
   487  	handlers := p.getHandlers(topic)
   488  	peer := p2p.NewPeer(enode.ID{}, fmt.Sprintf("%x", from), []p2p.Cap{})
   489  	for _, h := range handlers {
   490  		if !h.caps.raw && raw {
   491  			log.Warn("norawhandler")
   492  			continue
   493  		}
   494  		if !h.caps.prox && prox {
   495  			log.Warn("noproxhandler")
   496  			continue
   497  		}
   498  		err := (h.f)(payload, peer, asymmetric, keyid)
   499  		if err != nil {
   500  			log.Warn("Pss handler failed", "err", err)
   501  		}
   502  	}
   503  }
   504  
   505  // will return false if using partial address
   506  func (p *Pss) isSelfRecipient(msg *PssMsg) bool {
   507  	return bytes.Equal(msg.To, p.Kademlia.BaseAddr())
   508  }
   509  
   510  // test match of leftmost bytes in given message to node's Kademlia address
   511  func (p *Pss) isSelfPossibleRecipient(msg *PssMsg, prox bool) bool {
   512  	local := p.Kademlia.BaseAddr()
   513  
   514  	// if a partial address matches, we are a possible recipient regardless of prox;
   515  	// if it does not and prox is not set, we are surely not
   516  	if bytes.Equal(msg.To, local[:len(msg.To)]) {
   517  
   518  		return true
   519  	} else if !prox {
   520  		return false
   521  	}
   522  
   523  	depth := p.Kademlia.NeighbourhoodDepth()
   524  	po, _ := network.Pof(p.Kademlia.BaseAddr(), msg.To, 0)
   525  	log.Trace("selfpossible", "po", po, "depth", depth)
   526  
   527  	return depth <= po
   528  }
   529  
   530  /////////////////////////////////////////////////////////////////////
   531  // SECTION: Message sending
   532  /////////////////////////////////////////////////////////////////////
   533  
   534  func (p *Pss) enqueue(msg *PssMsg) error {
   535  	select {
   536  	case p.outbox <- msg:
   537  		return nil
   538  	default:
   539  	}
   540  
   541  	metrics.GetOrRegisterCounter("pss.enqueue.outbox.full", nil).Inc(1)
   542  	return errors.New("outbox full")
   543  }
   544  
   545  // SendRaw sends a raw message (any encryption is the responsibility of the calling client)
   546  //
   547  // Will fail if raw messages are disallowed
   548  func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error {
   549  	if err := validateAddress(address); err != nil {
   550  		return err
   551  	}
   552  	pssMsgParams := &msgParams{
   553  		raw: true,
   554  	}
   555  	payload := &whisper.Envelope{
   556  		Data:  msg,
   557  		Topic: whisper.TopicType(topic),
   558  	}
   559  	pssMsg := newPssMsg(pssMsgParams)
   560  	pssMsg.To = address
   561  	pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix())
   562  	pssMsg.Payload = payload
   563  	p.addFwdCache(pssMsg)
   564  	err := p.enqueue(pssMsg)
   565  	if err != nil {
   566  		return err
   567  	}
   568  
   569  	// if we have a proxhandler on this topic
   570  	// also deliver message to ourselves
   571  	if capabilities, ok := p.getTopicHandlerCaps(topic); ok {
   572  		if p.isSelfPossibleRecipient(pssMsg, true) && capabilities.prox {
   573  			return p.process(pssMsg, true, true)
   574  		}
   575  	}
   576  	return nil
   577  }
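
// Example (editor's sketch, not part of the original source): sending a raw
// (unencrypted) message to a possibly partial recipient address; whether raw
// messages are accepted is governed by the AllowRaw setting and the receiving
// handlers' raw capability.
//
//	if err := ps.SendRaw(PssAddress(recipientAddr), topic, []byte("plaintext payload")); err != nil {
//		// handle send error (invalid address, outbox full, ...)
//	}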
   578  
   579  // Send a message using symmetric encryption
   580  //
   581  // Fails if the key id does not match any of the stored symmetric keys
   582  func (p *Pss) SendSym(symkeyid string, topic Topic, msg []byte) error {
   583  	symkey, err := p.GetSymmetricKey(symkeyid)
   584  	if err != nil {
   585  		return fmt.Errorf("missing valid send symkey %s: %v", symkeyid, err)
   586  	}
   587  	psp, ok := p.getPeerSym(symkeyid, topic)
   588  	if !ok {
   589  		return fmt.Errorf("invalid topic '%s' for symkey '%s'", topic.String(), symkeyid)
   590  	}
   591  	return p.send(psp.address, topic, msg, false, symkey)
   592  }
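
// Example (editor's sketch, not part of the original source): sending a
// symmetrically encrypted message, assuming symkeyid identifies a key that was
// previously stored in the embedded KeyStore and associated with this topic.
//
//	if err := ps.SendSym(symkeyid, topic, []byte("hello")); err != nil {
//		// handle send error (unknown key id, topic mismatch, outbox full, ...)
//	}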
   593  
   594  // Send a message using asymmetric encryption
   595  //
   596  // Fails if the key id does not match any of the stored public keys
   597  func (p *Pss) SendAsym(pubkeyid string, topic Topic, msg []byte) error {
   598  	if _, err := crypto.UnmarshalPubkey(common.FromHex(pubkeyid)); err != nil {
   599  		return fmt.Errorf("Cannot unmarshal pubkey: %x", pubkeyid)
   600  	}
   601  	psp, ok := p.getPeerPub(pubkeyid, topic)
   602  	if !ok {
   603  		return fmt.Errorf("invalid topic '%s' for pubkey '%s'", topic.String(), pubkeyid)
   604  	}
   605  	return p.send(psp.address, topic, msg, true, common.FromHex(pubkeyid))
   606  }
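
// Example (editor's sketch, not part of the original source): sending an
// asymmetrically encrypted message. SetPeerPublicKey is assumed to be provided
// by the embedded KeyStore; the key id is the hex encoding of the uncompressed
// public key, as expected by SendAsym above.
//
//	pubkeyid := common.ToHex(crypto.FromECDSAPub(recipientPubKey))
//	if err := ps.SetPeerPublicKey(recipientPubKey, topic, PssAddress(recipientAddr)); err != nil {
//		// handle key association error
//	}
//	if err := ps.SendAsym(pubkeyid, topic, []byte("hello")); err != nil {
//		// handle send error (unknown key, topic mismatch, outbox full, ...)
//	}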
   607  
   608  // send is payload agnostic, and will accept any byte slice as payload.
   609  // It generates a whisper envelope for the specified recipient and topic,
   610  // and wraps the message payload in it.
   611  // TODO: Implement proper message padding
   612  func (p *Pss) send(to []byte, topic Topic, msg []byte, asymmetric bool, key []byte) error {
   613  	metrics.GetOrRegisterCounter("pss.send", nil).Inc(1)
   614  
   615  	if key == nil || bytes.Equal(key, []byte{}) {
   616  		return fmt.Errorf("Zero length key passed to pss send")
   617  	}
   618  	padding := make([]byte, p.paddingByteSize)
   619  	c, err := rand.Read(padding)
   620  	if err != nil {
   621  		return err
   622  	} else if c < p.paddingByteSize {
   623  		return fmt.Errorf("invalid padding length: %d", c)
   624  	}
   625  	wparams := &whisper.MessageParams{
   626  		TTL:      defaultWhisperTTL,
   627  		Src:      p.privateKey,
   628  		Topic:    whisper.TopicType(topic),
   629  		WorkTime: defaultWhisperWorkTime,
   630  		PoW:      defaultWhisperPoW,
   631  		Payload:  msg,
   632  		Padding:  padding,
   633  	}
   634  	if asymmetric {
   635  		pk, err := crypto.UnmarshalPubkey(key)
   636  		if err != nil {
   637  			return fmt.Errorf("Cannot unmarshal pubkey: %x", key)
   638  		}
   639  		wparams.Dst = pk
   640  	} else {
   641  		wparams.KeySym = key
   642  	}
   643  	// set up outgoing message container, which does encryption and envelope wrapping
   644  	woutmsg, err := whisper.NewSentMessage(wparams)
   645  	if err != nil {
   646  		return fmt.Errorf("failed to generate whisper message encapsulation: %v", err)
   647  	}
   648  	// Wrap performs the encryption.
   649  	// It performs only negligible PoW due to the very low difficulty setting;
   650  	// after this the message is ready for sending.
   651  	envelope, err := woutmsg.Wrap(wparams)
   652  	if err != nil {
   653  		return fmt.Errorf("failed to perform whisper encryption: %v", err)
   654  	}
   655  	log.Trace("pssmsg whisper done", "env", envelope, "wparams payload", common.ToHex(wparams.Payload), "to", common.ToHex(to), "asym", asymmetric, "key", common.ToHex(key))
   656  
   657  	// prepare for devp2p transport
   658  	pssMsgParams := &msgParams{
   659  		sym: !asymmetric,
   660  	}
   661  	pssMsg := newPssMsg(pssMsgParams)
   662  	pssMsg.To = to
   663  	pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix())
   664  	pssMsg.Payload = envelope
   665  	err = p.enqueue(pssMsg)
   666  	if err != nil {
   667  		return err
   668  	}
   669  	if capabilities, ok := p.getTopicHandlerCaps(topic); ok {
   670  		if p.isSelfPossibleRecipient(pssMsg, true) && capabilities.prox {
   671  			return p.process(pssMsg, true, true)
   672  		}
   673  	}
   674  	return nil
   675  }
   676  
   677  // sendFunc is a helper function that tries to send a message and returns true on success.
   678  // It is set here for usage in production, and optionally overridden in tests.
   679  var sendFunc = sendMsg
   680  
   681  // tries to send a message, returns true if successful
   682  func sendMsg(p *Pss, sp *network.Peer, msg *PssMsg) bool {
   683  	var isPssEnabled bool
   684  	info := sp.Info()
   685  	for _, capability := range info.Caps {
   686  		if capability == p.capstring {
   687  			isPssEnabled = true
   688  			break
   689  		}
   690  	}
   691  	if !isPssEnabled {
   692  		log.Error("peer doesn't have matching pss capabilities, skipping", "peer", info.Name, "caps", info.Caps)
   693  		return false
   694  	}
   695  
   696  	// get the protocol peer from the forwarding peer cache
   697  	p.fwdPoolMu.RLock()
   698  	pp := p.fwdPool[sp.Info().ID]
   699  	p.fwdPoolMu.RUnlock()
   700  
   701  	err := pp.Send(context.TODO(), msg)
   702  	if err != nil {
   703  		metrics.GetOrRegisterCounter("pss.pp.send.error", nil).Inc(1)
   704  		log.Error(err.Error())
   705  	}
   706  
   707  	return err == nil
   708  }
   709  
   710  // Forwards a pss message to the peer(s) based on recipient address according to the algorithm
   711  // described below. The recipient address can be of any length, and the byte slice will be matched
   712  // to the MSB slice of the peer address of the equivalent length.
   713  //
   714  // If the recipient address (or partial address) is within the neighbourhood depth of the forwarding
   715  // node, then it will be forwarded to all the nearest neighbours of the forwarding node. In case of
   716  // partial address, it should be forwarded to all the peers matching the partial address, if there
   717  // are any; otherwise only to one peer, closest to the recipient address. In any case, if the message
   718  // forwarding fails, the node should try to forward it to the next best peer, until the message is
   719  // successfully forwarded to at least one peer.
   720  func (p *Pss) forward(msg *PssMsg) error {
   721  	metrics.GetOrRegisterCounter("pss.forward", nil).Inc(1)
   722  	sent := 0 // number of successful sends
   723  	to := make([]byte, addressLength)
   724  	copy(to[:len(msg.To)], msg.To)
   725  	neighbourhoodDepth := p.Kademlia.NeighbourhoodDepth()
   726  
   727  	// luminosity is the opposite of darkness: the more bytes are omitted from the address, the higher the darkness
   728  	// and the lower the luminosity. Here luminosity equals the number of bits given in the destination address.
   729  	luminosityRadius := len(msg.To) * 8
   730  
   731  	// proximity order function matching up to neighbourhoodDepth bits (po <= neighbourhoodDepth)
   732  	pof := pot.DefaultPof(neighbourhoodDepth)
   733  
   734  	// soft threshold for msg broadcast
   735  	broadcastThreshold, _ := pof(to, p.BaseAddr(), 0)
   736  	if broadcastThreshold > luminosityRadius {
   737  		broadcastThreshold = luminosityRadius
   738  	}
   739  
   740  	var onlySendOnce bool // indicates if the message should only be sent to one peer with closest address
   741  
   742  	// if proximity is measured from the recipient address as opposed to the base address (see the Kademlia.EachConn
   743  	// call below), then peers that fall in the same proximity bin as the recipient address will appear
   744  	// [at least] one bit closer, but only if these additional bits are given in the recipient address.
   745  	if broadcastThreshold < luminosityRadius && broadcastThreshold < neighbourhoodDepth {
   746  		broadcastThreshold++
   747  		onlySendOnce = true
   748  	}
   749  
   750  	p.Kademlia.EachConn(to, addressLength*8, func(sp *network.Peer, po int) bool {
   751  		if po < broadcastThreshold && sent > 0 {
   752  			return false // stop iterating
   753  		}
   754  		if sendFunc(p, sp, msg) {
   755  			sent++
   756  			if onlySendOnce {
   757  				return false
   758  			}
   759  			if po == addressLength*8 {
   760  				// stop iterating if successfully sent to the exact recipient (perfect match of full address)
   761  				return false
   762  			}
   763  		}
   764  		return true
   765  	})
   766  
   767  	// if we failed to send to anyone, re-insert message in the send-queue
   768  	if sent == 0 {
   769  		log.Debug("unable to forward to any peers")
   770  		if err := p.enqueue(msg); err != nil {
   771  			metrics.GetOrRegisterCounter("pss.forward.enqueue.error", nil).Inc(1)
   772  			log.Error(err.Error())
   773  			return err
   774  		}
   775  	}
   776  
   777  	// cache the message
   778  	p.addFwdCache(msg)
   779  	return nil
   780  }
   781  
   782  /////////////////////////////////////////////////////////////////////
   783  // SECTION: Caching
   784  /////////////////////////////////////////////////////////////////////
   785  
   786  // cleanFwdCache is used to periodically remove expired entries from the forward cache
   787  func (p *Pss) cleanFwdCache() {
   788  	metrics.GetOrRegisterCounter("pss.cleanfwdcache", nil).Inc(1)
   789  	p.fwdCacheMu.Lock()
   790  	defer p.fwdCacheMu.Unlock()
   791  	for k, v := range p.fwdCache {
   792  		if v.expiresAt.Before(time.Now()) {
   793  			delete(p.fwdCache, k)
   794  		}
   795  	}
   796  }
   797  
   798  func label(b []byte) string {
   799  	return fmt.Sprintf("%04x", b[:2])
   800  }
   801  
   802  // add a message to the cache
   803  func (p *Pss) addFwdCache(msg *PssMsg) error {
   804  	metrics.GetOrRegisterCounter("pss.addfwdcache", nil).Inc(1)
   805  
   806  	var entry pssCacheEntry
   807  	var ok bool
   808  
   809  	p.fwdCacheMu.Lock()
   810  	defer p.fwdCacheMu.Unlock()
   811  
   812  	digest := p.digest(msg)
   813  	if entry, ok = p.fwdCache[digest]; !ok {
   814  		entry = pssCacheEntry{}
   815  	}
   816  	entry.expiresAt = time.Now().Add(p.cacheTTL)
   817  	p.fwdCache[digest] = entry
   818  	return nil
   819  }
   820  
   821  // check if message is in the cache
   822  func (p *Pss) checkFwdCache(msg *PssMsg) bool {
   823  	p.fwdCacheMu.Lock()
   824  	defer p.fwdCacheMu.Unlock()
   825  
   826  	digest := p.digest(msg)
   827  	entry, ok := p.fwdCache[digest]
   828  	if ok {
   829  		if entry.expiresAt.After(time.Now()) {
   830  			log.Trace("unexpired cache", "digest", fmt.Sprintf("%x", digest))
   831  			metrics.GetOrRegisterCounter("pss.checkfwdcache.unexpired", nil).Inc(1)
   832  			return true
   833  		}
   834  		metrics.GetOrRegisterCounter("pss.checkfwdcache.expired", nil).Inc(1)
   835  	}
   836  	return false
   837  }
   838  
   839  // Digest of message
   840  func (p *Pss) digest(msg *PssMsg) pssDigest {
   841  	return p.digestBytes(msg.serialize())
   842  }
   843  
   844  func (p *Pss) digestBytes(msg []byte) pssDigest {
   845  	hasher := p.hashPool.Get().(hash.Hash)
   846  	defer p.hashPool.Put(hasher)
   847  	hasher.Reset()
   848  	hasher.Write(msg)
   849  	digest := pssDigest{}
   850  	key := hasher.Sum(nil)
   851  	copy(digest[:], key[:digestLength])
   852  	return digest
   853  }
   854  
   855  func validateAddress(addr PssAddress) error {
   856  	if len(addr) > addressLength {
   857  		return errors.New("address too long")
   858  	}
   859  	return nil
   860  }