github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/p2p/discv5/database.go

// This file is part of the go-sberex library. The go-sberex library is
// free software: you can redistribute it and/or modify it under the terms
// of the GNU Lesser General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// The go-sberex library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License <http://www.gnu.org/licenses/> for more details.

// Contains the node database, storing previously seen nodes and any collected
// metadata about them for QoS purposes.

package discv5

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/Sberex/go-sberex/crypto"
	"github.com/Sberex/go-sberex/log"
	"github.com/Sberex/go-sberex/rlp"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var (
	nodeDBNilNodeID      = NodeID{}       // Special node ID to use as a nil element.
	nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
	nodeDBCleanupCycle   = time.Hour      // Time period for running the expiration task.
)

// nodeDB stores all nodes we know about.
type nodeDB struct {
	lvl    *leveldb.DB   // Interface to the database itself
	self   NodeID        // Own node id to prevent adding it into the database
	runner sync.Once     // Ensures we can start at most one expirer
	quit   chan struct{} // Channel to signal the expiring thread to stop
}

// Schema layout for the node database
var (
	nodeDBVersionKey = []byte("version") // Version of the database; a mismatch flushes the contents
	nodeDBItemPrefix = []byte("n:")      // Identifier to prefix node entries with

	nodeDBDiscoverRoot          = ":discover"
	nodeDBDiscoverPing          = nodeDBDiscoverRoot + ":lastping"
	nodeDBDiscoverPong          = nodeDBDiscoverRoot + ":lastpong"
	nodeDBDiscoverFindFails     = nodeDBDiscoverRoot + ":findfail"
	nodeDBDiscoverLocalEndpoint = nodeDBDiscoverRoot + ":localendpoint"
	nodeDBTopicRegTickets       = ":tickets"
)

// newNodeDB creates a new node database for storing and retrieving information
// about known peers in the network. If no path is given, an in-memory,
// temporary database is constructed.
func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	if path == "" {
		return newMemoryNodeDB(self)
	}
	return newPersistentNodeDB(path, version, self)
}

// newMemoryNodeDB creates a new in-memory node database without a persistent
// backend.
func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}

// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	opts := &opt.Options{OpenFilesCacheCapacity: 5}
	db, err := leveldb.OpenFile(path, opts)
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		return nil, err
	}
	// The nodes contained in the cache correspond to a certain protocol version.
	// Flush all nodes if the version doesn't match.
	currentVer := make([]byte, binary.MaxVarintLen64)
	currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]

	blob, err := db.Get(nodeDBVersionKey, nil)
	switch err {
	case leveldb.ErrNotFound:
		// Version not found (i.e. empty cache), insert it
		if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
			db.Close()
			return nil, err
		}

	case nil:
		// Version present, flush if different
		if !bytes.Equal(blob, currentVer) {
			db.Close()
			if err = os.RemoveAll(path); err != nil {
				return nil, err
			}
			return newPersistentNodeDB(path, version, self)
		}
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}
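
// The sketch below is illustrative and not part of the original file: it shows
// the version-mismatch flush in action. Reopening the same path with a higher
// version number discards the previously stored nodes and starts empty. The
// function name and the path are hypothetical.
func exampleVersionFlush() {
	path := "/tmp/nodedb-example" // hypothetical location
	db, err := newPersistentNodeDB(path, 1, NodeID{})
	if err != nil {
		return
	}
	db.close()

	// Version 2 no longer matches the stored "version" entry, so the old
	// files are removed and the database is recreated from scratch.
	db, err = newPersistentNodeDB(path, 2, NodeID{})
	if err != nil {
		return
	}
	db.close()
	os.RemoveAll(path) // clean up the example directory
}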

// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id NodeID, field string) []byte {
	if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
		return []byte(field)
	}
	return append(nodeDBItemPrefix, append(id[:], field...)...)
}

// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id NodeID, field string) {
	// If the key is not of a node, return it plainly
	if !bytes.HasPrefix(key, nodeDBItemPrefix) {
		return NodeID{}, string(key)
	}
	// Otherwise split the id and field
	item := key[len(nodeDBItemPrefix):]
	copy(id[:], item[:len(id)])
	field = string(item[len(id):])

	return id, field
}
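
// The sketch below is illustrative and not part of the original file: it shows
// how the key layout produced by makeKey is expected to round-trip through
// splitKey. Per-node entries are "n:" + the raw node id + a field suffix,
// while the nil id maps to a global key consisting of the field alone. The
// function name exampleKeyLayout is hypothetical.
func exampleKeyLayout() {
	var id NodeID
	id[0] = 0x01 // any non-nil node id

	key := makeKey(id, nodeDBDiscoverPong)
	gotID, field := splitKey(key)
	fmt.Printf("id match: %v, field: %q\n", gotID == id, field) // field is ":discover:lastpong"

	global := makeKey(nodeDBNilNodeID, nodeDBDiscoverRoot)
	fmt.Printf("global key: %q\n", global) // just ":discover", no "n:" prefix
}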

// fetchInt64 retrieves an integer associated with a particular database key.
func (db *nodeDB) fetchInt64(key []byte) int64 {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return 0
	}
	val, read := binary.Varint(blob)
	if read <= 0 {
		return 0
	}
	return val
}

// storeInt64 stores an integer under a particular database key.
func (db *nodeDB) storeInt64(key []byte, n int64) error {
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutVarint(blob, n)]
	return db.lvl.Put(key, blob, nil)
}
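
// The sketch below is illustrative and not part of the original file: it
// stores a unix timestamp through the varint helpers and reads it back, using
// a throwaway in-memory database. The function name and the "example:lastseen"
// field are hypothetical.
func exampleInt64RoundTrip() {
	db, err := newMemoryNodeDB(NodeID{})
	if err != nil {
		return
	}
	defer db.close()

	// With the nil node id, makeKey returns the field verbatim as a global key.
	key := makeKey(nodeDBNilNodeID, "example:lastseen")
	if err := db.storeInt64(key, time.Now().Unix()); err != nil {
		return
	}
	fmt.Println("stored unix time:", db.fetchInt64(key))
}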

// storeRLP encodes val with RLP and stores it under the given key.
func (db *nodeDB) storeRLP(key []byte, val interface{}) error {
	blob, err := rlp.EncodeToBytes(val)
	if err != nil {
		return err
	}
	return db.lvl.Put(key, blob, nil)
}

// fetchRLP retrieves an RLP-encoded value from the given key and decodes it
// into val, logging a warning if decoding fails.
func (db *nodeDB) fetchRLP(key []byte, val interface{}) error {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return err
	}
	err = rlp.DecodeBytes(blob, val)
	if err != nil {
		log.Warn(fmt.Sprintf("key %x (%T) %v", key, val, err))
	}
	return err
}

// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
	var node Node
	if err := db.fetchRLP(makeKey(id, nodeDBDiscoverRoot), &node); err != nil {
		return nil
	}
	node.sha = crypto.Keccak256Hash(node.ID[:])
	return &node
}

// updateNode inserts - potentially overwriting - a node into the peer database.
func (db *nodeDB) updateNode(node *Node) error {
	return db.storeRLP(makeKey(node.ID, nodeDBDiscoverRoot), node)
}

// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) error {
	deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
	for deleter.Next() {
		if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
			return err
		}
	}
	return nil
}

// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data evacuation only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *nodeDB) ensureExpirer() {
	db.runner.Do(func() { go db.expirer() })
}

// expirer should be started in a goroutine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *nodeDB) expirer() {
	tick := time.NewTicker(nodeDBCleanupCycle)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			if err := db.expireNodes(); err != nil {
				log.Error(fmt.Sprintf("Failed to expire nodedb items: %v", err))
			}
		case <-db.quit:
			return
		}
	}
}

// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
	threshold := time.Now().Add(-nodeDBNodeExpiration)

	// Find discovered nodes that are older than the allowance
	it := db.lvl.NewIterator(nil, nil)
	defer it.Release()

	for it.Next() {
		// Skip the item if not a discovery node
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		// Skip the node if not expired yet (and not self)
		if !bytes.Equal(id[:], db.self[:]) {
			if seen := db.lastPong(id); seen.After(threshold) {
				continue
			}
		}
		// Otherwise delete all associated information
		db.deleteNode(id)
	}
	return nil
}

// lastPing retrieves the time of the last ping packet sent to a remote node,
// requesting bonding.
func (db *nodeDB) lastPing(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}

// updateLastPing updates the last time we tried contacting a remote node.
func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}

// lastPong retrieves the time of the last successful contact from a remote node.
func (db *nodeDB) lastPong(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}

// updateLastPong updates the last time a remote node successfully responded
// (i.e. the last time we received a pong from it).
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}

// findFails retrieves the number of findnode failures since bonding.
func (db *nodeDB) findFails(id NodeID) int {
	return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}

// updateFindFails updates the number of findnode failures since bonding.
func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}

// localEndpoint returns the last local endpoint communicated to the
// given remote node.
func (db *nodeDB) localEndpoint(id NodeID) *rpcEndpoint {
	var ep rpcEndpoint
	if err := db.fetchRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep); err != nil {
		return nil
	}
	return &ep
}

// updateLocalEndpoint stores the local endpoint last communicated to the
// given remote node.
func (db *nodeDB) updateLocalEndpoint(id NodeID, ep rpcEndpoint) error {
	return db.storeRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep)
}

// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
	var (
		now   = time.Now()
		nodes = make([]*Node, 0, n)
		it    = db.lvl.NewIterator(nil, nil)
		id    NodeID
	)
	defer it.Release()

seek:
	for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
		// Seek to a random entry. The first byte is incremented by a
		// random amount each time in order to increase the likelihood
		// of hitting all existing nodes in very small databases.
		ctr := id[0]
		rand.Read(id[:])
		id[0] = ctr + id[0]%16
		it.Seek(makeKey(id, nodeDBDiscoverRoot))

		n := nextNode(it)
		if n == nil {
			id[0] = 0
			continue seek // iterator exhausted
		}
		if n.ID == db.self {
			continue seek
		}
		if now.Sub(db.lastPong(n.ID)) > maxAge {
			continue seek
		}
		for i := range nodes {
			if nodes[i].ID == n.ID {
				continue seek // duplicate
			}
		}
		nodes = append(nodes, n)
	}
	return nodes
}
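
// The sketch below is illustrative and not part of the original file: it shows
// how a caller might draw bootstrap candidates from the database, asking for
// at most ten seeds whose last pong is no older than five days. The function
// name and the concrete numbers are hypothetical; the discovery table applies
// its own tuning.
func exampleQuerySeeds(db *nodeDB) {
	seeds := db.querySeeds(10, 5*24*time.Hour)
	for _, seed := range seeds {
		fmt.Printf("seed candidate %x, last pong %v\n", seed.ID[:8], db.lastPong(seed.ID))
	}
}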

// fetchTopicRegTickets retrieves the topic registration ticket counters
// (issued and used) stored for a node.
func (db *nodeDB) fetchTopicRegTickets(id NodeID) (issued, used uint32) {
	key := makeKey(id, nodeDBTopicRegTickets)
	blob, _ := db.lvl.Get(key, nil)
	if len(blob) != 8 {
		return 0, 0
	}
	issued = binary.BigEndian.Uint32(blob[0:4])
	used = binary.BigEndian.Uint32(blob[4:8])
	return
}

// updateTopicRegTickets stores the topic registration ticket counters for a node.
func (db *nodeDB) updateTopicRegTickets(id NodeID, issued, used uint32) error {
	key := makeKey(id, nodeDBTopicRegTickets)
	blob := make([]byte, 8)
	binary.BigEndian.PutUint32(blob[0:4], issued)
	binary.BigEndian.PutUint32(blob[4:8], used)
	return db.lvl.Put(key, blob, nil)
}
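
// The sketch below is illustrative and not part of the original file: the
// ticket counters are packed into a fixed 8-byte big-endian blob, issued
// first, used second, so a round trip preserves both values. The function
// name and the counter values are hypothetical.
func exampleTopicTickets(db *nodeDB, id NodeID) {
	if err := db.updateTopicRegTickets(id, 42, 7); err != nil {
		return
	}
	issued, used := db.fetchTopicRegTickets(id)
	fmt.Printf("issued=%d used=%d\n", issued, used) // expected: issued=42 used=7
}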

// nextNode reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
	for end := false; !end; end = !it.Next() {
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		var n Node
		if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
			log.Warn(fmt.Sprintf("invalid node %x: %v", id, err))
			continue
		}
		return &n
	}
	return nil
}

// close flushes and closes the database files.
func (db *nodeDB) close() {
	close(db.quit)
	db.lvl.Close()
}
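
// The sketch below is illustrative and not part of the original file: a
// typical lifecycle as seen from the discovery table - open the database,
// start the expirer once bootstrapping has succeeded, and close on shutdown.
// The function name and the version number 1 are placeholders.
func exampleLifecycle(self NodeID) {
	// An empty path selects the throwaway in-memory backend.
	db, err := newNodeDB("", 1, self)
	if err != nil {
		return
	}
	// In the real code path this runs after a successful bond; calling it
	// repeatedly is harmless because of the sync.Once runner.
	db.ensureExpirer()
	db.close()
}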