github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/p2p/enode/nodedb.go

//  Copyright 2018 The go-ethereum Authors
//  Copyright 2019 The go-aigar Authors
//  This file is part of the go-aigar library.
//
//  The go-aigar library is free software: you can redistribute it and/or modify
//  it under the terms of the GNU Lesser General Public License as published by
//  the Free Software Foundation, either version 3 of the License, or
//  (at your option) any later version.
//
//  The go-aigar library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//  GNU Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public License
//  along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.

package enode

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"net"
	"os"
	"sync"
	"time"

	"github.com/AigarNetwork/aigar/rlp"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

// Keys in the node database.
const (
	dbVersionKey   = "version" // Version of the database; flush contents if it changes
	dbNodePrefix   = "n:"      // Identifier to prefix node entries with
	dbLocalPrefix  = "local:"  // Identifier to prefix local metadata with
	dbDiscoverRoot = "v4"      // Sub-key appended to node keys for discovery v4 data

	// These fields are stored per ID and IP, the full key is "n:<ID>:v4:<IP>:findfail".
	// Use nodeItemKey to create those keys.
	dbNodeFindFails = "findfail"
	dbNodePing      = "lastping"
	dbNodePong      = "lastpong"
	dbNodeSeq       = "seq"

	// Local information is keyed by ID only, the full key is "local:<ID>:seq".
	// Use localItemKey to create those keys.
	dbLocalSeq = "seq"
)
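
// As a rough illustration of the layout above (byte values abbreviated), the
// database ends up holding entries shaped like:
//
//	n:<32-byte ID>:v4                        -> RLP-encoded node record
//	n:<32-byte ID>:v4:<16-byte IP>:lastpong  -> varint Unix timestamp
//	local:<32-byte ID>:seq                   -> uvarint sequence counter
//
// nodeKey, nodeItemKey and localItemKey below construct these keys.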

const (
	dbNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
	dbCleanupCycle   = time.Hour      // Time period for running the expiration task.
	dbVersion        = 9
)

var zeroIP = make(net.IP, 16)

// DB is the node database, storing previously seen nodes and any collected metadata about
// them for QoS purposes.
type DB struct {
	lvl    *leveldb.DB   // Interface to the database itself
	runner sync.Once     // Ensures we can start at most one expirer
	quit   chan struct{} // Channel to signal the expiring thread to stop
}

// OpenDB opens a node database for storing and retrieving information about known peers
// in the network. If no path is given, an in-memory, temporary database is constructed.
func OpenDB(path string) (*DB, error) {
	if path == "" {
		return newMemoryDB()
	}
	return newPersistentDB(path)
}
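
// A minimal usage sketch (assumed caller code, not taken from this package):
// an empty path yields a throwaway in-memory database, any other path opens or
// creates a persistent leveldb directory.
//
//	db, err := OpenDB("")             // in-memory, e.g. for tests
//	// db, err := OpenDB(nodesDir)    // persistent database at nodesDir
//	if err != nil {
//		// handle the error
//	}
//	defer db.Close()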

// newMemoryDB creates a new in-memory node database without a persistent backend.
func newMemoryDB() (*DB, error) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	}
	return &DB{lvl: db, quit: make(chan struct{})}, nil
}

// newPersistentDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentDB(path string) (*DB, error) {
	opts := &opt.Options{OpenFilesCacheCapacity: 5}
	db, err := leveldb.OpenFile(path, opts)
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		return nil, err
	}
	// The nodes contained in the cache correspond to a certain protocol version.
	// Flush all nodes if the version doesn't match.
	currentVer := make([]byte, binary.MaxVarintLen64)
	currentVer = currentVer[:binary.PutVarint(currentVer, int64(dbVersion))]

	blob, err := db.Get([]byte(dbVersionKey), nil)
	switch err {
	case leveldb.ErrNotFound:
		// Version not found (i.e. empty cache), insert it
		if err := db.Put([]byte(dbVersionKey), currentVer, nil); err != nil {
			db.Close()
			return nil, err
		}

	case nil:
		// Version present, flush if different
		if !bytes.Equal(blob, currentVer) {
			db.Close()
			if err = os.RemoveAll(path); err != nil {
				return nil, err
			}
			return newPersistentDB(path)
		}
	}
	return &DB{lvl: db, quit: make(chan struct{})}, nil
}

// nodeKey returns the database key for a node record.
func nodeKey(id ID) []byte {
	key := append([]byte(dbNodePrefix), id[:]...)
	key = append(key, ':')
	key = append(key, dbDiscoverRoot...)
	return key
}

// splitNodeKey returns the node ID of a key created by nodeKey.
func splitNodeKey(key []byte) (id ID, rest []byte) {
	if !bytes.HasPrefix(key, []byte(dbNodePrefix)) {
		return ID{}, nil
	}
	item := key[len(dbNodePrefix):]
	copy(id[:], item[:len(id)])
	return id, item[len(id)+1:]
}

// nodeItemKey returns the database key for a node metadata field.
func nodeItemKey(id ID, ip net.IP, field string) []byte {
	ip16 := ip.To16()
	if ip16 == nil {
		panic(fmt.Errorf("invalid IP (length %d)", len(ip)))
	}
	return bytes.Join([][]byte{nodeKey(id), ip16, []byte(field)}, []byte{':'})
}

// splitNodeItemKey returns the components of a key created by nodeItemKey.
func splitNodeItemKey(key []byte) (id ID, ip net.IP, field string) {
	id, key = splitNodeKey(key)
	// Skip discover root.
	if string(key) == dbDiscoverRoot {
		return id, nil, ""
	}
	key = key[len(dbDiscoverRoot)+1:]
	// Split out the IP.
	ip = net.IP(key[:16])
	if ip4 := ip.To4(); ip4 != nil {
		ip = ip4
	}
	key = key[16+1:]
	// Field is the remainder of key.
	field = string(key)
	return id, ip, field
}

// localItemKey returns the key of a local node item.
func localItemKey(id ID, field string) []byte {
	key := append([]byte(dbLocalPrefix), id[:]...)
	key = append(key, ':')
	key = append(key, field...)
	return key
}

// fetchInt64 retrieves an integer associated with a particular key.
func (db *DB) fetchInt64(key []byte) int64 {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return 0
	}
	val, read := binary.Varint(blob)
	if read <= 0 {
		return 0
	}
	return val
}

// storeInt64 stores an integer under the given key.
func (db *DB) storeInt64(key []byte, n int64) error {
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutVarint(blob, n)]
	return db.lvl.Put(key, blob, nil)
}

// fetchUint64 retrieves an unsigned integer associated with a particular key.
func (db *DB) fetchUint64(key []byte) uint64 {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return 0
	}
	val, _ := binary.Uvarint(blob)
	return val
}

// storeUint64 stores an unsigned integer under the given key.
func (db *DB) storeUint64(key []byte, n uint64) error {
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutUvarint(blob, n)]
	return db.lvl.Put(key, blob, nil)
}

// Node retrieves a node with a given id from the database.
func (db *DB) Node(id ID) *Node {
	blob, err := db.lvl.Get(nodeKey(id), nil)
	if err != nil {
		return nil
	}
	return mustDecodeNode(id[:], blob)
}

func mustDecodeNode(id, data []byte) *Node {
	node := new(Node)
	if err := rlp.DecodeBytes(data, &node.r); err != nil {
		panic(fmt.Errorf("p2p/enode: can't decode node %x in DB: %v", id, err))
	}
	// Restore node id cache.
	copy(node.id[:], id)
	return node
}

// UpdateNode inserts - potentially overwriting - a node into the peer database.
func (db *DB) UpdateNode(node *Node) error {
	if node.Seq() < db.NodeSeq(node.ID()) {
		return nil
	}
	blob, err := rlp.EncodeToBytes(&node.r)
	if err != nil {
		return err
	}
	if err := db.lvl.Put(nodeKey(node.ID()), blob, nil); err != nil {
		return err
	}
	return db.storeUint64(nodeItemKey(node.ID(), zeroIP, dbNodeSeq), node.Seq())
}
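
// Sketch of the intended interplay with NodeSeq (illustrative records, not
// taken from real callers): a record is only written if it is at least as new
// as the one already stored, so replaying an older record is a no-op.
//
//	_ = db.UpdateNode(recordSeq2)   // stored, sequence number 2
//	_ = db.UpdateNode(recordSeq1)   // ignored, 1 < stored sequence 2
//	n := db.Node(recordSeq2.ID())   // still returns the seq-2 record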

// NodeSeq returns the stored record sequence number of the given node.
func (db *DB) NodeSeq(id ID) uint64 {
	return db.fetchUint64(nodeItemKey(id, zeroIP, dbNodeSeq))
}

// Resolve returns the stored record of the node if it has a larger sequence
// number than n.
func (db *DB) Resolve(n *Node) *Node {
	if n.Seq() > db.NodeSeq(n.ID()) {
		return n
	}
	return db.Node(n.ID())
}

// DeleteNode deletes all information associated with a node.
func (db *DB) DeleteNode(id ID) {
	deleteRange(db.lvl, nodeKey(id))
}

func deleteRange(db *leveldb.DB, prefix []byte) {
	it := db.NewIterator(util.BytesPrefix(prefix), nil)
	defer it.Release()
	for it.Next() {
		db.Delete(it.Key(), nil)
	}
}

// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data evacuation only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *DB) ensureExpirer() {
	db.runner.Do(func() { go db.expirer() })
}

// expirer should be started in a goroutine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *DB) expirer() {
	tick := time.NewTicker(dbCleanupCycle)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			db.expireNodes()
		case <-db.quit:
			return
		}
	}
}

// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some time.
func (db *DB) expireNodes() {
	it := db.lvl.NewIterator(util.BytesPrefix([]byte(dbNodePrefix)), nil)
	defer it.Release()
	if !it.Next() {
		return
	}

	var (
		threshold    = time.Now().Add(-dbNodeExpiration).Unix()
		youngestPong int64
		atEnd        = false
	)
	for !atEnd {
		id, ip, field := splitNodeItemKey(it.Key())
		if field == dbNodePong {
			time, _ := binary.Varint(it.Value())
			if time > youngestPong {
				youngestPong = time
			}
			if time < threshold {
				// Last pong from this IP older than threshold, remove fields belonging to it.
				deleteRange(db.lvl, nodeItemKey(id, ip, ""))
			}
		}
		atEnd = !it.Next()
		nextID, _ := splitNodeKey(it.Key())
		if atEnd || nextID != id {
			// We've moved beyond the last entry of the current ID.
			// Remove everything if there was no recent enough pong.
			if youngestPong > 0 && youngestPong < threshold {
				deleteRange(db.lvl, nodeKey(id))
			}
			youngestPong = 0
		}
	}
}

// LastPingReceived retrieves the time of the last ping packet received from
// a remote node.
func (db *DB) LastPingReceived(id ID, ip net.IP) time.Time {
	return time.Unix(db.fetchInt64(nodeItemKey(id, ip, dbNodePing)), 0)
}

// UpdateLastPingReceived updates the last time a ping packet was received from
// a remote node.
func (db *DB) UpdateLastPingReceived(id ID, ip net.IP, instance time.Time) error {
	return db.storeInt64(nodeItemKey(id, ip, dbNodePing), instance.Unix())
}

// LastPongReceived retrieves the time of the last successful pong from a remote node.
func (db *DB) LastPongReceived(id ID, ip net.IP) time.Time {
	// Launch expirer
	db.ensureExpirer()
	return time.Unix(db.fetchInt64(nodeItemKey(id, ip, dbNodePong)), 0)
}

// UpdateLastPongReceived updates the last pong time of a node.
func (db *DB) UpdateLastPongReceived(id ID, ip net.IP, instance time.Time) error {
	return db.storeInt64(nodeItemKey(id, ip, dbNodePong), instance.Unix())
}

// FindFails retrieves the number of findnode failures since bonding.
func (db *DB) FindFails(id ID, ip net.IP) int {
	return int(db.fetchInt64(nodeItemKey(id, ip, dbNodeFindFails)))
}

// UpdateFindFails updates the number of findnode failures since bonding.
func (db *DB) UpdateFindFails(id ID, ip net.IP, fails int) error {
	return db.storeInt64(nodeItemKey(id, ip, dbNodeFindFails), int64(fails))
}

// localSeq retrieves the local record sequence counter.
func (db *DB) localSeq(id ID) uint64 {
	return db.fetchUint64(localItemKey(id, dbLocalSeq))
}

// storeLocalSeq stores the local record sequence counter.
func (db *DB) storeLocalSeq(id ID, n uint64) {
	db.storeUint64(localItemKey(id, dbLocalSeq), n)
}

// QuerySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node {
	var (
		now   = time.Now()
		nodes = make([]*Node, 0, n)
		it    = db.lvl.NewIterator(nil, nil)
		id    ID
	)
	defer it.Release()

seek:
	for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
		// Seek to a random entry. The first byte is incremented by a
		// random amount each time in order to increase the likelihood
		// of hitting all existing nodes in very small databases.
		ctr := id[0]
		rand.Read(id[:])
		id[0] = ctr + id[0]%16
		it.Seek(nodeKey(id))

		n := nextNode(it)
		if n == nil {
			id[0] = 0
			continue seek // iterator exhausted
		}
		if now.Sub(db.LastPongReceived(n.ID(), n.IP())) > maxAge {
			continue seek
		}
		for i := range nodes {
			if nodes[i].ID() == n.ID() {
				continue seek // duplicate
			}
		}
		nodes = append(nodes, n)
	}
	return nodes
}
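
// A hedged usage sketch (the parameters are illustrative, not taken from this
// package's callers): fetch up to 30 random nodes whose last pong is at most
// five days old and hand them to the caller's dialer or discovery table.
//
//	seeds := db.QuerySeeds(30, 5*24*time.Hour)
//	for _, seed := range seeds {
//		// use seed as a bootstrap candidate
//	}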

// nextNode reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
	for end := false; !end; end = !it.Next() {
		id, rest := splitNodeKey(it.Key())
		if string(rest) != dbDiscoverRoot {
			continue
		}
		return mustDecodeNode(id[:], it.Value())
	}
	return nil
}

// Close flushes and closes the database files.
func (db *DB) Close() {
	close(db.quit)
	db.lvl.Close()
}