github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/swarm/network/kademlia/kaddb.go

// This file is part of the go-sberex library. The go-sberex library is
// free software: you can redistribute it and/or modify it under the terms
// of the GNU Lesser General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// The go-sberex library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License <http://www.gnu.org/licenses/> for more details.

package kademlia

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"sync"
	"time"

	"github.com/Sberex/go-sberex/log"
)

type NodeData interface {
	json.Marshaler
	json.Unmarshaler
}

// NodeRecord is the persisted record of a known peer; records are kept even
// for inactive (currently offline) peers so that they can be retried later.
type NodeRecord struct {
	Addr  Address          // address of node
	Url   string           // Url, used to connect to node
	After time.Time        // next call after time
	Seen  time.Time        // last connected at time
	Meta  *json.RawMessage // arbitrary metadata saved for a peer

	node Node
}

// setSeen records the current time as both the last-seen time and the
// earliest time the node may be contacted again.
func (self *NodeRecord) setSeen() {
	t := time.Now()
	self.Seen = t
	self.After = t
}

func (self *NodeRecord) String() string {
	return fmt.Sprintf("<%v>", self.Addr)
}
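
// setMeta is a hypothetical helper, not part of the original file, sketching
// how the raw-JSON Meta field is intended to be used: any marshalable value
// can be attached to a record without kaddb knowing its schema, and it is
// persisted verbatim by save/load.
func (self *NodeRecord) setMeta(v interface{}) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	raw := json.RawMessage(data)
	self.Meta = &raw
	return nil
}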

// KadDb is the persisted node record database.
type KadDb struct {
	Address              Address
	Nodes                [][]*NodeRecord
	index                map[Address]*NodeRecord
	cursors              []int
	lock                 sync.RWMutex
	purgeInterval        time.Duration
	initialRetryInterval time.Duration
	connRetryExp         int
}

func newKadDb(addr Address, params *KadParams) *KadDb {
	return &KadDb{
		Address:              addr,
		Nodes:                make([][]*NodeRecord, params.MaxProx+1), // overwritten by load
		cursors:              make([]int, params.MaxProx+1),
		index:                make(map[Address]*NodeRecord),
		purgeInterval:        params.PurgeInterval,
		initialRetryInterval: params.InitialRetryInterval,
		connRetryExp:         params.ConnRetryExp,
	}
}
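
// newExampleKadDb is an illustrative sketch, not part of the original file;
// the concrete values are assumptions, only the field names (MaxProx,
// PurgeInterval, InitialRetryInterval, ConnRetryExp) are taken from what
// newKadDb actually reads out of KadParams.
func newExampleKadDb(addr Address) *KadDb {
	return newKadDb(addr, &KadParams{
		MaxProx:              8,               // proximity bins PO 0..8
		PurgeInterval:        24 * time.Hour,  // records unseen longer than this are dropped
		InitialRetryInterval: 2 * time.Second, // lower bound for the retry back-off
		ConnRetryExp:         2,               // back-off multiplier used by findBest
	})
}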

// findOrCreate returns the node record for address a, creating one (with the
// given url) and appending it to kaddb row index if it is not yet known.
// In either case the record's last-seen time and url are refreshed.
func (self *KadDb) findOrCreate(index int, a Address, url string) *NodeRecord {
	defer self.lock.Unlock()
	self.lock.Lock()

	record, found := self.index[a]
	if !found {
		record = &NodeRecord{
			Addr: a,
			Url:  url,
		}
		log.Info(fmt.Sprintf("add new record %v to kaddb", record))
		// insert in kaddb
		self.index[a] = record
		self.Nodes[index] = append(self.Nodes[index], record)
	} else {
		log.Info(fmt.Sprintf("found record %v in kaddb", record))
	}
	// update last seen time
	record.setSeen()
	// update with url in case IP/port changes
	record.Url = url
	return record
}

// add adds node records to kaddb (persisted node record db)
func (self *KadDb) add(nrs []*NodeRecord, proximityBin func(Address) int) {
	defer self.lock.Unlock()
	self.lock.Lock()
	var n int
	var nodes []*NodeRecord
	for _, node := range nrs {
		_, found := self.index[node.Addr]
		if !found && node.Addr != self.Address {
			node.setSeen()
			self.index[node.Addr] = node
			index := proximityBin(node.Addr)
			dbcursor := self.cursors[index]
			nodes = self.Nodes[index]
			// inserting at the cursor via make+copy is inefficient for allocation;
			// appending and then shifting would avoid it (see the sketch after this function)
			newnodes := make([]*NodeRecord, len(nodes)+1)
			copy(newnodes[:], nodes[:dbcursor])
			newnodes[dbcursor] = node
			copy(newnodes[dbcursor+1:], nodes[dbcursor:])
			log.Trace(fmt.Sprintf("new nodes: %v, nodes: %v", newnodes, nodes))
			self.Nodes[index] = newnodes
			n++
		}
	}
	if n > 0 {
		log.Debug(fmt.Sprintf("%d/%d node records (new/known)", n, len(nrs)))
	}
}
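
// insertAtCursor is a sketch of the append-then-shift insertion that the
// comment in add alludes to; it is not called anywhere in the original code.
// Growing the slice by one and shifting the tail avoids allocating and
// copying a whole new slice on every insert (the growth is amortised by append).
func insertAtCursor(nodes []*NodeRecord, cursor int, node *NodeRecord) []*NodeRecord {
	nodes = append(nodes, nil)             // grow by one slot
	copy(nodes[cursor+1:], nodes[cursor:]) // shift the tail one slot to the right
	nodes[cursor] = node                   // place the new record at the cursor
	return nodes
}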

/*
findBest returns one node record with the highest priority for a desired
connection.
It is used to pick candidates for live nodes that are most wanted for
a highly connected, low-centrality network structure for Swarm, which best
suits Kademlia-style routing.

* Starting as a naive node with an empty db, this implements Kademlia bootstrapping.
* As a mature node, it fills short rows. All on demand.

The candidate is chosen using the following strategy:
We check for missing online nodes in the buckets for 1 up to maxBinSize rounds.
On each round we proceed from the low to high proximity order buckets.
If the number of active nodes (=connected peers) is < rounds, then start looking
for a known candidate. To determine if there is a candidate to recommend, the
kaddb node record database row corresponding to the bucket is checked.

If the row cursor is on position i, the ith element in the row is chosen.
If the record is not yet due to be retried (its After time is still in the
future), the next element is taken.
If the record is due to be retried, it is marked as checked, scheduled for the
next check and returned. The time of the next check is a duration X such that
X = ConnRetryExp * delta, where delta is the time passed since the last check and
ConnRetryExp is a constant obsoletion factor. (Note that when node records are added
from peer messages, they are marked as checked and placed at the cursor, i.e.
given priority over older entries.) Entries which were checked more than
purgeInterval ago are deleted from the kaddb row. If no candidate is found after
a full round of checking, the next bucket up is considered. If no candidate is
found when we reach the maximum-proximity bucket, the next round starts.
(An illustrative sketch of this retry schedule follows findBest below.)

node record a is favoured over b (a > b) iff a is a passive node (record of an
offline past peer) and
|proxBin(a)| < |proxBin(b)|
|| (proxBin(a) < proxBin(b) && |proxBin(a)| == |proxBin(b)|)
|| (proxBin(a) == proxBin(b) && lastChecked(a) < lastChecked(b))


The second return value (need) reports whether a missing slot was found; the
third (proxLimit) is the proximity order of the first missing slot found.
*/
func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRecord, need bool, proxLimit int) {
	// a nil node with need == false indicates that all buckets are filled;
	// otherwise proxLimit is the proximity order of the first missing slot
	defer self.lock.Unlock()
	self.lock.Lock()

	var interval time.Duration
	var found bool
	var purge []bool
	var delta time.Duration
	var cursor int
	var count int
	var after time.Time

	// iterate over columns maximum bucketsize times
	for rounds := 1; rounds <= maxBinSize; rounds++ {
	ROUND:
		// iterate over rows from PO 0 up to MaxProx
		for po, dbrow := range self.Nodes {
			// if the row already has at least rounds connected peers, take the next row
			if binSize(po) >= rounds {
				continue ROUND
			}
			if !need {
				// set proxLimit to the PO where the first missing slot is found
				proxLimit = po
				need = true
			}
			purge = make([]bool, len(dbrow))

			// there is a missing slot - finding a node to connect to
			// select a node record from the relevant kaddb row (of identical prox order)
		ROW:
			for cursor = self.cursors[po]; !found && count < len(dbrow); cursor = (cursor + 1) % len(dbrow) {
				count++
				node = dbrow[cursor]

				// skip already connected nodes
				if node.node != nil {
					log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d/%d) already connected", node.Addr, po, cursor, len(dbrow)))
					continue ROW
				}

				// skip nodes that are not yet due to be contacted again
				if node.After.After(time.Now()) {
					log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) skipped. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))
					continue ROW
				}

				delta = time.Since(node.Seen)
				if delta < self.initialRetryInterval {
					delta = self.initialRetryInterval
				}
				if delta > self.purgeInterval {
					// remove node
					purge[cursor] = true
					log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) unreachable since %v. Removed", node.Addr, po, cursor, node.Seen))
					continue ROW
				}

				log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) ready to be tried. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))

				// scheduling next check
				interval = delta * time.Duration(self.connRetryExp)
				after = time.Now().Add(interval)

				log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) selected as candidate connection %v. seen at %v (%v ago), selectable since %v, retry after %v (in %v)", node.Addr, po, cursor, rounds, node.Seen, delta, node.After, after, interval))
				node.After = after
				found = true
			} // ROW
			self.cursors[po] = cursor
			self.delete(po, purge)
			if found {
				return node, need, proxLimit
			}
		} // ROUND
	} // ROUNDS

	return nil, need, proxLimit
}
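
// retrySchedule is an illustrative sketch, not used by the original code, of
// the back-off that findBest applies to a selected record: the next attempt is
// scheduled connRetryExp times the time elapsed since the record was last
// seen, clamped below by initialRetryInterval; records older than
// purgeInterval are dropped instead of retried.
func (self *KadDb) retrySchedule(lastSeen time.Time) (after time.Time, purge bool) {
	delta := time.Since(lastSeen)
	if delta < self.initialRetryInterval {
		delta = self.initialRetryInterval
	}
	if delta > self.purgeInterval {
		// too stale: findBest would flag such a record for deletion
		return time.Time{}, true
	}
	// the wait between attempts grows geometrically while the peer stays unseen
	return time.Now().Add(delta * time.Duration(self.connRetryExp)), false
}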

// delete removes the node records of a kaddb row that are flagged in purge.
// The caller must hold the db lock.
// The call is unsafe: no index checks are performed.
func (self *KadDb) delete(row int, purge []bool) {
	var nodes []*NodeRecord
	dbrow := self.Nodes[row]
	for i, del := range purge {
		if i == self.cursors[row] {
			// reset cursor
			self.cursors[row] = len(nodes)
		}
		// delete the entry to be purged
		if del {
			delete(self.index, dbrow[i].Addr)
			continue
		}
		// otherwise append to new list
		nodes = append(nodes, dbrow[i])
	}
	self.Nodes[row] = nodes
}

// save persists kaddb on disk (written to the file at path in JSON format).
func (self *KadDb) save(path string, cb func(*NodeRecord, Node)) error {
	defer self.lock.Unlock()
	self.lock.Lock()

	var n int

	for _, b := range self.Nodes {
		for _, node := range b {
			n++
			node.After = time.Now()
			node.Seen = time.Now()
			if cb != nil {
				cb(node, node.node)
			}
		}
	}

	data, err := json.MarshalIndent(self, "", " ")
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(path, data, os.ModePerm)
	if err != nil {
		log.Warn(fmt.Sprintf("unable to save kaddb with %v nodes to %v: %v", n, path, err))
	} else {
		log.Info(fmt.Sprintf("saved kaddb with %v nodes to %v", n, path))
	}
	return err
}

// load reads the node record database (kaddb) from the file at path.
func (self *KadDb) load(path string, cb func(*NodeRecord, Node) error) (err error) {
	defer self.lock.Unlock()
	self.lock.Lock()

	var data []byte
	data, err = ioutil.ReadFile(path)
	if err != nil {
		return
	}

	err = json.Unmarshal(data, self)
	if err != nil {
		return
	}
	var n int
	var purge []bool
	for po, b := range self.Nodes {
		purge = make([]bool, len(b))
	ROW:
		for i, node := range b {
			if cb != nil {
				err = cb(node, node.node)
				if err != nil {
					purge[i] = true
					continue ROW
				}
			}
			n++
			if node.After.IsZero() {
				node.After = time.Now()
			}
			self.index[node.Addr] = node
		}
		self.delete(po, purge)
	}
	log.Info(fmt.Sprintf("loaded kaddb with %v nodes from %v", n, path))

	return
}
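
// saveAndReload is a hypothetical sketch, not part of the original API,
// showing how save and load are meant to be paired: save writes the whole
// KadDb as indented JSON (resetting the Seen/After stamps), and load re-reads
// it and rebuilds the in-memory index; both accept a nil callback.
func saveAndReload(kad *KadDb, path string) error {
	if err := kad.save(path, nil); err != nil {
		return err
	}
	return kad.load(path, nil)
}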

// count returns the number of node records in the kaddb (the offline peer db).
func (self *KadDb) count() int {
	defer self.lock.Unlock()
	self.lock.Lock()
	return len(self.index)
}