github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/p2p/discover/database.go

// Copyright 2015 The Spectrum Authors
// This file is part of the Spectrum library.
//
// The Spectrum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Spectrum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the node database, storing previously seen nodes and any collected
// metadata about them for QoS purposes.

package discover

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"os"
	"sync"
	"time"

	"github.com/SmartMeshFoundation/Spectrum/crypto"
	"github.com/SmartMeshFoundation/Spectrum/log"
	"github.com/SmartMeshFoundation/Spectrum/rlp"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var (
	nodeDBNilNodeID      = NodeID{}       // Special node ID to use as a nil element.
	nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
	nodeDBCleanupCycle   = time.Hour      // Time period for running the expiration task.
)

// nodeDB stores all nodes we know about.
type nodeDB struct {
	lvl    *leveldb.DB   // Interface to the database itself
	self   NodeID        // Own node id to prevent adding it into the database
	runner sync.Once     // Ensures we can start at most one expirer
	quit   chan struct{} // Channel to signal the expiring thread to stop
}

// Schema layout for the node database
var (
	nodeDBVersionKey = []byte("version") // Version of the database to flush if it changes
	nodeDBItemPrefix = []byte("n:")      // Identifier to prefix node entries with

	nodeDBDiscoverRoot      = ":discover"
	nodeDBDiscoverPing      = nodeDBDiscoverRoot + ":lastping"
	nodeDBDiscoverPong      = nodeDBDiscoverRoot + ":lastpong"
	nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail"
)

// newNodeDB creates a new node database for storing and retrieving information
// about known peers in the network. If no path is given, an in-memory,
// temporary database is constructed.
func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	if path == "" {
		return newMemoryNodeDB(self)
	}
	return newPersistentNodeDB(path, version, self)
}

// newMemoryNodeDB creates a new in-memory node database without a persistent
// backend.
func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}
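
// exampleOpenNodeDB is an illustrative sketch of how a caller might open the
// node database; it is not used anywhere in this package. The dbVersion value
// below is hypothetical: bumping it makes the persistent backend flush its
// contents on the next open (see newPersistentNodeDB).
func exampleOpenNodeDB(path string, self NodeID) (*nodeDB, error) {
	const dbVersion = 5 // hypothetical schema version
	// An empty path yields a throw-away in-memory database (used by tests and
	// ephemeral nodes); a non-empty path opens or creates a leveldb store.
	return newNodeDB(path, dbVersion, self)
}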

// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	opts := &opt.Options{OpenFilesCacheCapacity: 5}
	db, err := leveldb.OpenFile(path, opts)
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		return nil, err
	}
	// The nodes contained in the cache correspond to a certain protocol version.
	// Flush all nodes if the version doesn't match.
	currentVer := make([]byte, binary.MaxVarintLen64)
	currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]

	blob, err := db.Get(nodeDBVersionKey, nil)
	switch err {
	case leveldb.ErrNotFound:
		// Version not found (i.e. empty cache), insert it
		if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
			db.Close()
			return nil, err
		}

	case nil:
		// Version present, flush if different
		if !bytes.Equal(blob, currentVer) {
			db.Close()
			if err = os.RemoveAll(path); err != nil {
				return nil, err
			}
			return newPersistentNodeDB(path, version, self)
		}
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}

// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id NodeID, field string) []byte {
	if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
		return []byte(field)
	}
	return append(nodeDBItemPrefix, append(id[:], field...)...)
}

// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id NodeID, field string) {
	// If the key is not of a node, return it plainly
	if !bytes.HasPrefix(key, nodeDBItemPrefix) {
		return NodeID{}, string(key)
	}
	// Otherwise split the id and field
	item := key[len(nodeDBItemPrefix):]
	copy(id[:], item[:len(id)])
	field = string(item[len(id):])

	return id, field
}

// fetchInt64 retrieves an integer associated with a particular database key.
func (db *nodeDB) fetchInt64(key []byte) int64 {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return 0
	}
	val, read := binary.Varint(blob)
	if read <= 0 {
		return 0
	}
	return val
}

// storeInt64 stores an integer under a particular database key.
func (db *nodeDB) storeInt64(key []byte, n int64) error {
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutVarint(blob, n)]

	return db.lvl.Put(key, blob, nil)
}

// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
	blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil)
	if err != nil {
		return nil
	}
	node := new(Node)
	if err := rlp.DecodeBytes(blob, node); err != nil {
		log.Error("Failed to decode node RLP", "err", err)
		return nil
	}
	node.sha = crypto.Keccak256Hash(node.ID[:])
	return node
}

// updateNode inserts - potentially overwriting - a node into the peer database.
func (db *nodeDB) updateNode(node *Node) error {
	blob, err := rlp.EncodeToBytes(node)
	if err != nil {
		return err
	}
	return db.lvl.Put(makeKey(node.ID, nodeDBDiscoverRoot), blob, nil)
}
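
// exampleKeyRoundTrip is an illustrative sketch of the key schema used above;
// it is not called anywhere in this package. Node-specific entries are stored
// under "n:" + <node id> + <field>, and splitKey recovers the two halves that
// makeKey joined.
func exampleKeyRoundTrip(id NodeID) bool {
	key := makeKey(id, nodeDBDiscoverPong) // "n:" + id bytes + ":discover:lastpong"
	gotID, field := splitKey(key)
	return gotID == id && field == nodeDBDiscoverPong
}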

// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) error {
	deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
	for deleter.Next() {
		if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
			return err
		}
	}
	return nil
}

// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data evacuation only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *nodeDB) ensureExpirer() {
	db.runner.Do(func() { go db.expirer() })
}

// expirer should be started in a goroutine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *nodeDB) expirer() {
	tick := time.NewTicker(nodeDBCleanupCycle)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			if err := db.expireNodes(); err != nil {
				log.Error("Failed to expire nodedb items", "err", err)
			}
		case <-db.quit:
			return
		}
	}
}

// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
	threshold := time.Now().Add(-nodeDBNodeExpiration)

	// Find discovered nodes that are older than the allowance
	it := db.lvl.NewIterator(nil, nil)
	defer it.Release()

	for it.Next() {
		// Skip the item if not a discovery node
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		// Skip the node if not expired yet (and not self)
		if !bytes.Equal(id[:], db.self[:]) {
			if seen := db.lastPong(id); seen.After(threshold) {
				continue
			}
		}
		// Otherwise delete all associated information
		db.deleteNode(id)
	}
	return nil
}

// lastPing retrieves the time of the last ping packet sent to a remote node,
// requesting bonding.
func (db *nodeDB) lastPing(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}

// updateLastPing updates the last time we tried contacting a remote node.
func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}

// lastPong retrieves the time of the last successful contact from a remote node.
func (db *nodeDB) lastPong(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}

// updateLastPong updates the last time a remote node successfully contacted us.
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}

// findFails retrieves the number of findnode failures since bonding.
func (db *nodeDB) findFails(id NodeID) int {
	return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}
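
// exampleDialCandidate is an illustrative sketch of how the metadata accessors
// above might be combined; it is not called anywhere in this package. A peer
// makes a reasonable dial candidate if it answered a ping recently and has not
// failed too many findnode queries. Both thresholds are hypothetical.
func exampleDialCandidate(db *nodeDB, id NodeID) bool {
	const (
		maxFindFails = 5              // hypothetical failure tolerance
		maxPongAge   = 24 * time.Hour // hypothetical freshness window
	)
	fresh := time.Since(db.lastPong(id)) < maxPongAge
	return fresh && db.findFails(id) < maxFindFails
}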

// updateFindFails updates the number of findnode failures since bonding.
func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}

// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
	var (
		now   = time.Now()
		nodes = make([]*Node, 0, n)
		it    = db.lvl.NewIterator(nil, nil)
		id    NodeID
	)
	defer it.Release()

seek:
	for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
		// Seek to a random entry. The first byte is incremented by a
		// random amount each time in order to increase the likelihood
		// of hitting all existing nodes in very small databases.
		ctr := id[0]
		rand.Read(id[:])
		id[0] = ctr + id[0]%16
		it.Seek(makeKey(id, nodeDBDiscoverRoot))

		n := nextNode(it)
		if n == nil {
			id[0] = 0
			continue seek // iterator exhausted
		}
		if n.ID == db.self {
			continue seek
		}
		if now.Sub(db.lastPong(n.ID)) > maxAge {
			continue seek
		}
		for i := range nodes {
			if nodes[i].ID == n.ID {
				continue seek // duplicate
			}
		}
		nodes = append(nodes, n)
	}
	return nodes
}

// nextNode reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
	for end := false; !end; end = !it.Next() {
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		var n Node
		if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
			log.Warn("Failed to decode node RLP", "id", id, "err", err)
			continue
		}
		return &n
	}
	return nil
}

// close flushes and closes the database files.
func (db *nodeDB) close() {
	close(db.quit)
	db.lvl.Close()
}
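
// exampleSeedLookup is an illustrative sketch that strings the pieces above
// together; it is not called anywhere in this package. After a successful pong
// the peer is marked as recently seen, the hourly expirer is (idempotently)
// started, and a handful of seeds is pulled for the next lookup. The seed
// count and age limit are hypothetical.
func exampleSeedLookup(db *nodeDB, peer *Node) []*Node {
	if err := db.updateLastPong(peer.ID, time.Now()); err != nil {
		log.Warn("Failed to update last pong", "id", peer.ID, "err", err)
	}
	db.ensureExpirer()
	return db.querySeeds(10, 3*24*time.Hour)
}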