github.com/pslzym/go-ethereum@v1.8.17-0.20180926104442-4b6824e07b1b/p2p/enode/nodedb.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package enode

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var (
	nodeDBNilID          = ID{}           // Special node ID to use as a nil element.
	nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
	nodeDBCleanupCycle   = time.Hour      // Time period for running the expiration task.
	nodeDBVersion        = 6              // Version of the database content; a mismatch flushes the persistent store.
)

// DB is the node database, storing previously seen nodes and any collected metadata about
// them for QoS purposes.
type DB struct {
	lvl    *leveldb.DB   // Interface to the database itself
	runner sync.Once     // Ensures we can start at most one expirer
	quit   chan struct{} // Channel to signal the expiring thread to stop
}

// Schema layout for the node database
var (
	nodeDBVersionKey = []byte("version") // Version of the database to flush if changed
	nodeDBItemPrefix = []byte("n:")      // Identifier to prefix node entries with

	nodeDBDiscoverRoot      = ":discover"
	nodeDBDiscoverPing      = nodeDBDiscoverRoot + ":lastping"
	nodeDBDiscoverPong      = nodeDBDiscoverRoot + ":lastpong"
	nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail"
)
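// Illustrative note, not part of the original file: with the schema above and the
// makeKey helper below, the metadata of a single node ends up under keys of the form
//
//	n:<raw node id>:discover           -> RLP-encoded node record
//	n:<raw node id>:discover:lastping  -> varint unix timestamp of the last ping
//	n:<raw node id>:discover:lastpong  -> varint unix timestamp of the last pong
//	n:<raw node id>:discover:findfail  -> varint findnode failure counter
//
// while database-global entries such as the "version" marker live under the bare
// field name.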
// OpenDB opens a node database for storing and retrieving information about known
// peers in the network. If no path is given, an in-memory, temporary database is
// constructed.
func OpenDB(path string) (*DB, error) {
	if path == "" {
		return newMemoryDB()
	}
	return newPersistentDB(path)
}

// newMemoryDB creates a new in-memory node database without a persistent backend.
func newMemoryDB() (*DB, error) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	}
	return &DB{lvl: db, quit: make(chan struct{})}, nil
}

// newPersistentDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentDB(path string) (*DB, error) {
	opts := &opt.Options{OpenFilesCacheCapacity: 5}
	db, err := leveldb.OpenFile(path, opts)
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		return nil, err
	}
	// The nodes contained in the cache correspond to a certain protocol version.
	// Flush all nodes if the version doesn't match.
	currentVer := make([]byte, binary.MaxVarintLen64)
	currentVer = currentVer[:binary.PutVarint(currentVer, int64(nodeDBVersion))]

	blob, err := db.Get(nodeDBVersionKey, nil)
	switch err {
	case leveldb.ErrNotFound:
		// Version not found (i.e. empty cache), insert it
		if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
			db.Close()
			return nil, err
		}

	case nil:
		// Version present, flush if different
		if !bytes.Equal(blob, currentVer) {
			db.Close()
			if err = os.RemoveAll(path); err != nil {
				return nil, err
			}
			return newPersistentDB(path)
		}
	}
	return &DB{lvl: db, quit: make(chan struct{})}, nil
}

// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id ID, field string) []byte {
	if bytes.Equal(id[:], nodeDBNilID[:]) {
		return []byte(field)
	}
	return append(nodeDBItemPrefix, append(id[:], field...)...)
}

// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id ID, field string) {
	// If the key is not of a node, return it plainly
	if !bytes.HasPrefix(key, nodeDBItemPrefix) {
		return ID{}, string(key)
	}
	// Otherwise split the id and field
	item := key[len(nodeDBItemPrefix):]
	copy(id[:], item[:len(id)])
	field = string(item[len(id):])

	return id, field
}

// fetchInt64 retrieves an integer value associated with a particular database key.
func (db *DB) fetchInt64(key []byte) int64 {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return 0
	}
	val, read := binary.Varint(blob)
	if read <= 0 {
		return 0
	}
	return val
}

// storeInt64 stores an integer value under the given database key.
func (db *DB) storeInt64(key []byte, n int64) error {
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutVarint(blob, n)]

	return db.lvl.Put(key, blob, nil)
}

// Node retrieves a node with a given id from the database.
func (db *DB) Node(id ID) *Node {
	blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil)
	if err != nil {
		return nil
	}
	return mustDecodeNode(id[:], blob)
}

func mustDecodeNode(id, data []byte) *Node {
	node := new(Node)
	if err := rlp.DecodeBytes(data, &node.r); err != nil {
		panic(fmt.Errorf("p2p/enode: can't decode node %x in DB: %v", id, err))
	}
	// Restore node id cache.
	copy(node.id[:], id)
	return node
}

// UpdateNode inserts - potentially overwriting - a node into the peer database.
func (db *DB) UpdateNode(node *Node) error {
	blob, err := rlp.EncodeToBytes(&node.r)
	if err != nil {
		return err
	}
	return db.lvl.Put(makeKey(node.ID(), nodeDBDiscoverRoot), blob, nil)
}
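// Illustrative sketch, not part of the original file: how the key and integer
// helpers above round-trip. The function name is made up for this example and
// uses only the package's own helpers and imports.
func exampleKeyRoundTrip() {
	var id ID
	id[0] = 0x01

	// makeKey/splitKey are inverses for node-scoped entries.
	key := makeKey(id, nodeDBDiscoverPong)
	gotID, field := splitKey(key)
	fmt.Println(gotID == id, field == nodeDBDiscoverPong) // true true

	// Timestamps and counters are stored as varint-encoded int64 values.
	buf := make([]byte, binary.MaxVarintLen64)
	buf = buf[:binary.PutVarint(buf, time.Now().Unix())]
	ts, _ := binary.Varint(buf)
	fmt.Println(time.Unix(ts, 0)) // roughly the current time
}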
// DeleteNode deletes all information/keys associated with a node.
func (db *DB) DeleteNode(id ID) error {
	deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
	for deleter.Next() {
		if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
			return err
		}
	}
	return nil
}

// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data expiration only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *DB) ensureExpirer() {
	db.runner.Do(func() { go db.expirer() })
}

// expirer should be started in a goroutine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *DB) expirer() {
	tick := time.NewTicker(nodeDBCleanupCycle)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			if err := db.expireNodes(); err != nil {
				log.Error("Failed to expire nodedb items", "err", err)
			}
		case <-db.quit:
			return
		}
	}
}

// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. no pong received) within the allotted time.
func (db *DB) expireNodes() error {
	threshold := time.Now().Add(-nodeDBNodeExpiration)

	// Find discovered nodes that are older than the allowance
	it := db.lvl.NewIterator(nil, nil)
	defer it.Release()

	for it.Next() {
		// Skip the item if not a discovery node
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		// Skip the node if not expired yet
		if seen := db.LastPongReceived(id); seen.After(threshold) {
			continue
		}
		// Otherwise delete all associated information
		db.DeleteNode(id)
	}
	return nil
}

// LastPingReceived retrieves the time of the last ping packet received from
// a remote node.
func (db *DB) LastPingReceived(id ID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}

// UpdateLastPingReceived updates the last time a ping packet was received from
// a remote node.
func (db *DB) UpdateLastPingReceived(id ID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}

// LastPongReceived retrieves the time of the last successful pong from a remote node.
func (db *DB) LastPongReceived(id ID) time.Time {
	// Launch expirer
	db.ensureExpirer()
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}

// UpdateLastPongReceived updates the last pong time of a node.
func (db *DB) UpdateLastPongReceived(id ID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}

// FindFails retrieves the number of findnode failures since bonding.
func (db *DB) FindFails(id ID) int {
	return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}

// UpdateFindFails updates the number of findnode failures since bonding.
func (db *DB) UpdateFindFails(id ID, fails int) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}
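// Illustrative sketch, not part of the original file: callers typically consult
// the failure counter before evicting a node. The function name and the
// threshold below are assumptions made for this example only.
func exampleEvictOnFindFailures(db *DB, id ID) error {
	const maxFindnodeFailures = 5 // hypothetical threshold, not defined in this package

	fails := db.FindFails(id) + 1
	if fails > maxFindnodeFailures {
		// Too many consecutive failures: drop all stored metadata for the node.
		return db.DeleteNode(id)
	}
	return db.UpdateFindFails(id, fails)
}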
// QuerySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node {
	var (
		now   = time.Now()
		nodes = make([]*Node, 0, n)
		it    = db.lvl.NewIterator(nil, nil)
		id    ID
	)
	defer it.Release()

seek:
	for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
		// Seek to a random entry. The first byte is incremented by a
		// random amount each time in order to increase the likelihood
		// of hitting all existing nodes in very small databases.
		ctr := id[0]
		rand.Read(id[:])
		id[0] = ctr + id[0]%16
		it.Seek(makeKey(id, nodeDBDiscoverRoot))

		n := nextNode(it)
		if n == nil {
			id[0] = 0
			continue seek // iterator exhausted
		}
		if now.Sub(db.LastPongReceived(n.ID())) > maxAge {
			continue seek
		}
		for i := range nodes {
			if nodes[i].ID() == n.ID() {
				continue seek // duplicate
			}
		}
		nodes = append(nodes, n)
	}
	return nodes
}

// nextNode reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
	for end := false; !end; end = !it.Next() {
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		return mustDecodeNode(id[:], it.Value())
	}
	return nil
}

// Close flushes and closes the database files.
func (db *DB) Close() {
	close(db.quit)
	db.lvl.Close()
}
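// Illustrative usage sketch, not part of the original file: open a throwaway
// in-memory database, record a pong for a node and read it back. The function
// name is made up for this example; most error handling is elided for brevity.
func exampleInMemoryUsage() {
	db, err := OpenDB("") // an empty path selects the in-memory backend
	if err != nil {
		return
	}
	defer db.Close()

	var id ID
	id[0] = 0x42

	db.UpdateLastPongReceived(id, time.Now())
	fmt.Println(db.LastPongReceived(id)) // roughly the time recorded above
}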