github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/p2p/discv5/database.go

// Copyright 2018 The go-ethereum Authors
// Copyright 2019 The go-aigar Authors
// This file is part of the go-aigar library.
//
// The go-aigar library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-aigar library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.

// Contains the node database, storing previously seen nodes and any collected
// metadata about them for QoS purposes.

package discv5

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/AigarNetwork/aigar/crypto"
	"github.com/AigarNetwork/aigar/log"
	"github.com/AigarNetwork/aigar/rlp"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var (
	nodeDBNilNodeID      = NodeID{}       // Special node ID to use as a nil element.
	nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
	nodeDBCleanupCycle   = time.Hour      // Time period for running the expiration task.
)

// nodeDB stores all nodes we know about.
type nodeDB struct {
	lvl    *leveldb.DB   // Interface to the database itself
	self   NodeID        // Own node id to prevent adding it into the database
	runner sync.Once     // Ensures we can start at most one expirer
	quit   chan struct{} // Channel to signal the expiring thread to stop
}

// Schema layout for the node database
var (
	nodeDBVersionKey = []byte("version") // Version of the database to flush if it changes
	nodeDBItemPrefix = []byte("n:")      // Identifier to prefix node entries with

	nodeDBDiscoverRoot          = ":discover"
	nodeDBDiscoverPing          = nodeDBDiscoverRoot + ":lastping"
	nodeDBDiscoverPong          = nodeDBDiscoverRoot + ":lastpong"
	nodeDBDiscoverFindFails     = nodeDBDiscoverRoot + ":findfail"
	nodeDBDiscoverLocalEndpoint = nodeDBDiscoverRoot + ":localendpoint"
	nodeDBTopicRegTickets       = ":tickets"
)

// newNodeDB creates a new node database for storing and retrieving information
// about known peers in the network. If no path is given, an in-memory,
// temporary database is constructed.
func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	if path == "" {
		return newMemoryNodeDB(self)
	}
	return newPersistentNodeDB(path, version, self)
}
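// Illustrative usage sketch (assumed call sites; the filesystem path and the Version
// constant below are placeholders for illustration, not taken from this file):
//
//	memDB, _ := newNodeDB("", Version, self)          // empty path: ephemeral in-memory database
//	diskDB, _ := newNodeDB("/tmp/nodes", Version, self) // non-empty path: persistent leveldb store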
// newMemoryNodeDB creates a new in-memory node database without a persistent
// backend.
func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}

// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	opts := &opt.Options{OpenFilesCacheCapacity: 5}
	db, err := leveldb.OpenFile(path, opts)
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		return nil, err
	}
	// The nodes contained in the cache correspond to a certain protocol version.
	// Flush all nodes if the version doesn't match.
	currentVer := make([]byte, binary.MaxVarintLen64)
	currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]

	blob, err := db.Get(nodeDBVersionKey, nil)
	switch err {
	case leveldb.ErrNotFound:
		// Version not found (i.e. empty cache), insert it
		if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
			db.Close()
			return nil, err
		}

	case nil:
		// Version present, flush if different
		if !bytes.Equal(blob, currentVer) {
			db.Close()
			if err = os.RemoveAll(path); err != nil {
				return nil, err
			}
			return newPersistentNodeDB(path, version, self)
		}
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}

// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id NodeID, field string) []byte {
	if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
		return []byte(field)
	}
	return append(nodeDBItemPrefix, append(id[:], field...)...)
}

// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id NodeID, field string) {
	// If the key is not of a node, return it plainly
	if !bytes.HasPrefix(key, nodeDBItemPrefix) {
		return NodeID{}, string(key)
	}
	// Otherwise split the id and field
	item := key[len(nodeDBItemPrefix):]
	copy(id[:], item[:len(id)])
	field = string(item[len(id):])

	return id, field
}

// fetchInt64 retrieves an integer associated with a particular database key.
func (db *nodeDB) fetchInt64(key []byte) int64 {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return 0
	}
	val, read := binary.Varint(blob)
	if read <= 0 {
		return 0
	}
	return val
}
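// Key layout sketch (illustrative restatement of makeKey/splitKey above): for a
// non-nil id, makeKey concatenates the "n:" prefix, the raw node ID bytes and the
// field string, and splitKey reverses that, e.g.
//
//	key := makeKey(id, nodeDBDiscoverPong) // "n:" + id[:] + ":discover:lastpong"
//	id2, field := splitKey(key)            // id2 == id, field == ":discover:lastpong"
//
// Keys built with the nil node ID consist of the field string alone and carry no
// "n:" prefix.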
// storeInt64 stores an integer value under the given database key, encoded as
// a varint.
func (db *nodeDB) storeInt64(key []byte, n int64) error {
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutVarint(blob, n)]
	return db.lvl.Put(key, blob, nil)
}

// storeRLP stores an RLP-encoded value under the given database key.
func (db *nodeDB) storeRLP(key []byte, val interface{}) error {
	blob, err := rlp.EncodeToBytes(val)
	if err != nil {
		return err
	}
	return db.lvl.Put(key, blob, nil)
}

// fetchRLP retrieves and RLP-decodes the value stored under the given database key.
func (db *nodeDB) fetchRLP(key []byte, val interface{}) error {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return err
	}
	err = rlp.DecodeBytes(blob, val)
	if err != nil {
		log.Warn(fmt.Sprintf("key %x (%T) %v", key, val, err))
	}
	return err
}

// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
	var node Node
	if err := db.fetchRLP(makeKey(id, nodeDBDiscoverRoot), &node); err != nil {
		return nil
	}
	node.sha = crypto.Keccak256Hash(node.ID[:])
	return &node
}

// updateNode inserts - potentially overwriting - a node into the peer database.
func (db *nodeDB) updateNode(node *Node) error {
	return db.storeRLP(makeKey(node.ID, nodeDBDiscoverRoot), node)
}

// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) error {
	deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
	for deleter.Next() {
		if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
			return err
		}
	}
	return nil
}

// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data evacuation only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *nodeDB) ensureExpirer() {
	db.runner.Do(func() { go db.expirer() })
}

// expirer should be started in a goroutine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *nodeDB) expirer() {
	tick := time.NewTicker(nodeDBCleanupCycle)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			if err := db.expireNodes(); err != nil {
				log.Error(fmt.Sprintf("Failed to expire nodedb items: %v", err))
			}
		case <-db.quit:
			return
		}
	}
}
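// Expiration lifecycle sketch (illustrative; the call site is an assumption based on
// the ensureExpirer comment above, not code from this file): after a successful
// bonding the discovery layer records the pong and kicks the expirer exactly once.
//
//	db.updateLastPong(id, time.Now())
//	db.ensureExpirer() // idempotent: only the first call starts the cleanup goroutine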
// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
	threshold := time.Now().Add(-nodeDBNodeExpiration)

	// Find discovered nodes that are older than the allowance
	it := db.lvl.NewIterator(nil, nil)
	defer it.Release()

	for it.Next() {
		// Skip the item if not a discovery node
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		// Skip the node if not expired yet (and not self)
		if !bytes.Equal(id[:], db.self[:]) {
			if seen := db.lastPong(id); seen.After(threshold) {
				continue
			}
		}
		// Otherwise delete all associated information
		db.deleteNode(id)
	}
	return nil
}

// lastPing retrieves the time of the last ping packet sent to a remote node,
// requesting bonding.
func (db *nodeDB) lastPing(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}

// updateLastPing updates the last time we tried contacting a remote node.
func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}

// lastPong retrieves the time of the last successful contact from a remote node.
func (db *nodeDB) lastPong(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}

// updateLastPong updates the last time a remote node successfully contacted us.
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}

// findFails retrieves the number of findnode failures since bonding.
func (db *nodeDB) findFails(id NodeID) int {
	return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}

// updateFindFails updates the number of findnode failures since bonding.
func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}

// localEndpoint returns the last local endpoint communicated to the
// given remote node.
func (db *nodeDB) localEndpoint(id NodeID) *rpcEndpoint {
	var ep rpcEndpoint
	if err := db.fetchRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep); err != nil {
		return nil
	}
	return &ep
}

// updateLocalEndpoint stores the local endpoint last communicated to the given
// remote node.
func (db *nodeDB) updateLocalEndpoint(id NodeID, ep rpcEndpoint) error {
	return db.storeRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep)
}
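// Timestamp storage sketch (illustrative restatement of the getters/setters above):
// contact times are persisted as varint-encoded unix seconds, so a round trip loses
// sub-second precision.
//
//	_ = db.updateLastPong(id, time.Now()) // stores time.Now().Unix() via storeInt64
//	seen := db.lastPong(id)               // time.Unix(fetchInt64(...), 0)
//	stale := time.Since(seen) > nodeDBNodeExpiration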
// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
	var (
		now   = time.Now()
		nodes = make([]*Node, 0, n)
		it    = db.lvl.NewIterator(nil, nil)
		id    NodeID
	)
	defer it.Release()

seek:
	for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
		// Seek to a random entry. The first byte is incremented by a
		// random amount each time in order to increase the likelihood
		// of hitting all existing nodes in very small databases.
		ctr := id[0]
		rand.Read(id[:])
		id[0] = ctr + id[0]%16
		it.Seek(makeKey(id, nodeDBDiscoverRoot))

		n := nextNode(it)
		if n == nil {
			id[0] = 0
			continue seek // iterator exhausted
		}
		if n.ID == db.self {
			continue seek
		}
		if now.Sub(db.lastPong(n.ID)) > maxAge {
			continue seek
		}
		for i := range nodes {
			if nodes[i].ID == n.ID {
				continue seek // duplicate
			}
		}
		nodes = append(nodes, n)
	}
	return nodes
}

// fetchTopicRegTickets retrieves the topic registration ticket counters
// (issued and used) stored for the given node.
func (db *nodeDB) fetchTopicRegTickets(id NodeID) (issued, used uint32) {
	key := makeKey(id, nodeDBTopicRegTickets)
	blob, _ := db.lvl.Get(key, nil)
	if len(blob) != 8 {
		return 0, 0
	}
	issued = binary.BigEndian.Uint32(blob[0:4])
	used = binary.BigEndian.Uint32(blob[4:8])
	return
}

// updateTopicRegTickets stores the topic registration ticket counters
// (issued and used) for the given node.
func (db *nodeDB) updateTopicRegTickets(id NodeID, issued, used uint32) error {
	key := makeKey(id, nodeDBTopicRegTickets)
	blob := make([]byte, 8)
	binary.BigEndian.PutUint32(blob[0:4], issued)
	binary.BigEndian.PutUint32(blob[4:8], used)
	return db.lvl.Put(key, blob, nil)
}

// nextNode reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
	for end := false; !end; end = !it.Next() {
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		var n Node
		if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
			log.Warn(fmt.Sprintf("invalid node %x: %v", id, err))
			continue
		}
		return &n
	}
	return nil
}

// close flushes and closes the database files.
func (db *nodeDB) close() {
	close(db.quit)
	db.lvl.Close()
}
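// Bootstrap/shutdown sketch (illustrative; the seed count and max age are arbitrary
// example values, not constants from this file):
//
//	seeds := db.querySeeds(10, 5*24*time.Hour) // up to 10 nodes with a pong in the last 5 days
//	// ... dial the seeds and run discovery ...
//	db.close() // stops the expirer goroutine and closes the leveldb handle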