github.com/sixexorg/magnetic-ring@v0.0.0-20191119090307-31705a21e419/p2pserver/discover/database.go

package discover

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
	"github.com/sixexorg/magnetic-ring/rlp"
	// https://www.golangnote.com/topic/81.html
)

var (
	nodeDBNilNodeID      = NodeID{}       // Special node ID to use as a nil element.
	nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
	nodeDBCleanupCycle   = time.Hour      // Time period for running the expiration task.
)

// nodeDB stores all nodes we know about.
type nodeDB struct {
	lvl    *leveldb.DB   // Interface to the database itself
	self   NodeID        // Own node id to prevent adding it into the database
	runner sync.Once     // Ensures we can start at most one expirer
	quit   chan struct{} // Channel to signal the expiring thread to stop
}

// Schema layout for the node database
var (
	nodeDBVersionKey = []byte("version") // Version of the database to flush if it changes
	nodeDBItemPrefix = []byte("n:")      // Identifier to prefix node entries with

	nodeDBDiscoverRoot      = ":discover"
	nodeDBDiscoverPing      = nodeDBDiscoverRoot + ":lastping"
	nodeDBDiscoverPong      = nodeDBDiscoverRoot + ":lastpong"
	nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail"
)

// newNodeDB creates a new node database for storing and retrieving info about
// known peers in the network. If no path is given, an in-memory, temporary
// database is constructed.
func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	if path == "" {
		return newMemoryNodeDB(self)
	}
	return newPersistentNodeDB(path, version, self)
}

// newMemoryNodeDB creates a new in-memory node database without a persistent
// backend.
func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}

// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	fmt.Println(" ****** newPersistentNodeDB path:", path, "version:", version)
	opts := &opt.Options{OpenFilesCacheCapacity: 5}
	db, err := leveldb.OpenFile(path, opts)
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		return nil, err
	}
	// The nodes contained in the cache correspond to a certain protocol version.
	// Flush all nodes if the version doesn't match.
	currentVer := make([]byte, binary.MaxVarintLen64)
	currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]

	blob, err := db.Get(nodeDBVersionKey, nil)
	switch err {
	case leveldb.ErrNotFound:
		// Version not found (i.e. empty cache), insert it
		if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
			db.Close()
			return nil, err
		}

	case nil:
		// Version present, flush if different
		if !bytes.Equal(blob, currentVer) {
			db.Close()
			if err = os.RemoveAll(path); err != nil {
				return nil, err
			}
			return newPersistentNodeDB(path, version, self)
		}
	}
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}
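
// Usage sketch (illustrative only, not part of the original file): a caller in
// this package would typically open the database with its own node ID and the
// discovery protocol version, then close it on shutdown. The names dbPath,
// protoVersion and selfID below are placeholders, not identifiers defined here.
//
//	db, err := newNodeDB(dbPath, protoVersion, selfID) // dbPath == "" gives an in-memory DB
//	if err != nil {
//		return err
//	}
//	defer db.close()
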
// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id NodeID, field string) []byte {
	if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
		return []byte(field)
	}
	return append(nodeDBItemPrefix, append(id[:], field...)...)
}

// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id NodeID, field string) {
	// If the key is not of a node, return it plainly
	if !bytes.HasPrefix(key, nodeDBItemPrefix) {
		return NodeID{}, string(key)
	}
	// Otherwise split the id and field
	item := key[len(nodeDBItemPrefix):]
	copy(id[:], item[:len(id)])
	field = string(item[len(id):])

	return id, field
}

// fetchInt64 retrieves an integer associated with a particular database key.
func (db *nodeDB) fetchInt64(key []byte) int64 {
	blob, err := db.lvl.Get(key, nil)
	if err != nil {
		return 0
	}
	val, read := binary.Varint(blob)
	if read <= 0 {
		return 0
	}
	return val
}

// storeInt64 stores an integer under a specific database key, encoded as a varint.
func (db *nodeDB) storeInt64(key []byte, n int64) error {
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutVarint(blob, n)]

	return db.lvl.Put(key, blob, nil)
}

// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
	blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil)
	if err != nil {
		return nil
	}
	node := new(Node)
	if err := rlp.DecodeBytes(blob, node); err != nil {
		log.Error("Failed to decode node RLP", "err", err)
		return nil
	}
	node.sha = crypto.Keccak256Hash(node.ID[:])
	return node
}

// updateNode inserts - potentially overwriting - a node into the peer database.
func (db *nodeDB) updateNode(node *Node) error {
	blob, err := rlp.EncodeToBytes(node)
	if err != nil {
		return err
	}
	return db.lvl.Put(makeKey(node.ID, nodeDBDiscoverRoot), blob, nil)
}

// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) error {
	deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
	for deleter.Next() {
		if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
			return err
		}
	}
	return nil
}
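
// Key layout sketch (illustrative only, not part of the original file): for a
// node with ID id, the database keys used by this file look like
//
//	"version"                            -> varint schema version (stored directly, no prefix)
//	"n:" + id[:] + ":discover"           -> RLP-encoded Node record
//	"n:" + id[:] + ":discover:lastping"  -> varint unix timestamp of the last ping sent
//	"n:" + id[:] + ":discover:lastpong"  -> varint unix timestamp of the last successful contact
//	"n:" + id[:] + ":discover:findfail"  -> varint findnode failure counter
//
// splitKey reverses makeKey by stripping the "n:" prefix, copying the
// fixed-size ID bytes and returning the remainder as the field name.
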
// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data evacuation only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *nodeDB) ensureExpirer() {
	db.runner.Do(func() { go db.expirer() })
}

// expirer should be started in a goroutine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *nodeDB) expirer() {
	tick := time.NewTicker(nodeDBCleanupCycle)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			if err := db.expireNodes(); err != nil {
				log.Error("Failed to expire nodedb items", "err", err)
			}

		case <-db.quit:
			return
		}
	}
}

// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
	threshold := time.Now().Add(-nodeDBNodeExpiration)

	// Find discovered nodes that are older than the allowance
	it := db.lvl.NewIterator(nil, nil)
	defer it.Release()

	for it.Next() {
		// Skip the item if not a discovery node
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		// Skip the node if not expired yet (and not self)
		if !bytes.Equal(id[:], db.self[:]) {
			if seen := db.lastPong(id); seen.After(threshold) {
				continue
			}
		}
		// Otherwise delete all associated information
		db.deleteNode(id)
	}
	return nil
}

// lastPing retrieves the time of the last ping packet sent to a remote node,
// requesting bonding.
func (db *nodeDB) lastPing(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}

// updateLastPing updates the last time we tried contacting a remote node.
func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}

// lastPong retrieves the time of the last successful contact from a remote node.
func (db *nodeDB) lastPong(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}

// updateLastPong updates the last time a remote node successfully contacted us.
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}

// findFails retrieves the number of findnode failures since bonding.
func (db *nodeDB) findFails(id NodeID) int {
	return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}

// updateFindFails updates the number of findnode failures since bonding.
func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
	return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}

// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
	var (
		now   = time.Now()
		nodes = make([]*Node, 0, n)
		it    = db.lvl.NewIterator(nil, nil)
		id    NodeID
	)
	defer it.Release()

seek:
	for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
		// Seek to a random entry. The first byte is incremented by a
		// random amount each time in order to increase the likelihood
		// of hitting all existing nodes in very small databases.
		ctr := id[0]
		rand.Read(id[:])
		id[0] = ctr + id[0]%16
		it.Seek(makeKey(id, nodeDBDiscoverRoot))

		n := nextNode(it)
		if n == nil {
			id[0] = 0
			continue seek // iterator exhausted
		}
		if n.ID == db.self {
			continue seek
		}
		if now.Sub(db.lastPong(n.ID)) > maxAge {
			continue seek
		}
		for i := range nodes {
			if nodes[i].ID == n.ID {
				continue seek // duplicate
			}
		}
		nodes = append(nodes, n)
	}
	return nodes
}
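
// Usage sketch (illustrative only, not part of the original file): the
// discovery table would typically call querySeeds while bootstrapping; the
// count and age below are placeholder values, not constants defined here.
//
//	seeds := db.querySeeds(30, 5*24*time.Hour)
//	for _, seed := range seeds {
//		// dial or bond with the seed node
//	}
//
// querySeeds filters out the local node itself, nodes whose last pong is older
// than maxAge, and duplicates already collected in this call.
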
// reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
	for end := false; !end; end = !it.Next() {
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		var n Node
		if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
			log.Warn("Failed to decode node RLP", "id", id, "err", err)
			continue
		}
		return &n
	}
	return nil
}

// close flushes and closes the database files.
func (db *nodeDB) close() {
	close(db.quit)
	db.lvl.Close()
}
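
// Round-trip sketch (illustrative only, not part of the original file): storing
// a node and its metadata with the helpers above, then reading them back. The
// variable n stands for some *Node the caller already holds.
//
//	db, _ := newMemoryNodeDB(NodeID{})
//	_ = db.updateNode(n)
//	_ = db.updateLastPong(n.ID, time.Now())
//	stored := db.node(n.ID)              // RLP round-trip of n, with sha recomputed
//	age := time.Since(db.lastPong(n.ID)) // close to zero right after the update
//	db.close()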