github.com/Bytom/bytom@v1.1.2-0.20210127130405-ae40204c0b09/p2p/discover/dht/database.go

// Contains the node database, storing previously seen nodes and any collected
// metadata about them for QoS purposes.

package dht

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"os"
	"path"
	"sync"
	"time"

	log "github.com/sirupsen/logrus"
	wire "github.com/tendermint/go-wire"

	"github.com/bytom/bytom/crypto"
	dbm "github.com/bytom/bytom/database/leveldb"
	"github.com/bytom/bytom/errors"
)

var (
	nodeDBNilNodeID      = NodeID{}       // Special node ID to use as a nil element.
	nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
	nodeDBCleanupCycle   = time.Hour      // Time period for running the expiration task.
)

// nodeDB stores all nodes we know about.
type nodeDB struct {
	lvl    dbm.DB        // Interface to the database itself
	self   NodeID        // Own node id to prevent adding it into the database
	runner sync.Once     // Ensures we can start at most one expirer
	quit   chan struct{} // Channel to signal the expiring thread to stop
}

// Schema layout for the node database
var (
	nodeDBVersionKey = []byte("version") // Version of the database, flushed if it changes
	nodeDBItemPrefix = []byte("n:")      // Identifier to prefix node entries with

	nodeDBDiscoverRoot      = ":discover"
	nodeDBDiscoverPing      = nodeDBDiscoverRoot + ":lastping"
	nodeDBDiscoverPong      = nodeDBDiscoverRoot + ":lastpong"
	nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail"
	nodeDBTopicRegTickets   = ":tickets"
)

// newNodeDB creates a new node database for storing and retrieving info about
// known peers in the network. If no path is given, an in-memory, temporary
// database is constructed.
func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
	if path == "" {
		return newMemoryNodeDB(self), nil
	}
	return newPersistentNodeDB(path, version, self)
}

// newMemoryNodeDB creates a new in-memory node database without a persistent
// backend.
func newMemoryNodeDB(self NodeID) *nodeDB {
	db := dbm.NewMemDB()
	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}
}

// newPersistentNodeDB creates/opens a leveldb-backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(filePath string, version int, self NodeID) (*nodeDB, error) {
	dir, file := path.Split(filePath)
	if file == "" {
		return nil, errors.New("unspecified db file name")
	}
	db := dbm.NewDB(file, dbm.GoLevelDBBackendStr, dir)

	// The nodes contained in the cache correspond to a certain protocol version.
	// Flush all nodes if the version doesn't match.
	currentVer := make([]byte, binary.MaxVarintLen64)
	currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]

	blob := db.Get(nodeDBVersionKey)
	if blob == nil {
		db.Set(nodeDBVersionKey, currentVer)
	} else if !bytes.Equal(blob, currentVer) {
		db.Close()
		if err := os.RemoveAll(filePath + ".db"); err != nil {
			return nil, err
		}
		return newPersistentNodeDB(filePath, version, self)
	}

	return &nodeDB{
		lvl:  db,
		self: self,
		quit: make(chan struct{}),
	}, nil
}
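// exampleOpenNodeDB is a minimal illustrative sketch of how the constructors
// above might be driven by calling code: an empty path yields a purely
// in-memory store, while a file path opens (or, on a version mismatch, wipes
// and reopens) the persistent leveldb store. The "nodes" filename and the
// version constant 1 are assumptions made for this example only.
func exampleOpenNodeDB(dataDir string, self NodeID) (*nodeDB, error) {
	if dataDir == "" {
		// No directory configured: ephemeral database, nothing touches disk.
		return newNodeDB("", 1, self)
	}
	// Persistent database under <dataDir>/nodes, flushed whenever the version changes.
	return newNodeDB(path.Join(dataDir, "nodes"), 1, self)
}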
// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id NodeID, field string) []byte {
	if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
		return []byte(field)
	}
	return append(nodeDBItemPrefix, append(id[:], field...)...)
}

// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id NodeID, field string) {
	// If the key is not of a node, return it plainly
	if !bytes.HasPrefix(key, nodeDBItemPrefix) {
		return NodeID{}, string(key)
	}
	// Otherwise split the id and field
	item := key[len(nodeDBItemPrefix):]
	copy(id[:], item[:len(id)])
	field = string(item[len(id):])

	return id, field
}

// fetchInt64 retrieves an integer associated with a particular database key.
func (db *nodeDB) fetchInt64(key []byte) int64 {
	blob := db.lvl.Get(key)
	if blob == nil {
		return 0
	}
	val, read := binary.Varint(blob)
	if read <= 0 {
		return 0
	}
	return val
}

// storeInt64 stores an integer under a particular database key as a varint.
func (db *nodeDB) storeInt64(key []byte, n int64) {
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutVarint(blob, n)]
	db.lvl.Set(key, blob)
}

// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
	var (
		n    = int(0)
		node = new(Node)
	)

	key := makeKey(id, nodeDBDiscoverRoot)
	rawData := db.lvl.Get(key)

	var err error
	wire.ReadBinary(node, bytes.NewReader(rawData), 0, &n, &err)
	if err != nil {
		log.WithFields(log.Fields{"module": logModule, "key": key, "node": node, "error": err}).Warn("failed to read node from db")
		return nil
	}

	node.sha = crypto.Sha256Hash(node.ID[:])
	return node
}

// updateNode inserts - potentially overwriting - a node into the peer database.
func (db *nodeDB) updateNode(node *Node) error {
	var (
		n    = int(0)
		err  = error(nil)
		blob = new(bytes.Buffer)
	)

	wire.WriteBinary(node, blob, &n, &err)
	if err != nil {
		return err
	}

	db.lvl.Set(makeKey(node.ID, nodeDBDiscoverRoot), blob.Bytes())
	return nil
}

// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) {
	deleter := db.lvl.IteratorPrefix(makeKey(id, ""))
	for deleter.Next() {
		db.lvl.Delete(deleter.Key())
	}
}

// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data evacuation only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *nodeDB) ensureExpirer() {
	db.runner.Do(func() { go db.expirer() })
}
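// exampleKeyRoundTrip is an illustrative sketch (a hypothetical helper, not
// used by this package) of the key schema handled by makeKey/splitKey above:
// node-scoped entries are "n:" + <raw node id> + <field>, while the nil node
// id collapses to the bare field string, so the two functions invert each other.
func exampleKeyRoundTrip(id NodeID) bool {
	key := makeKey(id, nodeDBDiscoverPong)
	gotID, gotField := splitKey(key)
	return gotID == id && gotField == nodeDBDiscoverPong
}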
// expirer should be started in a goroutine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *nodeDB) expirer() {
	tick := time.NewTicker(nodeDBCleanupCycle)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			if err := db.expireNodes(); err != nil {
				log.WithFields(log.Fields{"module": logModule, "error": err}).Error("Failed to expire nodedb items")
			}
		case <-db.quit:
			return
		}
	}
}

// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
	threshold := time.Now().Add(-nodeDBNodeExpiration)

	// Find discovered nodes that are older than the allowance
	it := db.lvl.Iterator()
	defer it.Release()

	for it.Next() {
		// Skip the item if not a discovery node
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		// Skip the node if not expired yet (and not self)
		if !bytes.Equal(id[:], db.self[:]) {
			if seen := db.lastPong(id); seen.After(threshold) {
				continue
			}
		}
		// Otherwise delete all associated information
		db.deleteNode(id)
	}

	return nil
}

// lastPing retrieves the time of the last ping packet sent to a remote node,
// requesting bonding.
func (db *nodeDB) lastPing(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}

// updateLastPing updates the last time we tried contacting a remote node.
func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) {
	db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}

// lastPong retrieves the time of the last successful contact from a remote node.
func (db *nodeDB) lastPong(id NodeID) time.Time {
	return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}

// updateLastPong updates the last time a remote node successfully contacted us.
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) {
	db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}

// findFails retrieves the number of findnode failures since bonding.
func (db *nodeDB) findFails(id NodeID) int {
	return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}

// updateFindFails updates the number of findnode failures since bonding.
func (db *nodeDB) updateFindFails(id NodeID, fails int) {
	db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}
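// exampleShouldExpire is an illustrative sketch (a hypothetical helper, not
// used by this package) of the policy applied by expireNodes above: an entry
// is dropped when it belongs to our own node id, or when its last pong is
// older than nodeDBNodeExpiration.
func exampleShouldExpire(db *nodeDB, id NodeID) bool {
	if bytes.Equal(id[:], db.self[:]) {
		return true // entries for our own id are always purged
	}
	threshold := time.Now().Add(-nodeDBNodeExpiration)
	return !db.lastPong(id).After(threshold)
}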
// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
	var (
		now   = time.Now()
		nodes = make([]*Node, 0, n)
		it    = db.lvl.Iterator()
		id    NodeID
	)
	defer it.Release()

seek:
	for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
		// Seek to a random entry. The first byte is incremented by a
		// random amount each time in order to increase the likelihood
		// of hitting all existing nodes in very small databases.
		ctr := id[0]
		if _, err := rand.Read(id[:]); err != nil {
			log.WithFields(log.Fields{"module": logModule, "error": err}).Warn("failed to read random data")
		}
		id[0] = ctr + id[0]%16
		it.Seek(makeKey(id, nodeDBDiscoverRoot))

		n := nextNode(it)
		if n == nil {
			id[0] = 0
			continue seek // iterator exhausted
		}
		if n.ID == db.self {
			continue seek
		}
		if now.Sub(db.lastPong(n.ID)) > maxAge {
			continue seek
		}
		for i := range nodes {
			if nodes[i].ID == n.ID {
				continue seek // duplicate
			}
		}
		nodes = append(nodes, n)
	}
	return nodes
}

// fetchTopicRegTickets retrieves the issued/used topic-registration ticket
// counters stored for a node.
func (db *nodeDB) fetchTopicRegTickets(id NodeID) (issued, used uint32) {
	key := makeKey(id, nodeDBTopicRegTickets)
	blob := db.lvl.Get(key)

	if len(blob) != 8 {
		return 0, 0
	}
	issued = binary.BigEndian.Uint32(blob[0:4])
	used = binary.BigEndian.Uint32(blob[4:8])
	return
}

// updateTopicRegTickets stores the issued/used topic-registration ticket
// counters for a node.
func (db *nodeDB) updateTopicRegTickets(id NodeID, issued, used uint32) {
	key := makeKey(id, nodeDBTopicRegTickets)
	blob := make([]byte, 8)
	binary.BigEndian.PutUint32(blob[0:4], issued)
	binary.BigEndian.PutUint32(blob[4:8], used)
	db.lvl.Set(key, blob)
}

// nextNode reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it dbm.Iterator) *Node {
	var (
		n    = int(0)
		err  = error(nil)
		node = new(Node)
	)

	for end := false; !end; end = !it.Next() {
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}

		wire.ReadBinary(node, bytes.NewReader(it.Value()), 0, &n, &err)
		if err != nil {
			log.WithFields(log.Fields{"module": logModule, "id": id, "error": err}).Error("invalid node")
			continue
		}

		return node
	}
	return nil
}

// close flushes and closes the database files.
func (db *nodeDB) close() {
	close(db.quit)
	db.lvl.Close()
}
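// exampleRecordBondedNode is an illustrative sketch (a hypothetical helper, not
// part of this package's API) showing one plausible way calling code could tie
// the pieces above together after bonding with a peer: persist the node, stamp
// its pong time so querySeeds and the expirer treat it as alive, and make sure
// the background expiration loop is running.
func exampleRecordBondedNode(db *nodeDB, node *Node) error {
	if err := db.updateNode(node); err != nil {
		return err
	}
	db.updateLastPong(node.ID, time.Now())
	db.ensureExpirer()
	return nil
}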