github.com/aquanetwork/aquachain@v1.7.8/p2p/discv5/table.go

// Copyright 2016 The aquachain Authors
// This file is part of the aquachain library.
//
// The aquachain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The aquachain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.

// Package discv5 implements the RLPx v5 Topic Discovery Protocol.
//
// The Topic Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discv5

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"net"
	"sort"

	"gitlab.com/aquachain/aquachain/common"
)

const (
	alpha      = 3  // Kademlia concurrency factor
	bucketSize = 16 // Kademlia bucket size
	hashBits   = len(common.Hash{}) * 8
	nBuckets   = hashBits + 1 // Number of buckets

	maxBondingPingPongs = 16
	maxFindnodeFailures = 5
)

type Table struct {
	count         int               // number of nodes
	buckets       [nBuckets]*bucket // index of known nodes by distance
	nodeAddedHook func(*Node)       // for testing
	self          *Node             // metadata of the local node
}

// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*Node
	replacements []*Node
}

func newTable(ourID NodeID, ourAddr *net.UDPAddr) *Table {
	self := NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port))
	tab := &Table{self: self}
	for i := range tab.buckets {
		tab.buckets[i] = new(bucket)
	}
	return tab
}

const printTable = false

// chooseBucketRefreshTarget selects random refresh targets to keep all Kademlia
// buckets filled with live connections and keep the network topology healthy.
// This requires selecting addresses closer to our own with a higher probability
// in order to refresh closer buckets too.
//
// This algorithm approximates the distance distribution of existing nodes in the
// table by selecting a random node from the table and selecting a target address
// with a distance less than twice that of the selected node.
// This algorithm will be improved later to specifically target the least recently
// used buckets.
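//
// As a small worked illustration (shortened to 8-bit prefixes for readability;
// the code below actually uses the first 8 bytes of the hashes): if our own
// prefix is 0b10100000 and the sampled node's prefix is 0b10110000, their XOR
// distance is 16, so the target prefix is drawn as 0b10100000 XOR r with r
// uniform in [0, 32), and the remaining bytes of the target hash are filled
// with random data.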
func (tab *Table) chooseBucketRefreshTarget() common.Hash {
	entries := 0
	if printTable {
		fmt.Println()
	}
	for i, b := range tab.buckets {
		entries += len(b.entries)
		if printTable {
			for _, e := range b.entries {
				fmt.Println(i, e.state, e.addr().String(), e.ID.String(), e.sha.Hex())
			}
		}
	}

	prefix := binary.BigEndian.Uint64(tab.self.sha[0:8])
	dist := ^uint64(0)
	entry := int(randUint(uint32(entries + 1)))
	for _, b := range tab.buckets {
		if entry < len(b.entries) {
			n := b.entries[entry]
			dist = binary.BigEndian.Uint64(n.sha[0:8]) ^ prefix
			break
		}
		entry -= len(b.entries)
	}

	ddist := ^uint64(0)
	if dist+dist > dist {
		ddist = dist + dist
	}
	targetPrefix := prefix ^ randUint64n(ddist)

	var target common.Hash
	binary.BigEndian.PutUint64(target[0:8], targetPrefix)
	rand.Read(target[8:])
	return target
}

// readRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (tab *Table) readRandomNodes(buf []*Node) (n int) {
	// TODO: tree-based buckets would help here
	// Find all non-empty buckets and get a fresh slice of their entries.
	var buckets [][]*Node
	for _, b := range tab.buckets {
		if len(b.entries) > 0 {
			buckets = append(buckets, b.entries[:])
		}
	}
	if len(buckets) == 0 {
		return 0
	}
	// Shuffle the buckets.
	for i := uint32(len(buckets)) - 1; i > 0; i-- {
		j := randUint(i)
		buckets[i], buckets[j] = buckets[j], buckets[i]
	}
	// Move head of each bucket into buf, removing buckets that become empty.
	var i, j int
	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
		b := buckets[j]
		buf[i] = &(*b[0])
		buckets[j] = b[1:]
		if len(b) == 1 {
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			// Count the node written in this iteration before stopping early.
			i++
			break
		}
	}
	return i
}

func randUint(max uint32) uint32 {
	if max < 2 {
		return 0
	}
	var b [4]byte
	rand.Read(b[:])
	return binary.BigEndian.Uint32(b[:]) % max
}

func randUint64n(max uint64) uint64 {
	if max < 2 {
		return 0
	}
	var b [8]byte
	rand.Read(b[:])
	return binary.BigEndian.Uint64(b[:]) % max
}

// closest returns the n nodes in the table that are closest to the
// given id. The caller must ensure the table is not modified concurrently.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
	// This is a very wasteful way to find the closest nodes but
	// obviously correct. I believe that tree-based buckets would make
	// this easier to implement efficiently.
	close := &nodesByDistance{target: target}
	for _, b := range tab.buckets {
		for _, n := range b.entries {
			close.push(n, nresults)
		}
	}
	return close
}

// add attempts to add the given node to its corresponding bucket. If the
// bucket has space available, adding the node succeeds immediately.
// Otherwise, the node is added to the replacement cache for the bucket.
func (tab *Table) add(n *Node) (contested *Node) {
	//fmt.Println("add", n.addr().String(), n.ID.String(), n.sha.Hex())
	if n.ID == tab.self.ID {
		return
	}
	b := tab.buckets[logdist(tab.self.sha, n.sha)]
	switch {
	case b.bump(n):
		// n exists in b.
		return nil
	case len(b.entries) < bucketSize:
		// b has space available.
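		// Entries are kept ordered by most recent activity (see the bucket
		// type comment), so the new node goes to the front of the bucket.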
		b.addFront(n)
		tab.count++
		if tab.nodeAddedHook != nil {
			tab.nodeAddedHook(n)
		}
		return nil
	default:
		// b has no space left, add to replacement cache
		// and revalidate the last entry.
		// TODO: drop previous node
		b.replacements = append(b.replacements, n)
		if len(b.replacements) > bucketSize {
			copy(b.replacements, b.replacements[1:])
			b.replacements = b.replacements[:len(b.replacements)-1]
		}
		return b.entries[len(b.entries)-1]
	}
}

// stuff adds nodes to the end of their corresponding bucket
// if the bucket is not full.
func (tab *Table) stuff(nodes []*Node) {
outer:
	for _, n := range nodes {
		if n.ID == tab.self.ID {
			continue // don't add self
		}
		bucket := tab.buckets[logdist(tab.self.sha, n.sha)]
		for i := range bucket.entries {
			if bucket.entries[i].ID == n.ID {
				continue outer // already in bucket
			}
		}
		if len(bucket.entries) < bucketSize {
			bucket.entries = append(bucket.entries, n)
			tab.count++
			if tab.nodeAddedHook != nil {
				tab.nodeAddedHook(n)
			}
		}
	}
}

// delete removes an entry from the node table (used to evacuate
// failed/non-bonded discovery peers).
func (tab *Table) delete(node *Node) {
	//fmt.Println("delete", node.addr().String(), node.ID.String(), node.sha.Hex())
	bucket := tab.buckets[logdist(tab.self.sha, node.sha)]
	for i := range bucket.entries {
		if bucket.entries[i].ID == node.ID {
			bucket.entries = append(bucket.entries[:i], bucket.entries[i+1:]...)
			tab.count--
			return
		}
	}
}

func (tab *Table) deleteReplace(node *Node) {
	b := tab.buckets[logdist(tab.self.sha, node.sha)]
	i := 0
	for i < len(b.entries) {
		if b.entries[i].ID == node.ID {
			b.entries = append(b.entries[:i], b.entries[i+1:]...)
			tab.count--
		} else {
			i++
		}
	}
	// refill from replacement cache
	// TODO: maybe use random index
	if len(b.entries) < bucketSize && len(b.replacements) > 0 {
		ri := len(b.replacements) - 1
		b.addFront(b.replacements[ri])
		tab.count++
		b.replacements[ri] = nil
		b.replacements = b.replacements[:ri]
	}
}

func (b *bucket) addFront(n *Node) {
	b.entries = append(b.entries, nil)
	copy(b.entries[1:], b.entries)
	b.entries[0] = n
}

func (b *bucket) bump(n *Node) bool {
	for i := range b.entries {
		if b.entries[i].ID == n.ID {
			// move it to the front
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
	entries []*Node
	target  common.Hash
}

// push adds the given node to the list, keeping the total size at or below maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return distcmp(h.target, h.entries[i].sha, n.sha) > 0
	})
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
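
// exampleTableUse is an illustrative sketch, not part of the upstream file: it
// shows how the pieces of the table API in this file fit together. A table is
// created for the local node, pre-seeded with already-known nodes via stuff,
// sampled with readRandomNodes, and queried for the entries closest to a fresh
// refresh target. The function name, the buffer size of 8, and the result
// count of 3 are arbitrary values chosen for the example.
func exampleTableUse(self NodeID, addr *net.UDPAddr, seed []*Node) {
	tab := newTable(self, addr)
	tab.stuff(seed) // fill buckets without evicting existing entries

	// Sample up to 8 distinct random nodes from the table.
	buf := make([]*Node, 8)
	n := tab.readRandomNodes(buf)
	for _, node := range buf[:n] {
		fmt.Println("sampled:", node.ID.String())
	}

	// Find the 3 table entries closest to a freshly chosen refresh target.
	target := tab.chooseBucketRefreshTarget()
	for _, node := range tab.closest(target, 3).entries {
		fmt.Println("close to target:", node.ID.String())
	}
}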