github.com/theQRL/go-zond@v0.1.1/p2p/discover/table.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package discover implements the Node Discovery Protocol.
//
// The Node Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discover

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"net"
	"sort"
	"sync"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/metrics"
	"github.com/theQRL/go-zond/p2p/enode"
	"github.com/theQRL/go-zond/p2p/netutil"
)

const (
	alpha           = 3  // Kademlia concurrency factor
	bucketSize      = 16 // Kademlia bucket size
	maxReplacements = 10 // Size of per-bucket replacement list

	// We keep buckets for the upper 1/15 of distances because
	// it's very unlikely we'll ever encounter a node that's closer.
	hashBits          = len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // Number of buckets
	bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket

	// IP address limits.
	bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
	tableIPLimit, tableSubnet   = 10, 24

	copyNodesInterval = 30 * time.Second
	seedMinTableTime  = 5 * time.Minute
	seedCount         = 30
	seedMaxAge        = 5 * 24 * time.Hour
)

// Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps
// itself up-to-date by verifying the liveness of neighbors and requesting their node
// records when announcements of a new record version are received.
type Table struct {
	mutex   sync.Mutex        // protects buckets, bucket content, nursery, rand
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*node           // bootstrap nodes
	rand    *mrand.Rand       // source of randomness, periodically reseeded
	ips     netutil.DistinctNetSet

	db  *enode.DB // database of known nodes
	net transport
	cfg Config
	log log.Logger

	// loop channels
	refreshReq chan chan struct{}
	initDone   chan struct{}
	closeReq   chan struct{}
	closed     chan struct{}

	nodeAddedHook   func(*bucket, *node)
	nodeRemovedHook func(*bucket, *node)
}

// transport is implemented by the UDP transports.
type transport interface {
	Self() *enode.Node
	RequestENR(*enode.Node) (*enode.Node, error)
	lookupRandom() []*enode.Node
	lookupSelf() []*enode.Node
	ping(*enode.Node) (seq uint64, err error)
}
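
// What follows is an illustrative sketch, not part of the original file: a
// minimal no-op implementation showing what the transport contract above
// requires. Any real UDP transport must answer these five calls; the name
// nullTransport and its trivial behavior are assumptions made for the example.
type nullTransport struct{ self *enode.Node }

func (t *nullTransport) Self() *enode.Node { return t.self } // must return a valid node for a working table

func (t *nullTransport) RequestENR(n *enode.Node) (*enode.Node, error) { return n, nil }
func (t *nullTransport) lookupRandom() []*enode.Node                   { return nil }
func (t *nullTransport) lookupSelf() []*enode.Node                     { return nil }
func (t *nullTransport) ping(*enode.Node) (seq uint64, err error)      { return 0, nil }

// Compile-time check that the sketch satisfies the interface.
var _ transport = (*nullTransport)(nil)
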
// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
	entries      []*node // live entries, sorted by time of last contact
	replacements []*node // recently seen nodes to be used if revalidation fails
	ips          netutil.DistinctNetSet
	index        int
}

func newTable(t transport, db *enode.DB, cfg Config) (*Table, error) {
	cfg = cfg.withDefaults()
	tab := &Table{
		net:        t,
		db:         db,
		cfg:        cfg,
		log:        cfg.Log,
		refreshReq: make(chan chan struct{}),
		initDone:   make(chan struct{}),
		closeReq:   make(chan struct{}),
		closed:     make(chan struct{}),
		rand:       mrand.New(mrand.NewSource(0)),
		ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
	}
	if err := tab.setFallbackNodes(cfg.Bootnodes); err != nil {
		return nil, err
	}
	for i := range tab.buckets {
		tab.buckets[i] = &bucket{
			index: i,
			ips:   netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
		}
	}
	tab.seedRand()
	tab.loadSeedNodes()

	return tab, nil
}

func newMeteredTable(t transport, db *enode.DB, cfg Config) (*Table, error) {
	tab, err := newTable(t, db, cfg)
	if err != nil {
		return nil, err
	}
	if metrics.Enabled {
		tab.nodeAddedHook = func(b *bucket, n *node) {
			bucketsCounter[b.index].Inc(1)
		}
		tab.nodeRemovedHook = func(b *bucket, n *node) {
			bucketsCounter[b.index].Dec(1)
		}
	}
	return tab, nil
}
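
// Construction sketch (illustrative only; the helper name and config values
// are assumptions, not part of the original file). The caller supplies a UDP
// transport; the empty path hands enode.OpenDB an in-memory node database.
func exampleNewTable(tr transport) (*Table, error) {
	db, err := enode.OpenDB("") // "" selects an in-memory, temporary database
	if err != nil {
		return nil, err
	}
	cfg := Config{
		PingInterval:    10 * time.Second, // assumed revalidation cadence
		RefreshInterval: 30 * time.Minute, // assumed refresh cadence
	}
	return newTable(tr, db, cfg)
}
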
// Nodes returns all nodes contained in the table.
func (tab *Table) Nodes() []*enode.Node {
	if !tab.isInitDone() {
		return nil
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	var nodes []*enode.Node
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			nodes = append(nodes, unwrapNode(n))
		}
	}
	return nodes
}

func (tab *Table) self() *enode.Node {
	return tab.net.Self()
}

func (tab *Table) seedRand() {
	var b [8]byte
	crand.Read(b[:])

	tab.mutex.Lock()
	tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
	tab.mutex.Unlock()
}

// getNode returns the node with the given ID or nil if it isn't in the table.
func (tab *Table) getNode(id enode.ID) *enode.Node {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	b := tab.bucket(id)
	for _, e := range b.entries {
		if e.ID() == id {
			return unwrapNode(e)
		}
	}
	return nil
}

// close terminates the network listener and flushes the node database.
func (tab *Table) close() {
	close(tab.closeReq)
	<-tab.closed
}

// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
	nursery := make([]*node, 0, len(nodes))
	for _, n := range nodes {
		if err := n.ValidateComplete(); err != nil {
			return fmt.Errorf("bad bootstrap node %q: %v", n, err)
		}
		if tab.cfg.NetRestrict != nil && !tab.cfg.NetRestrict.Contains(n.IP()) {
			tab.log.Error("Bootstrap node filtered by netrestrict", "id", n.ID(), "ip", n.IP())
			continue
		}
		nursery = append(nursery, wrapNode(n))
	}
	tab.nursery = nursery
	return nil
}

// isInitDone returns whether the table's initial seeding procedure has completed.
func (tab *Table) isInitDone() bool {
	select {
	case <-tab.initDone:
		return true
	default:
		return false
	}
}

func (tab *Table) refresh() <-chan struct{} {
	done := make(chan struct{})
	select {
	case tab.refreshReq <- done:
	case <-tab.closeReq:
		close(done)
	}
	return done
}

// loop schedules runs of doRefresh, doRevalidate and copyLiveNodes.
func (tab *Table) loop() {
	var (
		revalidate     = time.NewTimer(tab.nextRevalidateTime())
		refresh        = time.NewTimer(tab.nextRefreshTime())
		copyNodes      = time.NewTicker(copyNodesInterval)
		refreshDone    = make(chan struct{})           // where doRefresh reports completion
		revalidateDone chan struct{}                   // where doRevalidate reports completion
		waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
	)
	defer refresh.Stop()
	defer revalidate.Stop()
	defer copyNodes.Stop()

	// Start initial refresh.
	go tab.doRefresh(refreshDone)

loop:
	for {
		select {
		case <-refresh.C:
			tab.seedRand()
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case req := <-tab.refreshReq:
			waiting = append(waiting, req)
			if refreshDone == nil {
				refreshDone = make(chan struct{})
				go tab.doRefresh(refreshDone)
			}
		case <-refreshDone:
			for _, ch := range waiting {
				close(ch)
			}
			waiting, refreshDone = nil, nil
			refresh.Reset(tab.nextRefreshTime())
		case <-revalidate.C:
			revalidateDone = make(chan struct{})
			go tab.doRevalidate(revalidateDone)
		case <-revalidateDone:
			revalidate.Reset(tab.nextRevalidateTime())
			revalidateDone = nil
		case <-copyNodes.C:
			go tab.copyLiveNodes()
		case <-tab.closeReq:
			break loop
		}
	}

	if refreshDone != nil {
		<-refreshDone
	}
	for _, ch := range waiting {
		close(ch)
	}
	if revalidateDone != nil {
		<-revalidateDone
	}
	close(tab.closed)
}
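
// Usage sketch (illustrative only; the helper is an assumption, not part of
// the original file): a caller asks the loop above for a refresh and blocks
// until it completes. Requests arriving while a doRefresh run is already in
// flight join the waiting list and are coalesced into that single run, so
// concurrent callers all unblock together when it finishes.
func exampleAwaitRefresh(tab *Table) {
	done := tab.refresh() // closed by loop() when doRefresh reports completion
	<-done
}
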
// doRefresh performs a lookup for a random target to keep buckets full. Seed nodes are
// inserted if the table is empty (initial bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	tab.loadSeedNodes()

	// Run self lookup to discover new neighbor nodes.
	tab.net.lookupSelf()

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a few lookups with a random target instead.
	for i := 0; i < 3; i++ {
		tab.net.lookupRandom()
	}
}

func (tab *Table) loadSeedNodes() {
	seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
	seeds = append(seeds, tab.nursery...)
	for i := range seeds {
		seed := seeds[i]
		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) }}
		tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
		tab.addSeenNode(seed)
	}
}

// doRevalidate checks that the last node in a random bucket is still live and replaces or
// deletes the node if it isn't.
func (tab *Table) doRevalidate(done chan<- struct{}) {
	defer func() { done <- struct{}{} }()

	last, bi := tab.nodeToRevalidate()
	if last == nil {
		// No non-empty bucket found.
		return
	}

	// Ping the selected node and wait for a pong.
	remoteSeq, err := tab.net.ping(unwrapNode(last))

	// Also fetch record if the node replied and returned a higher sequence number.
	if last.Seq() < remoteSeq {
		n, err := tab.net.RequestENR(unwrapNode(last))
		if err != nil {
			tab.log.Debug("ENR request failed", "id", last.ID(), "addr", last.addr(), "err", err)
		} else {
			last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks}
		}
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.buckets[bi]
	if err == nil {
		// The node responded, move it to the front.
		last.livenessChecks++
		tab.log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
		tab.bumpInBucket(b, last)
		return
	}
	// No reply received, pick a replacement or delete the node if there aren't
	// any replacements.
	if r := tab.replace(b, last); r != nil {
		tab.log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
	} else {
		tab.log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
	}
}

// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, bi = range tab.rand.Perm(len(tab.buckets)) {
		b := tab.buckets[bi]
		if len(b.entries) > 0 {
			last := b.entries[len(b.entries)-1]
			return last, bi
		}
	}
	return nil, 0
}

func (tab *Table) nextRevalidateTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return time.Duration(tab.rand.Int63n(int64(tab.cfg.PingInterval)))
}

func (tab *Table) nextRefreshTime() time.Duration {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	half := tab.cfg.RefreshInterval / 2
	return half + time.Duration(tab.rand.Int63n(int64(half)))
}
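
// Worked example of the two jittered timers above, using assumed config
// values: with PingInterval = 10s, a revalidation fires after a uniform
// random delay in [0s, 10s); with RefreshInterval = 30m, a refresh fires
// after 15m plus a uniform delay in [0m, 15m), i.e. somewhere in [15m, 30m).
// The randomization keeps independent nodes from synchronizing their
// maintenance traffic.
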
// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer than seedMinTableTime.
func (tab *Table) copyLiveNodes() {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	now := time.Now()
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime {
				tab.db.UpdateNode(unwrapNode(n))
			}
		}
	}
}

// findnodeByID returns the n nodes in the table that are closest to the given id.
// This is used by the FINDNODE/v4 handler.
//
// The preferLive parameter says whether the caller wants liveness-checked results. If
// preferLive is true and the table contains any verified nodes, the result will not
// contain unverified nodes. However, if there are no verified nodes at all, the result
// will contain unverified nodes.
func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	// Scan all buckets. There might be a better way to do this, but there aren't that many
	// buckets, so this solution should be fine. The worst-case complexity of this loop
	// is O(tab.len() * nresults).
	nodes := &nodesByDistance{target: target}
	liveNodes := &nodesByDistance{target: target}
	for _, b := range &tab.buckets {
		for _, n := range b.entries {
			nodes.push(n, nresults)
			if preferLive && n.livenessChecks > 0 {
				liveNodes.push(n, nresults)
			}
		}
	}

	if preferLive && len(liveNodes.entries) > 0 {
		return liveNodes
	}
	return nodes
}

// len returns the number of nodes in the table.
func (tab *Table) len() (n int) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	for _, b := range &tab.buckets {
		n += len(b.entries)
	}
	return n
}

// bucketLen returns the number of nodes in the bucket for the given ID.
func (tab *Table) bucketLen(id enode.ID) int {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	return len(tab.bucket(id).entries)
}

// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(id enode.ID) *bucket {
	d := enode.LogDist(tab.self().ID(), id)
	return tab.bucketAtDistance(d)
}

func (tab *Table) bucketAtDistance(d int) *bucket {
	if d <= bucketMinDistance {
		return tab.buckets[0]
	}
	return tab.buckets[d-bucketMinDistance-1]
}
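
// Worked example of the mapping above, for 32-byte hashes: hashBits = 256,
// nBuckets = 256/15 = 17, and bucketMinDistance = 256-17 = 239. All log
// distances up to and including 240 share buckets[0], while distances
// 241..256 map to buckets[1..16]; for example, bucketAtDistance(239) and
// bucketAtDistance(240) both yield buckets[0], and bucketAtDistance(256)
// yields buckets[16]. Since almost all node IDs fall at the largest
// distances from any given ID, these are the only distance classes worth
// tracking individually.
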
// addSeenNode adds a node which may or may not be live to the end of a bucket. If the
// bucket has space available, adding the node succeeds immediately. Otherwise, the node is
// added to the replacements list.
//
// The caller must not hold tab.mutex.
func (tab *Table) addSeenNode(n *node) {
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if contains(b.entries, n.ID()) {
		// Already in bucket, don't add.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}

	// Add to end of bucket:
	b.entries = append(b.entries, n)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()

	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(b, n)
	}
}

// addVerifiedNode adds a node whose existence has been verified recently to the front of a
// bucket. If the node is already in the bucket, it is moved to the front. If the bucket
// has no space, the node is added to the replacements list.
//
// There is an additional safety measure: if the table is still initializing the node
// is not added. This prevents an attack where the table could be filled by just sending
// ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addVerifiedNode(n *node) {
	if !tab.isInitDone() {
		return
	}
	if n.ID() == tab.self().ID() {
		return
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	b := tab.bucket(n.ID())
	if tab.bumpInBucket(b, n) {
		// Already in bucket, moved to front.
		return
	}
	if len(b.entries) >= bucketSize {
		// Bucket full, maybe add as replacement.
		tab.addReplacement(b, n)
		return
	}
	if !tab.addIP(b, n.IP()) {
		// Can't add: IP limit reached.
		return
	}

	// Add to front of bucket.
	b.entries, _ = pushNode(b.entries, n, bucketSize)
	b.replacements = deleteNode(b.replacements, n)
	n.addedAt = time.Now()

	if tab.nodeAddedHook != nil {
		tab.nodeAddedHook(b, n)
	}
}

// delete removes an entry from the node table. It is used to evacuate dead nodes.
func (tab *Table) delete(node *node) {
	tab.mutex.Lock()
	defer tab.mutex.Unlock()

	tab.deleteInBucket(tab.bucket(node.ID()), node)
}

func (tab *Table) addIP(b *bucket, ip net.IP) bool {
	if len(ip) == 0 {
		return false // Nodes without IP cannot be added.
	}
	if netutil.IsLAN(ip) {
		return true
	}
	if !tab.ips.Add(ip) {
		tab.log.Debug("IP exceeds table limit", "ip", ip)
		return false
	}
	if !b.ips.Add(ip) {
		tab.log.Debug("IP exceeds bucket limit", "ip", ip)
		tab.ips.Remove(ip)
		return false
	}
	return true
}

func (tab *Table) removeIP(b *bucket, ip net.IP) {
	if netutil.IsLAN(ip) {
		return
	}
	tab.ips.Remove(ip)
	b.ips.Remove(ip)
}
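
// Illustrative sketch of the /24 diversity rule that addIP enforces through
// DistinctNetSet (the helper and the addresses are assumptions made for the
// example, not part of the original file).
func exampleBucketIPLimit() {
	set := netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit}
	fmt.Println(set.Add(net.ParseIP("203.0.113.1")))  // true
	fmt.Println(set.Add(net.ParseIP("203.0.113.2")))  // true: second address in 203.0.113.0/24
	fmt.Println(set.Add(net.ParseIP("203.0.113.3")))  // false: bucketIPLimit (2) reached for this /24
	fmt.Println(set.Add(net.ParseIP("198.51.100.1"))) // true: a different /24
}
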
func (tab *Table) addReplacement(b *bucket, n *node) {
	for _, e := range b.replacements {
		if e.ID() == n.ID() {
			return // already in list
		}
	}
	if !tab.addIP(b, n.IP()) {
		return
	}
	var removed *node
	b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
	if removed != nil {
		tab.removeIP(b, removed.IP())
	}
}

// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active.
func (tab *Table) replace(b *bucket, last *node) *node {
	if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
		// Entry has moved, don't replace it.
		return nil
	}
	// Still the last entry.
	if len(b.replacements) == 0 {
		tab.deleteInBucket(b, last)
		return nil
	}
	r := b.replacements[tab.rand.Intn(len(b.replacements))]
	b.replacements = deleteNode(b.replacements, r)
	b.entries[len(b.entries)-1] = r
	tab.removeIP(b, last.IP())
	return r
}

// bumpInBucket moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (tab *Table) bumpInBucket(b *bucket, n *node) bool {
	for i := range b.entries {
		if b.entries[i].ID() == n.ID() {
			if !n.IP().Equal(b.entries[i].IP()) {
				// Endpoint has changed, ensure that the new IP fits into table limits.
				tab.removeIP(b, b.entries[i].IP())
				if !tab.addIP(b, n.IP()) {
					// It doesn't, put the previous one back.
					tab.addIP(b, b.entries[i].IP())
					return false
				}
			}
			// Move it to the front.
			copy(b.entries[1:], b.entries[:i])
			b.entries[0] = n
			return true
		}
	}
	return false
}

func (tab *Table) deleteInBucket(b *bucket, n *node) {
	// Check if the node is actually in the bucket so the removed hook
	// isn't called multiple times for the same node.
	if !contains(b.entries, n.ID()) {
		return
	}
	b.entries = deleteNode(b.entries, n)
	tab.removeIP(b, n.IP())
	if tab.nodeRemovedHook != nil {
		tab.nodeRemovedHook(b, n)
	}
}

func contains(ns []*node, id enode.ID) bool {
	for _, n := range ns {
		if n.ID() == id {
			return true
		}
	}
	return false
}

// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
	if len(list) < max {
		list = append(list, nil)
	}
	removed := list[len(list)-1]
	copy(list[1:], list)
	list[0] = n
	return list, removed
}

// deleteNode removes n from list.
func deleteNode(list []*node, n *node) []*node {
	for i := range list {
		if list[i].ID() == n.ID() {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

// nodesByDistance is a list of nodes, ordered by distance to target.
type nodesByDistance struct {
	entries []*node
	target  enode.ID
}

// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *node, maxElems int) {
	ix := sort.Search(len(h.entries), func(i int) bool {
		return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
	})

	end := len(h.entries)
	if len(h.entries) < maxElems {
		h.entries = append(h.entries, n)
	}
	if ix < end {
		// Slide existing entries down to make room.
		// This will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
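
// Illustrative sketch of the capped, sorted insertion pattern used by push
// above, shown with plain ints so it is self-contained (the helper is an
// assumption, not part of the original file); push applies the same logic
// with enode.DistCmp as the ordering.
func examplePushSorted(list []int, v, maxElems int) []int {
	ix := sort.Search(len(list), func(i int) bool { return list[i] > v })
	end := len(list)
	if len(list) < maxElems {
		list = append(list, v) // room left: grow the slice by one
	}
	if ix < end {
		copy(list[ix+1:], list[ix:]) // slide worse entries down one slot
		list[ix] = v                 // overwrites the freed slot (or the value just appended)
	}
	// If ix == end and the list was already full, v is farther than every
	// kept entry and is dropped, exactly as push drops distant nodes.
	return list
}
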