github.com/theQRL/go-zond@v0.1.1/p2p/discover/table_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package discover

import (
	"crypto/ecdsa"
	"fmt"
	"math/rand"
	"net"
	"reflect"
	"testing"
	"testing/quick"
	"time"

	"github.com/theQRL/go-zond/crypto"
	"github.com/theQRL/go-zond/p2p/enode"
	"github.com/theQRL/go-zond/p2p/enr"
	"github.com/theQRL/go-zond/p2p/netutil"
)

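// TestTable_pingReplace checks that revalidation pings the last node in a bucket and
// replaces it with a newly seen node when it does not respond.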
func TestTable_pingReplace(t *testing.T) {
	run := func(newNodeResponding, lastInBucketResponding bool) {
		name := fmt.Sprintf("newNodeResponding=%t/lastInBucketResponding=%t", newNodeResponding, lastInBucketResponding)
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			testPingReplace(t, newNodeResponding, lastInBucketResponding)
		})
	}

	run(true, true)
	run(false, true)
	run(true, false)
	run(false, false)
}

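// testPingReplace fills the bucket of a known sender, marks the sender and the last
// node in the bucket as responsive or unresponsive, and checks the bucket contents
// after revalidation.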
func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
	transport := newPingRecorder()
	tab, db := newTestTable(transport)
	defer db.Close()
	defer tab.close()

	<-tab.initDone

	// Fill up the sender's bucket.
	pingKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
	pingSender := wrapNode(enode.NewV4(&pingKey.PublicKey, net.IP{127, 0, 0, 1}, 99, 99))
	last := fillBucket(tab, pingSender)

	// Add the sender as if it just pinged us. Revalidate should replace the last node in
	// its bucket if it is unresponsive. Revalidate again to ensure that the new node is
	// also pinged and removed if it does not respond.
	transport.dead[last.ID()] = !lastInBucketIsResponding
	transport.dead[pingSender.ID()] = !newNodeIsResponding
	tab.addSeenNode(pingSender)
	tab.doRevalidate(make(chan struct{}, 1))
	tab.doRevalidate(make(chan struct{}, 1))

	if !transport.pinged[last.ID()] {
		// Oldest node in bucket is pinged to see whether it is still alive.
		t.Error("table did not ping last node in bucket")
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	wantSize := bucketSize
	if !lastInBucketIsResponding && !newNodeIsResponding {
		wantSize--
	}
	if l := len(tab.bucket(pingSender.ID()).entries); l != wantSize {
		t.Errorf("wrong bucket size after bond: got %d, want %d", l, wantSize)
	}
	if found := contains(tab.bucket(pingSender.ID()).entries, last.ID()); found != lastInBucketIsResponding {
		t.Errorf("last entry found: %t, want: %t", found, lastInBucketIsResponding)
	}
	wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding
	if found := contains(tab.bucket(pingSender.ID()).entries, pingSender.ID()); found != wantNewEntry {
		t.Errorf("new entry found: %t, want: %t", found, wantNewEntry)
	}
}

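// This test checks that bumpInBucket never introduces duplicate entries in a bucket.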
func TestBucket_bumpNoDuplicates(t *testing.T) {
	t.Parallel()
	cfg := &quick.Config{
		MaxCount: 1000,
		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
		Values: func(args []reflect.Value, rand *rand.Rand) {
			// generate a random list of nodes. this will be the content of the bucket.
			n := rand.Intn(bucketSize-1) + 1
			nodes := make([]*node, n)
			for i := range nodes {
				nodes[i] = nodeAtDistance(enode.ID{}, 200, intIP(200))
			}
			args[0] = reflect.ValueOf(nodes)
			// generate random bump positions.
			bumps := make([]int, rand.Intn(100))
			for i := range bumps {
				bumps[i] = rand.Intn(len(nodes))
			}
			args[1] = reflect.ValueOf(bumps)
		},
	}

	prop := func(nodes []*node, bumps []int) (ok bool) {
		tab, db := newTestTable(newPingRecorder())
		defer db.Close()
		defer tab.close()

		b := &bucket{entries: make([]*node, len(nodes))}
		copy(b.entries, nodes)
		for i, pos := range bumps {
			tab.bumpInBucket(b, b.entries[pos])
			if hasDuplicates(b.entries) {
				t.Logf("bucket has duplicates after %d/%d bumps:", i+1, len(bumps))
				for _, n := range b.entries {
					t.Logf("  %p", n)
				}
				return false
			}
		}
		checkIPLimitInvariant(t, tab)
		return true
	}
	if err := quick.Check(prop, cfg); err != nil {
		t.Error(err)
	}
}

// This checks that the table-wide IP limit is applied correctly.
func TestTable_IPLimit(t *testing.T) {
	transport := newPingRecorder()
	tab, db := newTestTable(transport)
	defer db.Close()
	defer tab.close()

	for i := 0; i < tableIPLimit+1; i++ {
		n := nodeAtDistance(tab.self().ID(), i, net.IP{172, 0, 1, byte(i)})
		tab.addSeenNode(n)
	}
	if tab.len() > tableIPLimit {
		t.Errorf("too many nodes in table")
	}
	checkIPLimitInvariant(t, tab)
}

// This checks that the per-bucket IP limit is applied correctly.
func TestTable_BucketIPLimit(t *testing.T) {
	transport := newPingRecorder()
	tab, db := newTestTable(transport)
	defer db.Close()
	defer tab.close()

	d := 3
	for i := 0; i < bucketIPLimit+1; i++ {
		n := nodeAtDistance(tab.self().ID(), d, net.IP{172, 0, 1, byte(i)})
		tab.addSeenNode(n)
	}
	if tab.len() > bucketIPLimit {
		t.Errorf("too many nodes in table")
	}
	checkIPLimitInvariant(t, tab)
}

// checkIPLimitInvariant checks that the IP limit sets contain an entry for every
// node in the table and no extra entries.
func checkIPLimitInvariant(t *testing.T, tab *Table) {
	t.Helper()

	tabset := netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}
	for _, b := range tab.buckets {
		for _, n := range b.entries {
			tabset.Add(n.IP())
		}
	}
	if tabset.String() != tab.ips.String() {
		t.Errorf("table IP set is incorrect:\nhave: %v\nwant: %v", tab.ips, tabset)
	}
}

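// This test checks that findnodeByID returns nodes sorted by distance to the target
// and that no closer node in the table is missing from the result.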
func TestTable_findnodeByID(t *testing.T) {
	t.Parallel()

	test := func(test *closeTest) bool {
		// for any node table, Target and N
		transport := newPingRecorder()
		tab, db := newTestTable(transport)
		defer db.Close()
		defer tab.close()
		fillTable(tab, test.All)

		// check that closest(Target, N) returns nodes
		result := tab.findnodeByID(test.Target, test.N, false).entries
		if hasDuplicates(result) {
			t.Errorf("result contains duplicates")
			return false
		}
		if !sortedByDistanceTo(test.Target, result) {
			t.Errorf("result is not sorted by distance to target")
			return false
		}

		// check that the number of results is min(N, tablen)
		wantN := test.N
		if tlen := tab.len(); tlen < test.N {
			wantN = tlen
		}
		if len(result) != wantN {
			t.Errorf("wrong number of nodes: got %d, want %d", len(result), wantN)
			return false
		} else if len(result) == 0 {
			return true // no need to check distance
		}

		// check that the result nodes have minimum distance to target.
		for _, b := range tab.buckets {
			for _, n := range b.entries {
				if contains(result, n.ID()) {
					continue // don't run the check below for nodes in result
				}
				farthestResult := result[len(result)-1].ID()
				if enode.DistCmp(test.Target, n.ID(), farthestResult) < 0 {
					t.Errorf("table contains node that is closer to target but it's not in result")
					t.Logf("  Target:          %v", test.Target)
					t.Logf("  Farthest Result: %v", farthestResult)
					t.Logf("  ID:              %v", n.ID())
					return false
				}
			}
		}
		return true
	}
	if err := quick.Check(test, quickcfg()); err != nil {
		t.Error(err)
	}
}

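// closeTest is the random test input for TestTable_findnodeByID.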
type closeTest struct {
	Self   enode.ID
	Target enode.ID
	All    []*node
	N      int
}

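// Generate implements the testing/quick Generator interface.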
func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {
	t := &closeTest{
		Self:   gen(enode.ID{}, rand).(enode.ID),
		Target: gen(enode.ID{}, rand).(enode.ID),
		N:      rand.Intn(bucketSize),
	}
	for _, id := range gen([]enode.ID{}, rand).([]enode.ID) {
		r := new(enr.Record)
		r.Set(enr.IP(genIP(rand)))
		n := wrapNode(enode.SignNull(r, id))
		n.livenessChecks = 1
		t.All = append(t.All, n)
	}
	return reflect.ValueOf(t)
}

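// This test checks that addVerifiedNode moves an updated node to the front of its
// bucket and stores the new record.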
func TestTable_addVerifiedNode(t *testing.T) {
	tab, db := newTestTable(newPingRecorder())
	<-tab.initDone
	defer db.Close()
	defer tab.close()

	// Insert two nodes.
	n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1})
	n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2})
	tab.addSeenNode(n1)
	tab.addSeenNode(n2)

	// Verify bucket content:
	bcontent := []*node{n1, n2}
	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
		t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries)
	}

	// Add a changed version of n2.
	newrec := n2.Record()
	newrec.Set(enr.IP{99, 99, 99, 99})
	newn2 := wrapNode(enode.SignNull(newrec, n2.ID()))
	tab.addVerifiedNode(newn2)

	// Check that bucket is updated correctly.
	newBcontent := []*node{newn2, n1}
	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, newBcontent) {
		t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries)
	}
	checkIPLimitInvariant(t, tab)
}

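// This test checks that addSeenNode leaves the bucket order and the record of a node
// that is already in the table unchanged.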
func TestTable_addSeenNode(t *testing.T) {
	tab, db := newTestTable(newPingRecorder())
	<-tab.initDone
	defer db.Close()
	defer tab.close()

	// Insert two nodes.
	n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1})
	n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2})
	tab.addSeenNode(n1)
	tab.addSeenNode(n2)

	// Verify bucket content:
	bcontent := []*node{n1, n2}
	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
		t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries)
	}

	// Add a changed version of n2.
	newrec := n2.Record()
	newrec.Set(enr.IP{99, 99, 99, 99})
	newn2 := wrapNode(enode.SignNull(newrec, n2.ID()))
	tab.addSeenNode(newn2)

	// Check that bucket content is unchanged.
	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
		t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries)
	}
	checkIPLimitInvariant(t, tab)
}

// This test checks that ENR updates happen during revalidation. If a node in the table
// announces a new sequence number, the new record should be pulled.
func TestTable_revalidateSyncRecord(t *testing.T) {
	transport := newPingRecorder()
	tab, db := newTestTable(transport)
	<-tab.initDone
	defer db.Close()
	defer tab.close()

	// Insert a node.
	var r enr.Record
	r.Set(enr.IP(net.IP{127, 0, 0, 1}))
	id := enode.ID{1}
	n1 := wrapNode(enode.SignNull(&r, id))
	tab.addSeenNode(n1)

	// Update the node record.
	r.Set(enr.WithEntry("foo", "bar"))
	n2 := enode.SignNull(&r, id)
	transport.updateRecord(n2)

	tab.doRevalidate(make(chan struct{}, 1))
	intable := tab.getNode(id)
	if !reflect.DeepEqual(intable, n2) {
		t.Fatalf("table contains old record with seq %d, want seq %d", intable.Seq(), n2.Seq())
	}
}

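// This test checks that nodesByDistance.push keeps the list sorted by distance to the
// target and enforces the given size limit.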
func TestNodesPush(t *testing.T) {
	var target enode.ID
	n1 := nodeAtDistance(target, 255, intIP(1))
	n2 := nodeAtDistance(target, 254, intIP(2))
	n3 := nodeAtDistance(target, 253, intIP(3))
	perm := [][]*node{
		{n3, n2, n1},
		{n3, n1, n2},
		{n2, n3, n1},
		{n2, n1, n3},
		{n1, n3, n2},
		{n1, n2, n3},
	}

	// Insert all permutations into lists with size limit 3.
	for _, nodes := range perm {
		list := nodesByDistance{target: target}
		for _, n := range nodes {
			list.push(n, 3)
		}
		if !slicesEqual(list.entries, perm[0], nodeIDEqual) {
			t.Fatal("not equal")
		}
	}

	// Insert all permutations into lists with size limit 2.
	for _, nodes := range perm {
		list := nodesByDistance{target: target}
		for _, n := range nodes {
			list.push(n, 2)
		}
		if !slicesEqual(list.entries, perm[0][:2], nodeIDEqual) {
			t.Fatal("not equal")
		}
	}
}

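// nodeIDEqual reports whether two nodes have the same ID.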
func nodeIDEqual(n1, n2 *node) bool {
	return n1.ID() == n2.ID()
}

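// slicesEqual reports whether s1 and s2 are element-wise equal according to check.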
func slicesEqual[T any](s1, s2 []T, check func(e1, e2 T) bool) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i := range s1 {
		if !check(s1[i], s2[i]) {
			return false
		}
	}
	return true
}

// gen wraps quick.Value so it's easier to use.
// It generates a random value of the given value's type.
func gen(typ interface{}, rand *rand.Rand) interface{} {
	v, ok := quick.Value(reflect.TypeOf(typ), rand)
	if !ok {
		panic(fmt.Sprintf("couldn't generate random value of type %T", typ))
	}
	return v.Interface()
}

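// genIP generates a random IPv4 address.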
func genIP(rand *rand.Rand) net.IP {
	ip := make(net.IP, 4)
	rand.Read(ip)
	return ip
}

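// quickcfg returns the default configuration for property-based tests.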
func quickcfg() *quick.Config {
	return &quick.Config{
		MaxCount: 5000,
		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
	}
}

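// newkey generates a new ECDSA private key and panics if key generation fails.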
func newkey() *ecdsa.PrivateKey {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic("couldn't generate key: " + err.Error())
	}
	return key
}