github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/p2p/discover/table_test.go

//  Copyright 2018 The go-ethereum Authors
//  Copyright 2019 The go-aigar Authors
//  This file is part of the go-aigar library.
//
//  The go-aigar library is free software: you can redistribute it and/or modify
//  it under the terms of the GNU Lesser General Public License as published by
//  the Free Software Foundation, either version 3 of the License, or
//  (at your option) any later version.
//
//  The go-aigar library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//  GNU Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public License
//  along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.

package discover

import (
	"crypto/ecdsa"
	"fmt"
	"math/rand"
	"net"
	"reflect"
	"testing"
	"testing/quick"
	"time"

	"github.com/AigarNetwork/aigar/crypto"
	"github.com/AigarNetwork/aigar/p2p/enode"
	"github.com/AigarNetwork/aigar/p2p/enr"
	"github.com/AigarNetwork/aigar/p2p/netutil"
)

func TestTable_pingReplace(t *testing.T) {
	run := func(newNodeResponding, lastInBucketResponding bool) {
		name := fmt.Sprintf("newNodeResponding=%t/lastInBucketResponding=%t", newNodeResponding, lastInBucketResponding)
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			testPingReplace(t, newNodeResponding, lastInBucketResponding)
		})
	}

	run(true, true)
	run(false, true)
	run(true, false)
	run(false, false)
}

func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
	transport := newPingRecorder()
	tab, db := newTestTable(transport)
	defer db.Close()
	defer tab.close()

	<-tab.initDone

	// Fill up the sender's bucket.
	pingKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
	pingSender := wrapNode(enode.NewV4(&pingKey.PublicKey, net.IP{}, 99, 99))
	last := fillBucket(tab, pingSender)

	// Add the sender as if it just pinged us. Revalidate should replace the last node in
	// its bucket if it is unresponsive. Revalidate again to ensure that the replacement
	// node is also checked and removed if it does not respond.
	transport.dead[last.ID()] = !lastInBucketIsResponding
	transport.dead[pingSender.ID()] = !newNodeIsResponding
	tab.addSeenNode(pingSender)
	tab.doRevalidate(make(chan struct{}, 1))
	tab.doRevalidate(make(chan struct{}, 1))

	if !transport.pinged[last.ID()] {
		// Oldest node in bucket is pinged to see whether it is still alive.
		t.Error("table did not ping last node in bucket")
	}

	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	wantSize := bucketSize
	if !lastInBucketIsResponding && !newNodeIsResponding {
		wantSize--
	}
	if l := len(tab.bucket(pingSender.ID()).entries); l != wantSize {
		t.Errorf("wrong bucket size after bond: got %d, want %d", l, wantSize)
	}
	if found := contains(tab.bucket(pingSender.ID()).entries, last.ID()); found != lastInBucketIsResponding {
		t.Errorf("last entry found: %t, want: %t", found, lastInBucketIsResponding)
	}
	wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding
	if found := contains(tab.bucket(pingSender.ID()).entries, pingSender.ID()); found != wantNewEntry {
		t.Errorf("new entry found: %t, want: %t", found, wantNewEntry)
	}
}

func TestBucket_bumpNoDuplicates(t *testing.T) {
	t.Parallel()
	cfg := &quick.Config{
		MaxCount: 1000,
		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
		Values: func(args []reflect.Value, rand *rand.Rand) {
			// generate a random list of nodes. this will be the content of the bucket.
			n := rand.Intn(bucketSize-1) + 1
			nodes := make([]*node, n)
			for i := range nodes {
				nodes[i] = nodeAtDistance(enode.ID{}, 200, intIP(200))
			}
			args[0] = reflect.ValueOf(nodes)
			// generate random bump positions.
			bumps := make([]int, rand.Intn(100))
			for i := range bumps {
				bumps[i] = rand.Intn(len(nodes))
			}
			args[1] = reflect.ValueOf(bumps)
		},
	}

	prop := func(nodes []*node, bumps []int) (ok bool) {
		tab, db := newTestTable(newPingRecorder())
		defer db.Close()
		defer tab.close()

		b := &bucket{entries: make([]*node, len(nodes))}
		copy(b.entries, nodes)
		for i, pos := range bumps {
			tab.bumpInBucket(b, b.entries[pos])
			if hasDuplicates(b.entries) {
				t.Logf("bucket has duplicates after %d/%d bumps:", i+1, len(bumps))
				for _, n := range b.entries {
					t.Logf("  %p", n)
				}
				return false
			}
		}
		checkIPLimitInvariant(t, tab)
		return true
	}
	if err := quick.Check(prop, cfg); err != nil {
		t.Error(err)
	}
}

// This checks that the table-wide IP limit is applied correctly.
func TestTable_IPLimit(t *testing.T) {
	transport := newPingRecorder()
	tab, db := newTestTable(transport)
	defer db.Close()
	defer tab.close()

	for i := 0; i < tableIPLimit+1; i++ {
		n := nodeAtDistance(tab.self().ID(), i, net.IP{172, 0, 1, byte(i)})
		tab.addSeenNode(n)
	}
	if tab.len() > tableIPLimit {
		t.Errorf("too many nodes in table")
	}
	checkIPLimitInvariant(t, tab)
}

// This checks that the per-bucket IP limit is applied correctly.
func TestTable_BucketIPLimit(t *testing.T) {
	transport := newPingRecorder()
	tab, db := newTestTable(transport)
	defer db.Close()
	defer tab.close()

	d := 3
	for i := 0; i < bucketIPLimit+1; i++ {
		n := nodeAtDistance(tab.self().ID(), d, net.IP{172, 0, 1, byte(i)})
		tab.addSeenNode(n)
	}
	if tab.len() > bucketIPLimit {
		t.Errorf("too many nodes in table")
	}
	checkIPLimitInvariant(t, tab)
}

// checkIPLimitInvariant checks that ip limit sets contain an entry for every
// node in the table and no extra entries.
func checkIPLimitInvariant(t *testing.T, tab *Table) {
	t.Helper()

	tabset := netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}
	for _, b := range tab.buckets {
		for _, n := range b.entries {
			tabset.Add(n.IP())
		}
	}
	if tabset.String() != tab.ips.String() {
		t.Errorf("table IP set is incorrect:\nhave: %v\nwant: %v", tab.ips, tabset)
	}
}

func TestTable_closest(t *testing.T) {
	t.Parallel()

	test := func(test *closeTest) bool {
		// for any node table, Target and N
		transport := newPingRecorder()
		tab, db := newTestTable(transport)
		defer db.Close()
		defer tab.close()
		fillTable(tab, test.All)

		// check that closest(Target, N) returns nodes
		result := tab.closest(test.Target, test.N, false).entries
		if hasDuplicates(result) {
			t.Errorf("result contains duplicates")
			return false
		}
		if !sortedByDistanceTo(test.Target, result) {
			t.Errorf("result is not sorted by distance to target")
			return false
		}

		// check that the number of results is min(N, tablen)
		wantN := test.N
		if tlen := tab.len(); tlen < test.N {
			wantN = tlen
		}
		if len(result) != wantN {
			t.Errorf("wrong number of nodes: got %d, want %d", len(result), wantN)
			return false
		} else if len(result) == 0 {
			return true // no need to check distance
		}

		// check that the result nodes have minimum distance to target.
		for _, b := range tab.buckets {
			for _, n := range b.entries {
				if contains(result, n.ID()) {
					continue // don't run the check below for nodes in result
				}
				farthestResult := result[len(result)-1].ID()
				if enode.DistCmp(test.Target, n.ID(), farthestResult) < 0 {
					t.Errorf("table contains node that is closer to target but it's not in result")
					t.Logf("  Target:          %v", test.Target)
					t.Logf("  Farthest Result: %v", farthestResult)
					t.Logf("  ID:              %v", n.ID())
					return false
				}
			}
		}
		return true
	}
	if err := quick.Check(test, quickcfg()); err != nil {
		t.Error(err)
	}
}

func TestTable_ReadRandomNodesGetAll(t *testing.T) {
	cfg := &quick.Config{
		MaxCount: 200,
		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
		Values: func(args []reflect.Value, rand *rand.Rand) {
			args[0] = reflect.ValueOf(make([]*enode.Node, rand.Intn(1000)))
		},
	}
	test := func(buf []*enode.Node) bool {
		transport := newPingRecorder()
		tab, db := newTestTable(transport)
		defer db.Close()
		defer tab.close()
		<-tab.initDone

		for i := 0; i < len(buf); i++ {
			ld := cfg.Rand.Intn(len(tab.buckets))
			fillTable(tab, []*node{nodeAtDistance(tab.self().ID(), ld, intIP(ld))})
		}
		gotN := tab.ReadRandomNodes(buf)
		if gotN != tab.len() {
			t.Errorf("wrong number of nodes, got %d, want %d", gotN, tab.len())
			return false
		}
		if hasDuplicates(wrapNodes(buf[:gotN])) {
			t.Errorf("result contains duplicates")
			return false
		}
		return true
	}
	if err := quick.Check(test, cfg); err != nil {
		t.Error(err)
	}
}

type closeTest struct {
	Self   enode.ID
	Target enode.ID
	All    []*node
	N      int
}

func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {
	t := &closeTest{
		Self:   gen(enode.ID{}, rand).(enode.ID),
		Target: gen(enode.ID{}, rand).(enode.ID),
		N:      rand.Intn(bucketSize),
	}
	for _, id := range gen([]enode.ID{}, rand).([]enode.ID) {
		r := new(enr.Record)
		r.Set(enr.IP(genIP(rand)))
		n := wrapNode(enode.SignNull(r, id))
		n.livenessChecks = 1
		t.All = append(t.All, n)
	}
	return reflect.ValueOf(t)
}

func TestTable_addVerifiedNode(t *testing.T) {
	tab, db := newTestTable(newPingRecorder())
	<-tab.initDone
	defer db.Close()
	defer tab.close()

	// Insert two nodes.
	n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1})
	n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2})
	tab.addSeenNode(n1)
	tab.addSeenNode(n2)

	// Verify bucket content:
	bcontent := []*node{n1, n2}
	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
		t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries)
	}

	// Add a changed version of n2.
	newrec := n2.Record()
	newrec.Set(enr.IP{99, 99, 99, 99})
	newn2 := wrapNode(enode.SignNull(newrec, n2.ID()))
	tab.addVerifiedNode(newn2)

	// Check that bucket is updated correctly.
	newBcontent := []*node{newn2, n1}
	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, newBcontent) {
		t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries)
	}
	checkIPLimitInvariant(t, tab)
}

func TestTable_addSeenNode(t *testing.T) {
	tab, db := newTestTable(newPingRecorder())
	<-tab.initDone
	defer db.Close()
	defer tab.close()

	// Insert two nodes.
	n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1})
	n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2})
	tab.addSeenNode(n1)
	tab.addSeenNode(n2)

	// Verify bucket content:
	bcontent := []*node{n1, n2}
	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
		t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries)
	}

	// Add a changed version of n2.
	newrec := n2.Record()
	newrec.Set(enr.IP{99, 99, 99, 99})
	newn2 := wrapNode(enode.SignNull(newrec, n2.ID()))
	tab.addSeenNode(newn2)

	// Check that bucket content is unchanged.
	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
		t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries)
	}
	checkIPLimitInvariant(t, tab)
}

// This test checks that ENR updates happen during revalidation. If a node in the table
// announces a new sequence number, the new record should be pulled.
func TestTable_revalidateSyncRecord(t *testing.T) {
	transport := newPingRecorder()
	tab, db := newTestTable(transport)
	<-tab.initDone
	defer db.Close()
	defer tab.close()

	// Insert a node.
	var r enr.Record
	r.Set(enr.IP(net.IP{127, 0, 0, 1}))
	id := enode.ID{1}
	n1 := wrapNode(enode.SignNull(&r, id))
	tab.addSeenNode(n1)

	// Update the node record.
	r.Set(enr.WithEntry("foo", "bar"))
	n2 := enode.SignNull(&r, id)
	transport.updateRecord(n2)

	tab.doRevalidate(make(chan struct{}, 1))
	intable := tab.getNode(id)
	if !reflect.DeepEqual(intable, n2) {
		t.Fatalf("table contains old record with seq %d, want seq %d", intable.Seq(), n2.Seq())
	}
}

// gen wraps quick.Value so it's easier to use.
// It generates a random value of the given value's type.
func gen(typ interface{}, rand *rand.Rand) interface{} {
	v, ok := quick.Value(reflect.TypeOf(typ), rand)
	if !ok {
		panic(fmt.Sprintf("couldn't generate random value of type %T", typ))
	}
	return v.Interface()
}

// genIP generates a random 4-byte (IPv4) address.
func genIP(rand *rand.Rand) net.IP {
	ip := make(net.IP, 4)
	rand.Read(ip)
	return ip
}

// quickcfg returns the default configuration for testing/quick-based tests.
func quickcfg() *quick.Config {
	return &quick.Config{
		MaxCount: 5000,
		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
	}
}

// newkey generates a new ECDSA private key, panicking if key generation fails.
func newkey() *ecdsa.PrivateKey {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic("couldn't generate key: " + err.Error())
	}
	return key
}