github.com/keltia/go-ipfs@v0.3.8-0.20150909044612-210793031c63/routing/dht/dht_test.go

package dht

import (
	"bytes"
	"fmt"
	"math/rand"
	"sort"
	"sync"
	"testing"
	"time"

	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
	ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

	key "github.com/ipfs/go-ipfs/blocks/key"
	peer "github.com/ipfs/go-ipfs/p2p/peer"
	netutil "github.com/ipfs/go-ipfs/p2p/test/util"
	routing "github.com/ipfs/go-ipfs/routing"
	record "github.com/ipfs/go-ipfs/routing/record"
	u "github.com/ipfs/go-ipfs/util"

	ci "github.com/ipfs/go-ipfs/util/testutil/ci"
	travisci "github.com/ipfs/go-ipfs/util/testutil/ci/travis"
)

var testCaseValues = map[key.Key][]byte{}

func init() {
	testCaseValues["hello"] = []byte("world")
	for i := 0; i < 100; i++ {
		k := fmt.Sprintf("%d -- key", i)
		v := fmt.Sprintf("%d -- value", i)
		testCaseValues[key.Key(k)] = []byte(v)
	}
}

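// setupDHT creates a single DHT node backed by an in-memory datastore and a
// test swarm host, with a permissive validator registered under the "v"
// namespace so tests can put and get arbitrary "/v/..." records.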
func setupDHT(ctx context.Context, t *testing.T) *IpfsDHT {
	h := netutil.GenHostSwarm(t, ctx)

	dss := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, h, dss)

	d.Validator["v"] = &record.ValidChecker{
		Func: func(key.Key, []byte) error {
			return nil
		},
		Sign: false,
	}
	return d
}

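// setupDHTS creates n unconnected DHT nodes and returns their listen
// addresses, peer IDs, and DHT instances, index-aligned.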
func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer.ID, []*IpfsDHT) {
	addrs := make([]ma.Multiaddr, n)
	dhts := make([]*IpfsDHT, n)
	peers := make([]peer.ID, n)

	for i := 0; i < n; i++ {
		dhts[i] = setupDHT(ctx, t)
		peers[i] = dhts[i].self
		addrs[i] = dhts[i].peerstore.Addrs(dhts[i].self)[0]
	}

	return addrs, peers, dhts
}

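// connect dials a to b and blocks until both routing tables contain the
// other peer, so callers can rely on the link being fully established.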
func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) {

	idB := b.self
	addrB := b.peerstore.Addrs(idB)
	if len(addrB) == 0 {
		t.Fatal("peers setup incorrectly: no local address")
	}

	a.peerstore.AddAddrs(idB, addrB, peer.TempAddrTTL)
	pi := peer.PeerInfo{ID: idB}
	if err := a.host.Connect(ctx, pi); err != nil {
		t.Fatal(err)
	}

	// loop until connection notification has been received.
	// under high load, this may not happen as immediately as we would like.
	for a.routingTable.Find(b.self) == "" {
		time.Sleep(time.Millisecond * 5)
	}

	for b.routingTable.Find(a.self) == "" {
		time.Sleep(time.Millisecond * 5)
	}
}

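// bootstrap runs one sequential bootstrap round over all of the given DHTs,
// starting from a random index to reduce ordering bias.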
func bootstrap(t *testing.T, ctx context.Context, dhts []*IpfsDHT) {

	ctx, cancel := context.WithCancel(ctx)
	log.Debugf("bootstrapping dhts...")

	// tried async. sequential fares much better. compare:
	// 100 async https://gist.github.com/jbenet/56d12f0578d5f34810b2
	// 100 sync https://gist.github.com/jbenet/6c59e7c15426e48aaedd
	// probably because results compound

	cfg := DefaultBootstrapConfig
	cfg.Queries = 3

	start := rand.Intn(len(dhts)) // randomize to decrease bias.
	for i := range dhts {
		dht := dhts[(start+i)%len(dhts)]
		dht.runBootstrap(ctx, cfg)
	}
	cancel()
}

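// TestValueGetSet puts a value on one of two connected nodes and checks that
// both nodes can retrieve it.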
func TestValueGetSet(t *testing.T) {
	// t.Skip("skipping test to debug another")

	ctx := context.Background()

	dhtA := setupDHT(ctx, t)
	dhtB := setupDHT(ctx, t)

	defer dhtA.Close()
	defer dhtB.Close()
	defer dhtA.host.Close()
	defer dhtB.host.Close()

	vf := &record.ValidChecker{
		Func: func(key.Key, []byte) error {
			return nil
		},
		Sign: false,
	}
	dhtA.Validator["v"] = vf
	dhtB.Validator["v"] = vf

	connect(t, ctx, dhtA, dhtB)

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	if err := dhtA.PutValue(ctxT, "/v/hello", []byte("world")); err != nil {
		t.Fatal(err)
	}

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err := dhtA.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err = dhtB.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}
}

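// TestProvides stores records on one node of a small star topology, announces
// that node as a provider for each key, and verifies that the other nodes can
// find it via FindProvidersAsync.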
func TestProvides(t *testing.T) {
	// t.Skip("skipping test to debug another")
	ctx := context.Background()

	_, _, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	for k, v := range testCaseValues {
		log.Debugf("adding local values for %s = %s", k, v)
		sk := dhts[3].peerstore.PrivKey(dhts[3].self)
		rec, err := record.MakePutRecord(sk, k, v, false)
		if err != nil {
			t.Fatal(err)
		}

		err = dhts[3].putLocal(k, rec)
		if err != nil {
			t.Fatal(err)
		}

		bits, err := dhts[3].getLocal(k)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(bits, v) {
			t.Fatalf("didn't store the right bits (%s, %s)", k, v)
		}
	}

	for k := range testCaseValues {
		log.Debugf("announcing provider for %s", k)
		if err := dhts[3].Provide(ctx, k); err != nil {
			t.Fatal(err)
		}
	}

	// what is this timeout for? was 60ms before.
	time.Sleep(time.Millisecond * 6)

	n := 0
	for k := range testCaseValues {
		n = (n + 1) % 3

		log.Debugf("getting providers for %s from %d", k, n)
		ctxT, _ := context.WithTimeout(ctx, time.Second)
		provchan := dhts[n].FindProvidersAsync(ctxT, k, 1)

		select {
		case prov := <-provchan:
			if prov.ID == "" {
				t.Fatal("Got back nil provider")
			}
			if prov.ID != dhts[3].self {
				t.Fatal("Got back wrong provider")
			}
		case <-ctxT.Done():
			t.Fatal("Did not get a provider back.")
		}
	}
}

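// waitForWellFormedTables polls until every routing table has at least
// minPeers peers and the average routing table size is at least avgPeers,
// or until timeout elapses.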
// if minPeers or avgPeers is 0, don't test for it.
func waitForWellFormedTables(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers int, timeout time.Duration) bool {
	// test "well-formed-ness" (>= minPeers peers in every routing table)

	checkTables := func() bool {
		totalPeers := 0
		for _, dht := range dhts {
			rtlen := dht.routingTable.Size()
			totalPeers += rtlen
			if minPeers > 0 && rtlen < minPeers {
				t.Logf("routing table for %s only has %d peers (should have >= %d)", dht.self, rtlen, minPeers)
				return false
			}
		}
		actualAvgPeers := totalPeers / len(dhts)
		t.Logf("avg rt size: %d", actualAvgPeers)
		if avgPeers > 0 && actualAvgPeers < avgPeers {
			t.Logf("avg rt size: %d < %d", actualAvgPeers, avgPeers)
			return false
		}
		return true
	}

	timeoutA := time.After(timeout)
	for {
		select {
		case <-timeoutA:
			log.Debugf("did not reach well-formed routing tables by %s", timeout)
			return false // failed
		case <-time.After(5 * time.Millisecond):
			if checkTables() {
				return true // succeeded
			}
		}
	}
}

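// printRoutingTables dumps every DHT's routing table to stdout for debugging.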
func printRoutingTables(dhts []*IpfsDHT) {
	// the routing tables should be full now. let's inspect them.
	fmt.Printf("checking routing tables of %d dhts\n", len(dhts))
	for _, dht := range dhts {
		fmt.Printf("checking routing table of %s\n", dht.self)
		dht.routingTable.Print()
		fmt.Println("")
	}
}

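// TestBootstrap connects 30 DHTs in a ring, runs repeated bootstrap rounds in
// the background, and waits for every routing table to become well formed.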
func TestBootstrap(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	nDHTs := 30
	_, _, dhts := setupDHTS(ctx, nDHTs, t)
	defer func() {
		for i := 0; i < nDHTs; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	t.Logf("connecting %d dhts in a ring", nDHTs)
	for i := 0; i < nDHTs; i++ {
		connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
	}

	<-time.After(100 * time.Millisecond)
	// bootstrap a few times until we get good tables.
	stop := make(chan struct{})
	go func() {
		for {
			t.Logf("bootstrapping %d dhts so they find each other", nDHTs)
			ctxT, _ := context.WithTimeout(ctx, 5*time.Second)
			bootstrap(t, ctxT, dhts)

			select {
			case <-time.After(50 * time.Millisecond):
				continue // being explicit
			case <-stop:
				return
			}
		}
	}()

	waitForWellFormedTables(t, dhts, 7, 10, 20*time.Second)
	close(stop)

	if u.Debug {
		// the routing tables should be full now. let's inspect them.
		printRoutingTables(dhts)
	}
}

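// TestPeriodicBootstrap wires every DHT's periodic bootstrapper to a shared
// signal channel, connects the nodes in a ring, then fires the signal once and
// waits for the routing tables to become well formed.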
func TestPeriodicBootstrap(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if ci.IsRunning() {
		t.Skip("skipping on CI. highly timing dependent")
	}
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	nDHTs := 30
	_, _, dhts := setupDHTS(ctx, nDHTs, t)
	defer func() {
		for i := 0; i < nDHTs; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	// signal amplifier
	amplify := func(signal chan time.Time, other []chan time.Time) {
		for t := range signal {
			for _, s := range other {
				s <- t
			}
		}
		for _, s := range other {
			close(s)
		}
	}

	signal := make(chan time.Time)
	allSignals := []chan time.Time{}

	cfg := DefaultBootstrapConfig
	cfg.Queries = 5

	// kick off periodic bootstrappers with instrumented signals.
	for _, dht := range dhts {
		s := make(chan time.Time)
		allSignals = append(allSignals, s)
		dht.BootstrapOnSignal(cfg, s)
	}
	go amplify(signal, allSignals)

	t.Logf("%d dhts are not connected", nDHTs)
	for _, dht := range dhts {
		rtlen := dht.routingTable.Size()
		if rtlen > 0 {
			t.Errorf("routing table for %s should have 0 peers. has %d", dht.self, rtlen)
		}
	}

	for i := 0; i < nDHTs; i++ {
		connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
	}

	t.Logf("%d dhts are now connected to 1-2 others", nDHTs)
	for _, dht := range dhts {
		rtlen := dht.routingTable.Size()
		if rtlen > 2 {
			t.Errorf("routing table for %s should have at most 2 peers. has %d", dht.self, rtlen)
		}
	}

	if u.Debug {
		printRoutingTables(dhts)
	}

	t.Logf("bootstrapping %d dhts so they find each other", nDHTs)
	signal <- time.Now()

	// this is async, and we don't know when it's finished with one cycle, so keep checking
	// until the routing tables look better, or some long timeout for the failure case.
	waitForWellFormedTables(t, dhts, 7, 10, 20*time.Second)

	if u.Debug {
		printRoutingTables(dhts)
	}
}

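// TestProvidesMany spreads the test records across a 40-node ring, bootstraps
// the network, and then has every node look up a provider for every key
// concurrently. It is currently skipped (see t.Skip below).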
func TestProvidesMany(t *testing.T) {
	t.Skip("this test doesn't work")
	// t.Skip("skipping test to debug another")
	ctx := context.Background()

	nDHTs := 40
	_, _, dhts := setupDHTS(ctx, nDHTs, t)
	defer func() {
		for i := 0; i < nDHTs; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	t.Logf("connecting %d dhts in a ring", nDHTs)
	for i := 0; i < nDHTs; i++ {
		connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
	}

	<-time.After(100 * time.Millisecond)
	t.Logf("bootstrapping %d dhts so they find each other", nDHTs)
	ctxT, _ := context.WithTimeout(ctx, 20*time.Second)
	bootstrap(t, ctxT, dhts)

	if u.Debug {
		// the routing tables should be full now. let's inspect them.
		t.Logf("checking routing tables of %d dhts", nDHTs)
		for _, dht := range dhts {
			fmt.Printf("checking routing table of %s\n", dht.self)
			dht.routingTable.Print()
			fmt.Println("")
		}
	}

	var providers = map[key.Key]peer.ID{}

	d := 0
	for k, v := range testCaseValues {
		d = (d + 1) % len(dhts)
		dht := dhts[d]
		providers[k] = dht.self

		t.Logf("adding local values for %s = %s (on %s)", k, v, dht.self)
		rec, err := record.MakePutRecord(nil, k, v, false)
		if err != nil {
			t.Fatal(err)
		}

		err = dht.putLocal(k, rec)
		if err != nil {
			t.Fatal(err)
		}

		bits, err := dht.getLocal(k)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(bits, v) {
			t.Fatalf("didn't store the right bits (%s, %s)", k, v)
		}

		t.Logf("announcing provider for %s", k)
		if err := dht.Provide(ctx, k); err != nil {
			t.Fatal(err)
		}
	}

	// what is this timeout for? was 60ms before.
	time.Sleep(time.Millisecond * 6)

	errchan := make(chan error)

	ctxT, _ = context.WithTimeout(ctx, 5*time.Second)

	var wg sync.WaitGroup
	getProvider := func(dht *IpfsDHT, k key.Key) {
		defer wg.Done()

		expected := providers[k]

		provchan := dht.FindProvidersAsync(ctxT, k, 1)
		select {
		case prov := <-provchan:
			actual := prov.ID
			if actual == "" {
				errchan <- fmt.Errorf("Got back nil provider (%s at %s)", k, dht.self)
			} else if actual != expected {
				errchan <- fmt.Errorf("Got back wrong provider (%s != %s) (%s at %s)",
					expected, actual, k, dht.self)
			}
		case <-ctxT.Done():
			errchan <- fmt.Errorf("Did not get a provider back (%s at %s)", k, dht.self)
		}
	}

	for k := range testCaseValues {
		// everyone should be able to find it...
		for _, dht := range dhts {
			log.Debugf("getting providers for %s at %s", k, dht.self)
			wg.Add(1)
			go getProvider(dht, k)
		}
	}

	// we need this because of printing errors
	go func() {
		wg.Wait()
		close(errchan)
	}()

	for err := range errchan {
		t.Error(err)
	}
}

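// TestProvidesAsync stores a single record on one node of a 4-node star,
// announces it, and checks that FindProvidersAsync on another node returns
// the providing peer within a short timeout.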
func TestProvidesAsync(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, _, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	k := key.Key("hello")
	val := []byte("world")
	sk := dhts[3].peerstore.PrivKey(dhts[3].self)
	rec, err := record.MakePutRecord(sk, k, val, false)
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(k, rec)
	if err != nil {
		t.Fatal(err)
	}

	bits, err := dhts[3].getLocal(k)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(bits, val) {
		t.Fatal("didn't store the right bits")
	}

	err = dhts[3].Provide(ctx, key.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	ctxT, _ := context.WithTimeout(ctx, time.Millisecond*300)
	provs := dhts[0].FindProvidersAsync(ctxT, key.Key("hello"), 5)
	select {
	case p, ok := <-provs:
		if !ok {
			t.Fatal("Provider channel was closed...")
		}
		if p.ID == "" {
			t.Fatal("Got back nil provider!")
		}
		if p.ID != dhts[3].self {
			t.Fatalf("got a provider, but not the right one. %s", p)
		}
	case <-ctxT.Done():
		t.Fatal("Didn't get back providers")
	}
}

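// TestLayeredGet checks that GetValue does not fall back to provider records:
// a key that has only been provided, never put, should yield ErrNotFound.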
func TestLayeredGet(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, _, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	err := dhts[3].Provide(ctx, key.Key("/v/hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 6)

	t.Log("interface was changed. GetValue should not use providers.")
	ctxT, _ := context.WithTimeout(ctx, time.Second)
	val, err := dhts[0].GetValue(ctxT, key.Key("/v/hello"))
	if err != routing.ErrNotFound {
		t.Error(err)
	}
	if string(val) == "world" {
		t.Error("should not get value.")
	}
	if len(val) > 0 && string(val) != "world" {
		t.Error("worse, there's a value and it's not even the right one.")
	}
}

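// TestFindPeer checks that a node can locate a peer it is not directly
// connected to through an intermediate node.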
func TestFindPeer(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	p, err := dhts[0].FindPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	if p.ID == "" {
		t.Fatal("Failed to find peer.")
	}

	if p.ID != peers[2] {
		t.Fatal("Didn't find expected peer.")
	}
}

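// TestFindPeersConnectedToPeer queries one node for the peers connected to
// another node and checks that at least one peer comes back. The stronger
// assertion (matching the exact peer set) is noted as not quite correct, so
// the test is skipped.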
func TestFindPeersConnectedToPeer(t *testing.T) {
	t.Skip("not quite correct (see note)")

	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	// topology:
	// 0-1, 1-2, 1-3, 2-3
	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])
	connect(t, ctx, dhts[2], dhts[3])

	// fmt.Println("0 is", peers[0])
	// fmt.Println("1 is", peers[1])
	// fmt.Println("2 is", peers[2])
	// fmt.Println("3 is", peers[3])

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	pchan, err := dhts[0].FindPeersConnectedToPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	// shouldFind := []peer.ID{peers[1], peers[3]}
	found := []peer.PeerInfo{}
	for nextp := range pchan {
		found = append(found, nextp)
	}

	// fmt.Printf("querying 0 (%s) FindPeersConnectedToPeer 2 (%s)\n", peers[0], peers[2])
	// fmt.Println("should find 1, 3", shouldFind)
	// fmt.Println("found", found)

	// testPeerListsMatch(t, shouldFind, found)

	log.Warning("TestFindPeersConnectedToPeer is not quite correct")
	if len(found) == 0 {
		t.Fatal("didn't find any peers.")
	}
}

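// testPeerListsMatch fails the test unless p1 and p2 contain exactly the same
// peer IDs, ignoring order.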
func testPeerListsMatch(t *testing.T, p1, p2 []peer.ID) {

	if len(p1) != len(p2) {
		t.Fatal("did not find as many peers as should have", p1, p2)
	}

	ids1 := make([]string, len(p1))
	ids2 := make([]string, len(p2))

	for i, p := range p1 {
		ids1[i] = string(p)
	}

	for i, p := range p2 {
		ids2[i] = string(p)
	}

	sort.Strings(ids1)
	sort.Strings(ids2)

	for i := range ids1 {
		if ids1[i] != ids2[i] {
			t.Fatal("Didn't find expected peer", ids1[i], ids2)
		}
	}
}

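// TestConnectCollision has two nodes dial each other simultaneously, repeated
// several times, to check that simultaneous connects neither deadlock nor fail.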
func TestConnectCollision(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}
	if travisci.IsRunning() {
		t.Skip("Skipping on Travis-CI.")
	}

	runTimes := 10

	for rtime := 0; rtime < runTimes; rtime++ {
		log.Info("Running Time: ", rtime)

		ctx := context.Background()

		dhtA := setupDHT(ctx, t)
		dhtB := setupDHT(ctx, t)

		addrA := dhtA.peerstore.Addrs(dhtA.self)[0]
		addrB := dhtB.peerstore.Addrs(dhtB.self)[0]

		peerA := dhtA.self
		peerB := dhtB.self

		errs := make(chan error)
		go func() {
			dhtA.peerstore.AddAddr(peerB, addrB, peer.TempAddrTTL)
			pi := peer.PeerInfo{ID: peerB}
			err := dhtA.host.Connect(ctx, pi)
			errs <- err
		}()
		go func() {
			dhtB.peerstore.AddAddr(peerA, addrA, peer.TempAddrTTL)
			pi := peer.PeerInfo{ID: peerA}
			err := dhtB.host.Connect(ctx, pi)
			errs <- err
		}()

		timeout := time.After(5 * time.Second)
		select {
		case e := <-errs:
			if e != nil {
				t.Fatal(e)
			}
		case <-timeout:
			t.Fatal("Timeout received!")
		}
		select {
		case e := <-errs:
			if e != nil {
				t.Fatal(e)
			}
		case <-timeout:
			t.Fatal("Timeout received!")
		}

		dhtA.Close()
		dhtB.Close()
		dhtA.host.Close()
		dhtB.host.Close()
	}
}