github.com/keltia/go-ipfs@v0.3.8-0.20150909044612-210793031c63/routing/dht/ext_test.go

package dht

import (
	"io"
	"io/ioutil"
	"math/rand"
	"testing"
	"time"

	ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/io"
	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

	key "github.com/ipfs/go-ipfs/blocks/key"
	inet "github.com/ipfs/go-ipfs/p2p/net"
	mocknet "github.com/ipfs/go-ipfs/p2p/net/mock"
	peer "github.com/ipfs/go-ipfs/p2p/peer"
	routing "github.com/ipfs/go-ipfs/routing"
	pb "github.com/ipfs/go-ipfs/routing/dht/pb"
	record "github.com/ipfs/go-ipfs/routing/record"
	u "github.com/ipfs/go-ipfs/util"
)

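// TestGetFailures exercises the DHT's failure paths: a peer that never
// replies (the query should time out), a peer that replies with empty
// messages (the query should exhaust itself and return ErrNotFound), and
// a GET_VALUE request for a key this node does not hold (the response
// should carry neither a record nor provider peers).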
func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

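	// hosts[0] runs the DHT under test; hosts[1] is its only known peer.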
	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)
	d.Update(ctx, hosts[1].ID())

	// Swallow every request without ever replying, so queries against
	// hosts[1] hang until their context expires.
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()
		io.Copy(ioutil.Discard, s)
	})

	// This one should time out.
	ctx1, cancel1 := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel1()
	if _, err := d.GetValue(ctx1, key.Key("test")); err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}

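		// When the context expires, the query's worker process is shut
		// down, so the failure surfaces as "process closing" rather than
		// a plain timeout.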
		if err.Error() != "process closing" {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	t.Log("Timeout test passed.")

	// Reply to every message, but with an empty response of the same
	// type: the peer answers, yet never supplies a value or closer peers.
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			panic(err)
		}

		resp := &pb.Message{
			Type: pmes.Type,
		}
		if err := pbw.WriteMsg(resp); err != nil {
			panic(err)
		}
	})

	// This one should fail with NotFound.
	// Long context timeout to ensure we don't end too early: the DHT
	// should exhaust its query and return ErrNotFound.
	// (was 3 seconds before, which should be _plenty_ of time, but maybe
	// travis machines really have a hard time...)
	ctx2, cancel2 := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel2()
	_, err = d.GetValue(ctx2, key.Key("test"))
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	t.Log("ErrNotFound check passed!")

	// Now we test this DHT's handleGetValue failure.
	{
		typ := pb.Message_GET_VALUE
		str := "hello"

		sk, err := d.getOwnPrivateKey()
		if err != nil {
			t.Fatal(err)
		}

		rec, err := record.MakePutRecord(sk, key.Key(str), []byte("blah"), true)
		if err != nil {
			t.Fatal(err)
		}
		req := pb.Message{
			Type:   &typ,
			Key:    &str,
			Record: rec,
		}

		s, err := hosts[1].NewStream(ProtocolDHT, hosts[0].ID())
		if err != nil {
			t.Fatal(err)
		}
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

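		// Send the GET_VALUE request from hosts[1] and expect a response
		// that carries neither a record nor provider peers, since
		// hosts[0] does not hold the key.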
		if err := pbw.WriteMsg(&req); err != nil {
			t.Fatal(err)
		}

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			t.Fatal(err)
		}
		if pmes.GetRecord() != nil {
			t.Fatal("should not have a record")
		}
		if pmes.GetProviderPeers() != nil {
			t.Fatal("should not have provider peers")
		}
	}
}

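// TestNotFound checks that a GetValue query terminates with
// routing.ErrNotFound when every peer only ever returns random closer
// peers and nobody holds the value.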
func TestNotFound(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 16)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

	for _, p := range hosts {
		d.Update(ctx, p.ID())
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				resp := &pb.Message{Type: pmes.Type}

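				// Point the querier at seven random peers (repeats are
				// possible) but never at the value itself.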
				ps := []peer.PeerInfo{}
				for i := 0; i < 7; i++ {
					p := hosts[rand.Intn(len(hosts))].ID()
					pi := host.Peerstore().PeerInfo(p)
					ps = append(ps, pi)
				}

				resp.CloserPeers = pb.PeerInfosToPBPeers(d.host.Network(), ps)
				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}

			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	// Long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
	v, err := d.GetValue(ctx, key.Key("hello"))
	log.Debugf("get value got %v", v)
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}

// If fewer than K nodes are in the entire network, a GET RPC should fail
// when nobody has the value.
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test to debug another")
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 6)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

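	// Seed the routing table with only four of the six peers.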
	for i := 1; i < 5; i++ {
		d.Update(ctx, hosts[i].ID())
	}

	// Reply to every message by pointing back at the same single peer.
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
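				// Every peer always answers with hosts[1], so the query
				// can never reach K distinct responders.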
				pi := host.Peerstore().PeerInfo(hosts[1].ID())
				resp := &pb.Message{
					Type:        pmes.Type,
					CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.PeerInfo{pi}),
				}

				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()
	if _, err := d.GetValue(ctx, key.Key("hello")); err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}