github.com/keltia/go-ipfs@v0.3.8-0.20150909044612-210793031c63/p2p/test/backpressure/backpressure_test.go (about)

     1  package backpressure_tests
     2  
     3  import (
     4  	"io"
     5  	"math/rand"
     6  	"testing"
     7  	"time"
     8  
     9  	host "github.com/ipfs/go-ipfs/p2p/host"
    10  	inet "github.com/ipfs/go-ipfs/p2p/net"
    11  	peer "github.com/ipfs/go-ipfs/p2p/peer"
    12  	protocol "github.com/ipfs/go-ipfs/p2p/protocol"
    13  	testutil "github.com/ipfs/go-ipfs/p2p/test/util"
    14  	eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog"
    15  
    16  	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
    17  	u "github.com/ipfs/go-ipfs/util"
    18  )
    19  
    20  var log = eventlog.Logger("backpressure")
    21  
    22  // TestBackpressureStreamHandler tests whether mux handler
    23  // rate limiting works, i.e. that since the handler is sequential,
    24  // it should block senders.
    25  //
    26  // Important note: spdystream (which peerstream uses) has a set
    27  // of n workers (n=spdystream.FRAME_WORKERS) which handle new
    28  // frames, including those starting new streams. So all of them
    29  // can be in the handler at one time. Also, the sending side
    30  // does not rate limit unless we call stream.Wait()
    31  //
    32  //
    33  // Note: right now, this happens muxer-wide. The muxer should
    34  // learn to flow control, so handlers can't block each other.
    35  func TestBackpressureStreamHandler(t *testing.T) {
    36  	t.Skip(`Sadly, as cool as this test is, it doesn't work
    38  because spdystream doesn't handle stream open backpressure
    38  well IMO. I'll see about rewriting that part when it becomes
    39  a problem.
    40  `)
    41  
    42  	// a number of concurrent request handlers
    43  	limit := 10
    44  
    45  	// our way to signal that we're done with 1 request
    46  	requestHandled := make(chan struct{})
    47  
    48  	// handler rate limiting
    49  	receiverRatelimit := make(chan struct{}, limit)
    50  	for i := 0; i < limit; i++ {
    51  		receiverRatelimit <- struct{}{}
    52  	}
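        	// receiverRatelimit acts as a counting semaphore: it starts full,
        	// a token is taken when a stream comes in, and it is put back once
        	// that request has been "handled" below.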
    53  
    54  	// sender counter of successfully opened streams
    55  	senderOpened := make(chan struct{}, limit*100)
    56  
    57  	// sender signals it's done (errored out)
    58  	senderDone := make(chan struct{})
    59  
    60  	// the receiver handles requests with some rate limiting
    61  	receiver := func(s inet.Stream) {
    62  		log.Debug("receiver received a stream")
    63  
    64  		<-receiverRatelimit // acquire
    65  		go func() {
    66  			// our request handler. can do stuff here. we
    67  			// simulate something taking time by waiting
    68  			// on requestHandled
    69  			log.Debug("request worker handling...")
    70  			<-requestHandled
    71  			log.Debug("request worker done!")
    72  			receiverRatelimit <- struct{}{} // release
    73  		}()
    74  	}
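        	// once `limit` workers are busy, the acquire above blocks the stream
        	// handler itself; that blocked handler is what should push back on
        	// the muxer and, through it, on the sender.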
    75  
    76  	// the sender opens streams as fast as possible
    77  	sender := func(host host.Host, remote peer.ID) {
    78  		var s inet.Stream
    79  		var err error
    80  		defer func() {
    81  			// an error here is expected once the receiver shuts down, so log it.
    82  			log.Debugf("sender error. exiting: %s", err)
    83  			senderDone <- struct{}{}
    84  		}()
    85  
    86  		for {
    87  			s, err = host.NewStream(protocol.TestingID, remote)
    88  			if err != nil {
    89  				return
    90  			}
    91  
    92  			_ = s
    93  			// if err = s.SwarmStream().Stream().Wait(); err != nil {
    94  			// 	return
    95  			// }
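    			// (as the note above says, without a Wait() here the sending
    			// side does not rate limit itself per stream; it relies
    			// entirely on muxer-wide backpressure.)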
    96  
    97  			// "count" another successfully opened stream
    98  			// (large buffer so shouldn't block in normal operation)
    99  			log.Debug("sender opened another stream!")
   100  			senderOpened <- struct{}{}
   101  		}
   102  	}
   103  
   104  	// count our senderOpened events
   105  	countStreamsOpenedBySender := func(min int) int {
   106  		opened := 0
   107  		for opened < min {
   108  			log.Debugf("countStreamsOpenedBySender got %d (min %d)", opened, min)
   109  			select {
   110  			case <-senderOpened:
   111  				opened++
   112  			case <-time.After(10 * time.Millisecond):
   113  			}
   114  		}
   115  		return opened
   116  	}
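        	// note: this helper blocks until `min` opens have been observed; the
        	// 10ms timeout only lets it log progress, and min == 0 returns
        	// immediately.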
   117  
   118  	// count our received events
   119  	// waitForNReceivedStreams := func(n int) {
   120  	// 	for n > 0 {
   121  	// 		log.Debugf("waiting for %d received streams...", n)
   122  	// 		select {
   123  	// 		case <-receiverRatelimit:
   124  	// 			n--
   125  	// 		}
   126  	// 	}
   127  	// }
   128  
   129  	testStreamsOpened := func(expected int) {
   130  		log.Debugf("testing rate limited to %d streams", expected)
   131  		if n := countStreamsOpenedBySender(expected); n != expected {
   132  			t.Fatalf("rate limiting did not work :( -- %d != %d", expected, n)
   133  		}
   134  	}
   135  
   136  	// ok that's enough setup. let's do it!
   137  
   138  	ctx := context.Background()
   139  	h1 := testutil.GenHostSwarm(t, ctx)
   140  	h2 := testutil.GenHostSwarm(t, ctx)
   141  
   142  	// setup receiver handler
   143  	h1.SetStreamHandler(protocol.TestingID, receiver)
   144  
   145  	h2pi := h2.Peerstore().PeerInfo(h2.ID())
   146  	log.Debugf("dialing %s", h2pi.Addrs)
   147  	if err := h1.Connect(ctx, h2pi); err != nil {
   148  		t.Fatalf("Failed to connect: %s", err)
   149  	}
   150  
   151  	// launch sender!
   152  	go sender(h2, h1.ID())
   153  
   154  	// ok, what do we expect to happen? the receiver should
   155  	// receive 10 requests and stop receiving, blocking the sender.
   156  	// we can test this by counting 10x senderOpened requests
   157  
   158  	<-senderOpened // wait for the sender to successfully open its first stream.
   159  	testStreamsOpened(limit - 1)
   160  
   161  	// let's "handle" 3 requests.
   162  	<-requestHandled
   163  	<-requestHandled
   164  	<-requestHandled
   165  	// the sender should've now been able to open exactly 3 more.
   166  
   167  	testStreamsOpened(3)
   168  
   169  	// shouldn't have opened anything more
   170  	testStreamsOpened(0)
   171  
   172  	// let's "handle" 100 requests in batches of 5
   173  	for i := 0; i < 20; i++ {
   174  		<-requestHandled
   175  		<-requestHandled
   176  		<-requestHandled
   177  		<-requestHandled
   178  		<-requestHandled
   179  		testStreamsOpened(5)
   180  	}
   181  
   182  	// success!
   183  
   184  	// now for the sugar on top: let's tear down the receiver. it should
   185  	// exit the sender.
   186  	h1.Close()
   187  
   188  	// shouldn't have opened anything more
   189  	testStreamsOpened(0)
   190  
   191  	select {
   192  	case <-time.After(100 * time.Millisecond):
   193  		t.Error("receiver shutdown failed to exit sender")
   194  	case <-senderDone:
   195  		log.Info("handler backpressure works!")
   196  	}
   197  }
   198  
   199  // TestStBackpressureStreamWrite tests whether streams see proper
   200  // backpressure when writing data over network streams.
   201  func TestStBackpressureStreamWrite(t *testing.T) {
   202  
   203  	// senderWrote signals that the sender wrote bytes to remote.
   204  	// the value is the count of bytes written.
   205  	senderWrote := make(chan int, 10000)
   206  
   207  	// sender signals it's done (errored out)
   208  	senderDone := make(chan struct{})
   209  
   210  	// writeStats drains senderWrote without blocking and returns
   211  	// how many bytes were written and how many writes happened.
   212  	writeStats := func() (int, int) {
   213  		writes := 0
   214  		bytes := 0
   215  		for {
   216  			select {
   217  			case n := <-senderWrote:
   218  				writes++
   219  				bytes = bytes + n
   220  			default:
   221  				log.Debugf("stats: sender wrote %d bytes, %d writes", bytes, writes)
   222  				return bytes, writes
   223  			}
   224  		}
   225  	}
   226  
   227  	// sender attempts to write as fast as possible, signaling on the
   228  	// completion of every write. This makes it possible to see how
   229  	// fast it's actually writing. We pair this with a receiver
   230  	// that waits for a signal to read.
   231  	sender := func(s inet.Stream) {
   232  		defer func() {
   233  			s.Close()
   234  			senderDone <- struct{}{}
   235  		}()
   236  
   237  		// ready a buffer of random data
   238  		buf := make([]byte, 65536)
   239  		u.NewTimeSeededRand().Read(buf)
   240  
   241  		for {
   242  			// send a randomly sized subchunk (to is a length, not an offset)
   243  			from := rand.Intn(len(buf) / 2)
   244  			to := rand.Intn(len(buf) / 2)
   245  			sendbuf := buf[from : from+to]
   246  
   247  			n, err := s.Write(sendbuf)
   248  			if err != nil {
   249  				log.Debug("sender error. exiting:", err)
   250  				return
   251  			}
   252  
   253  			log.Debugf("sender wrote %d bytes", n)
   254  			senderWrote <- n
   255  		}
   256  	}
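        	// the sender keeps writing until Write errors out (e.g. when the
        	// stream closes); each completed write is reported on senderWrote so
        	// the test can observe when, and how much, it managed to send.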
   257  
   258  	// receive reads exactly `expect` bytes from a stream and fails
   259  	// the test if the read errors or comes up short.
   260  	receive := func(s inet.Stream, expect int) {
   261  		log.Debugf("receiver to read %d bytes", expect)
   262  		rbuf := make([]byte, expect)
   263  		n, err := io.ReadFull(s, rbuf)
   264  		if err != nil {
   265  			t.Error("read failed:", err)
   266  		}
   267  		if expect != n {
   268  			t.Errorf("read len differs: %d != %d", expect, n)
   269  		}
   270  	}
   271  
   272  	// ok let's do it!
   273  
   274  	// setup the networks
   275  	ctx := context.Background()
   276  	h1 := testutil.GenHostSwarm(t, ctx)
   277  	h2 := testutil.GenHostSwarm(t, ctx)
   278  
   279  	// setup sender handler on 1
   280  	h1.SetStreamHandler(protocol.TestingID, sender)
   281  
   282  	h2pi := h2.Peerstore().PeerInfo(h2.ID())
   283  	log.Debugf("dialing %s", h2pi.Addrs)
   284  	if err := h1.Connect(ctx, h2pi); err != nil {
   285  		t.Fatalf("Failed to connect: %s", err)
   286  	}
   287  
   288  	// open a stream, from 2->1, this is our reader
   289  	s, err := h2.NewStream(protocol.TestingID, h1.ID())
   290  	if err != nil {
   291  		t.Fatal(err)
   292  	}
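        	// note the direction: h1's handler is the sender, so data flows
        	// h1 -> h2 and this stream `s` is the end we read from.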
   293  
   294  	// let's make sure r/w works.
   295  	testSenderWrote := func(bytesE int) {
   296  		bytesA, writesA := writeStats()
   297  		if bytesA != bytesE {
   298  			t.Errorf("wrote %d bytes, expected %d (via %d writes)", bytesA, bytesE, writesA)
   299  		}
   300  	}
   301  
   302  	// rounds of lockstep write + drain (300ms per round)
   303  	roundsStart := time.Now()
   304  	roundsTotal := 0
   305  	for roundsTotal < (2 << 20) {
   306  		// let the sender fill its buffers; it will stop sending.
   307  		<-time.After(300 * time.Millisecond)
   308  		b, _ := writeStats()
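        		// with senderWrote drained and the receiver not reading, working
        		// backpressure should mean no further writes complete, hence the
        		// two zero checks below.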
   309  		testSenderWrote(0)
   310  		testSenderWrote(0)
   311  
   312  		// drain it all, wait again
   313  		receive(s, b)
   314  		roundsTotal = roundsTotal + b
   315  	}
   316  	roundsTime := time.Since(roundsStart)
   317  
   318  	// now read continuously, while we measure stats.
   319  	stop := make(chan struct{})
   320  	contStart := time.Now()
   321  
   322  	go func() {
   323  		for {
   324  			select {
   325  			case <-stop:
   326  				return
   327  			default:
   328  				receive(s, 2<<15)
   329  			}
   330  		}
   331  	}()
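        	// with the receiver draining continuously, the sender should rarely
        	// block, so senderWrote now accumulates as fast as the stream allows.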
   332  
   333  	contTotal := 0
   334  	for contTotal < (2 << 20) {
   335  		n := <-senderWrote
   336  		contTotal += n
   337  	}
   338  	stop <- struct{}{}
   339  	contTime := time.Since(contStart)
   340  
   341  	// now compare! continuous should've been faster AND larger
   342  	if roundsTime < contTime {
   343  		t.Error("continuous should have been faster")
   344  	}
   345  
   346  	if roundsTotal < contTotal {
   347  		t.Error("continuous should have been larger, too!")
   348  	}
   349  
   350  	// and a few more rounds for good measure ;)
   351  	for i := 0; i < 3; i++ {
   352  		// let the sender fill its buffers; it will stop sending.
   353  		<-time.After(300 * time.Millisecond)
   354  		b, _ := writeStats()
   355  		testSenderWrote(0)
   356  		testSenderWrote(0)
   357  
   358  		// drain it all, wait again
   359  		receive(s, b)
   360  	}
   361  
   362  	// this doesn't work :(
   363  	// // now for the sugar on top: let's tear down the receiver. it should
   364  	// // exit the sender.
   365  	// // h1.Close()
   366  	// testSenderWrote(0)
   367  	// testSenderWrote(0)
   368  	// select {
   369  	// case <-time.After(2 * time.Second):
   370  	// 	t.Error("receiver shutdown failed to exit sender")
   371  	// case <-senderDone:
   372  	// 	log.Info("handler backpressure works!")
   373  	// }
   374  }