github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/tcpip/transport/tcp/tcp_noracedetector_test.go

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  //
    15  // These tests are flaky when run under the Go race detector: some iterations
    16  // take long enough that the retransmit timer can kick in, and the extra
    17  // packets it generates break the congestion window measurements.
    18  //
    19  // +build !race
    20  
    21  package tcp_test
    22  
    23  import (
    24  	"bytes"
    25  	"fmt"
    26  	"math"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/SagerNet/gvisor/pkg/tcpip"
    31  	"github.com/SagerNet/gvisor/pkg/tcpip/header"
    32  	"github.com/SagerNet/gvisor/pkg/tcpip/transport/tcp"
    33  	"github.com/SagerNet/gvisor/pkg/tcpip/transport/tcp/testing/context"
    34  	"github.com/SagerNet/gvisor/pkg/test/testutil"
    35  )
    36  
    37  func TestFastRecovery(t *testing.T) {
    38  	maxPayload := 32
    39  	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
    40  	defer c.Cleanup()
    41  
    42  	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
    43  
    44  	const iterations = 3
    45  	data := make([]byte, 2*maxPayload*(tcp.InitialCwnd<<(iterations+1)))
    46  	for i := range data {
    47  		data[i] = byte(i)
    48  	}
    49  
    50  	// Write all the data in one shot. Packets will only be written at the
    51  	// MTU size though.
    52  	var r bytes.Reader
    53  	r.Reset(data)
    54  	if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
    55  		t.Fatalf("Write failed: %s", err)
    56  	}
    57  
    58  	// Do slow start for a few iterations.
    59  	expected := tcp.InitialCwnd
    60  	bytesRead := 0
    61  	for i := 0; i < iterations; i++ {
    62  		expected = tcp.InitialCwnd << uint(i)
    63  		if i > 0 {
    64  			// Acknowledge all the data received so far if not on
    65  			// first iteration.
    66  			c.SendAck(790, bytesRead)
    67  		}
    68  
    69  		// Read all packets expected on this iteration. Don't
    70  		// acknowledge any of them just yet, so that we can measure the
    71  		// congestion window.
    72  		for j := 0; j < expected; j++ {
    73  			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
    74  			bytesRead += maxPayload
    75  		}
    76  
    77  		// Check that we don't receive any more packets on this iteration.
    78  		// The wait can't be too long or the retransmit timer will fire.
    79  		c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
    80  	}
    81  
    82  	// Send 3 duplicate acks. This should force an immediate retransmit of
    83  	// the pending packet and put the sender into fast recovery.
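        	// rtxOffset is the first unacked byte (the start of the last packet
        	// train); the dupacks below re-ack it, so the sender should
        	// fast-retransmit the segment at this offset.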
    84  	rtxOffset := bytesRead - maxPayload*expected
    85  	for i := 0; i < 3; i++ {
    86  		c.SendAck(790, rtxOffset)
    87  	}
    88  
    89  	// Receive the retransmitted packet.
    90  	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
    91  
    92  	// Wait before checking metrics.
    93  	metricPollFn := func() error {
    94  		if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(1); got != want {
    95  			return fmt.Errorf("got stats.TCP.FastRetransmit.Value = %d, want = %d", got, want)
    96  		}
    97  		if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(1); got != want {
    98  			return fmt.Errorf("got stats.TCP.Retransmits.Value = %d, want = %d", got, want)
    99  		}
   100  
   101  		if got, want := c.Stack().Stats().TCP.FastRecovery.Value(), uint64(1); got != want {
   102  			return fmt.Errorf("got stats.TCP.FastRecovery.Value = %d, want = %d", got, want)
   103  		}
   104  		return nil
   105  	}
   106  
   107  	if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
   108  		t.Error(err)
   109  	}
   110  
   111  	// Now send 7 more duplicate acks. Each of these should inflate the
   112  	// window by 1 and cause the sender to send an extra packet.
   113  	for i := 0; i < 7; i++ {
   114  		c.SendAck(790, rtxOffset)
   115  	}
   116  
   117  	recover := bytesRead
   118  
   119  	// Ensure no new packets arrive.
   120  	c.CheckNoPacketTimeout("More packets received than expected during recovery after dupacks for this cwnd.",
   121  		50*time.Millisecond)
   122  
   123  	// Acknowledge half of the pending data.
   124  	rtxOffset = bytesRead - expected*maxPayload/2
   125  	c.SendAck(790, rtxOffset)
   126  
   127  	// Receive the retransmit due to partial ack.
   128  	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
   129  
   130  	// Wait before checking metrics.
   131  	metricPollFn = func() error {
   132  		if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(2); got != want {
   133  			return fmt.Errorf("got stats.TCP.FastRetransmit.Value = %d, want = %d", got, want)
   134  		}
   135  		if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(2); got != want {
   136  			return fmt.Errorf("got stats.TCP.Retransmits.Value = %d, want = %d", got, want)
   137  		}
   138  		return nil
   139  	}
   140  	if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
   141  		t.Error(err)
   142  	}
   143  
   144  	// Receive the 10 extra packets that should have been released due to
   145  	// the congestion window inflation in recovery.
   146  	for i := 0; i < 10; i++ {
   147  		c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
   148  		bytesRead += maxPayload
   149  	}
   150  
   151  	// A partial ACK during recovery should reduce the congestion window by
   152  	// the amount acked. Since we had "expected" packets outstanding before
   153  	// sending the partial ack and we acked expected/2, the cwnd and the
   154  	// outstanding count should both be expected/2 + 10 (7 dupacks plus the
   155  	// original 3 dupacks that triggered fast recovery), which means the
   156  	// sender should not send any more packets until we ack this one.
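        	//
        	// As a rough worked example (assuming tcp.InitialCwnd is 10): the
        	// last slow-start train had expected = 40 packets, so after the
        	// partial ack both cwnd and the outstanding count are about
        	// 40/2 + 10 = 30, leaving no room to send new packets.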
   157  	c.CheckNoPacketTimeout("More packets received than expected during recovery after partial ack for this cwnd.",
   158  		50*time.Millisecond)
   159  
   160  	// Acknowledge all pending data to recover point.
   161  	c.SendAck(790, recover)
   162  
   163  	// At this point, the cwnd should reset to expected/2 and there are 10
   164  	// packets outstanding.
   165  	//
   166  	// NOTE: Technically netstack is incorrect in that we adjust the cwnd on
   167  	// the same segment that takes us out of recovery. Because of that, the
   168  	// actual cwnd at exit of recovery will be expected/2 + 1: we acked a
   169  	// full cwnd worth of packets, which increases the cwnd by a further 1
   170  	// in congestion avoidance.
   171  	//
   172  	// In the first iteration, since there are already 10 packets
   173  	// outstanding, we expect to get only expected/2 + 1 - 10 packets; each
   174  	// subsequent iteration sends a full window that grows by 1 per iteration.
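        	//
        	// As a rough worked example (assuming tcp.InitialCwnd is 10):
        	// expected was 40 at the end of slow start, so the iterations below
        	// should see trains of 40/2 + 1 - 10 = 11, then 22, then 23 packets.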
   175  	expected = expected/2 + 1 - 10
   176  	for i := 0; i < iterations; i++ {
   177  		// Read all packets expected on this iteration. Don't
   178  		// acknowledge any of them just yet, so that we can measure the
   179  		// congestion window.
   180  		for j := 0; j < expected; j++ {
   181  			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
   182  			bytesRead += maxPayload
   183  		}
   184  
   185  		// Check that we don't receive any more packets on this iteration.
   186  		// The wait can't be too long or the retransmit timer will fire.
   187  		c.CheckNoPacketTimeout(fmt.Sprintf("More packets received (after deflation) than expected %d for this cwnd.", expected), 50*time.Millisecond)
   188  
   189  		// Acknowledge all the data received so far.
   190  		c.SendAck(790, bytesRead)
   191  
   192  		// In congestion avoidance, the packet trains grow by 1 in
   193  		// each iteration.
   194  		if i == 0 {
   195  			// After the first iteration we expect to get the full
   196  			// congestion window worth of packets in every
   197  			// iteration.
   198  			expected += 10
   199  		}
   200  		expected++
   201  	}
   202  }
   203  
   204  func TestExponentialIncreaseDuringSlowStart(t *testing.T) {
   205  	maxPayload := 32
   206  	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
   207  	defer c.Cleanup()
   208  
   209  	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
   210  
   211  	const iterations = 3
   212  	data := make([]byte, maxPayload*(tcp.InitialCwnd<<(iterations+1)))
   213  	for i := range data {
   214  		data[i] = byte(i)
   215  	}
   216  
   217  	// Write all the data in one shot. Packets will only be written at the
   218  	// MTU size though.
   219  	var r bytes.Reader
   220  	r.Reset(data)
   221  	if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
   222  		t.Fatalf("Write failed: %s", err)
   223  	}
   224  
   225  	expected := tcp.InitialCwnd
   226  	bytesRead := 0
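        	// Assuming tcp.InitialCwnd is 10, the iterations below expect
        	// packet trains of 10, 20, and 40 segments as the window doubles.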
   227  	for i := 0; i < iterations; i++ {
   228  		// Read all packets expected on this iteration. Don't
   229  		// acknowledge any of them just yet, so that we can measure the
   230  		// congestion window.
   231  		for j := 0; j < expected; j++ {
   232  			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
   233  			bytesRead += maxPayload
   234  		}
   235  
   236  		// Check that we don't receive any more packets on this iteration.
   237  		// The wait can't be too long or the retransmit timer will fire.
   238  		c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
   239  
   240  		// Acknowledge all the data received so far.
   241  		c.SendAck(790, bytesRead)
   242  
   243  		// Double the number of expected packets for the next iteration.
   244  		expected *= 2
   245  	}
   246  }
   247  
   248  func TestCongestionAvoidance(t *testing.T) {
   249  	maxPayload := 32
   250  	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
   251  	defer c.Cleanup()
   252  
   253  	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
   254  
   255  	const iterations = 3
   256  	data := make([]byte, 2*maxPayload*(tcp.InitialCwnd<<(iterations+1)))
   257  	for i := range data {
   258  		data[i] = byte(i)
   259  	}
   260  
   261  	// Write all the data in one shot. Packets will only be written at the
   262  	// MTU size though.
   263  	var r bytes.Reader
   264  	r.Reset(data)
   265  	if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
   266  		t.Fatalf("Write failed: %s", err)
   267  	}
   268  
   269  	// Do slow start for a few iterations.
   270  	expected := tcp.InitialCwnd
   271  	bytesRead := 0
   272  	for i := 0; i < iterations; i++ {
   273  		expected = tcp.InitialCwnd << uint(i)
   274  		if i > 0 {
   275  			// Acknowledge all the data received so far if not on
   276  			// first iteration.
   277  			c.SendAck(790, bytesRead)
   278  		}
   279  
   280  		// Read all packets expected on this iteration. Don't
   281  		// acknowledge any of them just yet, so that we can measure the
   282  		// congestion window.
   283  		for j := 0; j < expected; j++ {
   284  			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
   285  			bytesRead += maxPayload
   286  		}
   287  
   288  		// Check that we don't receive any more packets on this iteration.
   289  		// The wait can't be too long or the retransmit timer will fire.
   290  		c.CheckNoPacketTimeout("More packets received than expected for this cwnd (slow start phase).", 50*time.Millisecond)
   291  	}
   292  
   293  	// Don't acknowledge the last packet train. Let it time out, which will
   294  	// trigger a retransmit, a restart of slow start, and initialization of
   295  	// ssthresh to cwnd/2.
   296  	rtxOffset := bytesRead - maxPayload*expected
   297  	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
   298  
   299  	// Acknowledge all the data received so far.
   300  	c.SendAck(790, bytesRead)
   301  
   302  	// This part is tricky: when the timeout happened, we had "expected"
   303  	// packets pending, cwnd reset to 1, and ssthresh set to expected/2.
   304  	// By acknowledging "expected" packets, the slow-start part will
   305  	// increase cwnd to expected/2 (which "consumes" expected/2-1 of the
   306  	// acknowledgements), then the congestion avoidance part will consume
   307  	// an extra expected/2 acks to take cwnd to expected/2 + 1. One ack
   308  	// remains in the "ack count" (which will cause cwnd to be incremented
   309  	// once it reaches cwnd acks).
   310  	//
   311  	// So we're straight into congestion avoidance with cwnd set to
   312  	// expected/2 + 1.
   313  	//
   314  	// Check that packet trains of cwnd packets are sent, and that cwnd is
   315  	// incremented by 1 after we acknowledge each packet train.
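        	//
        	// As a rough worked example (assuming tcp.InitialCwnd is 10):
        	// expected was 40 at the end of slow start, so the iterations below
        	// should see trains of 21, 22, and 23 packets.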
   316  	expected = expected/2 + 1
   317  	for i := 0; i < iterations; i++ {
   318  		// Read all packets expected on this iteration. Don't
   319  		// acknowledge any of them just yet, so that we can measure the
   320  		// congestion window.
   321  		for j := 0; j < expected; j++ {
   322  			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
   323  			bytesRead += maxPayload
   324  		}
   325  
   326  		// Check that we don't receive any more packets on this iteration.
   327  		// The wait can't be too long or the retransmit timer will fire.
   328  		c.CheckNoPacketTimeout("More packets received than expected for this cwnd (congestion avoidance phase).", 50*time.Millisecond)
   329  
   330  		// Acknowledge all the data received so far.
   331  		c.SendAck(790, bytesRead)
   332  
   333  		// In congestion avoidance, the packet trains grow by 1 in
   334  		// each iteration.
   335  		expected++
   336  	}
   337  }
   338  
   339  // cubicCwnd returns an estimate of a cubic window given origCwnd, wMax,
   340  // the last congestion event time, and sRTT.
   341  func cubicCwnd(origCwnd int, wMax int, congEventTime time.Time, sRTT time.Duration) int {
   342  	cwnd := float64(origCwnd)
   343  	// We wait 50ms between each iteration so sRTT as computed by cubic
   344  	// should be close to 50ms.
   345  	elapsed := (time.Since(congEventTime) + sRTT).Seconds()
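        	// k approximates the time (in seconds) for the cubic function to
        	// grow back to wMax, and wtRTT evaluates the cubic window
        	// W(t) = C*(t-K)^3 + wMax one sRTT into the future (C is 0.4 here).
        	// cwnd is then nudged toward wtRTT by (wtRTT - cwnd) / cwnd.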
   346  	k := math.Cbrt(float64(wMax) * 0.3 / 0.7)
   347  	wtRTT := 0.4*math.Pow(elapsed-k, 3) + float64(wMax)
   348  	cwnd += (wtRTT - cwnd) / cwnd
   349  	return int(cwnd)
   350  }
   351  
   352  func TestCubicCongestionAvoidance(t *testing.T) {
   353  	maxPayload := 32
   354  	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
   355  	defer c.Cleanup()
   356  
   357  	enableCUBIC(t, c)
   358  
   359  	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
   360  
   361  	const iterations = 3
   362  	data := make([]byte, 2*maxPayload*(tcp.InitialCwnd<<(iterations+1)))
   363  	for i := range data {
   364  		data[i] = byte(i)
   365  	}
   366  
   367  	// Write all the data in one shot. Packets will only be written at the
   368  	// MTU size though.
   369  	var r bytes.Reader
   370  	r.Reset(data)
   371  	if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
   372  		t.Fatalf("Write failed: %s", err)
   373  	}
   374  
   375  	// Do slow start for a few iterations.
   376  	expected := tcp.InitialCwnd
   377  	bytesRead := 0
   378  	for i := 0; i < iterations; i++ {
   379  		expected = tcp.InitialCwnd << uint(i)
   380  		if i > 0 {
   381  			// Acknowledge all the data received so far if not on
   382  			// first iteration.
   383  			c.SendAck(790, bytesRead)
   384  		}
   385  
   386  		// Read all packets expected on this iteration. Don't
   387  		// acknowledge any of them just yet, so that we can measure the
   388  		// congestion window.
   389  		for j := 0; j < expected; j++ {
   390  			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
   391  			bytesRead += maxPayload
   392  		}
   393  
   394  		// Check that we don't receive any more packets on this iteration.
   395  		// The wait can't be too long or the retransmit timer will fire.
   396  		c.CheckNoPacketTimeout("More packets received than expected for this cwnd (during slow-start phase).", 50*time.Millisecond)
   397  	}
   398  
   399  	// Don't acknowledge the last packet train. Let it time out, which will
   400  	// trigger a retransmit, a restart of slow start, and initialization of
   401  	// ssthresh to cwnd * 0.7.
   402  	rtxOffset := bytesRead - maxPayload*expected
   403  	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
   404  
   405  	// Acknowledge all pending data.
   406  	c.SendAck(790, bytesRead)
   407  
   408  	// Store away the time we sent the ACK. Assuming a 200ms RTO, we
   409  	// estimate that the sender will hit the RTO 200ms from now and go
   410  	// back into slow start.
   411  	packetDropTime := time.Now().Add(200 * time.Millisecond)
   412  
   413  	// This part is tricky: when the timeout happened, we had "expected"
   414  	// packets pending, cwnd reset to 1, and ssthresh set to expected * 0.7.
   415  	// By acknowledging "expected" packets, the slow-start part will
   416  	// increase cwnd up to ssthresh (expected * 0.7), essentially putting
   417  	// the connection straight into congestion avoidance.
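        	//
        	// As a rough worked example (assuming tcp.InitialCwnd is 10):
        	// expected was 40 at the end of slow start, so wMax is 40 and the
        	// post-loss window estimate starts at int(40 * 0.7) = 28.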
   418  	wMax := expected
   419  	// Lower expected as per cubic spec after a congestion event.
   420  	expected = int(float64(expected) * 0.7)
   421  	cwnd := expected
   422  	for i := 0; i < iterations; i++ {
   423  		// Cubic grows the window independently of ACKs: cubic window
   424  		// growth is a function of the time elapsed since the last
   425  		// congestion event. As a result the congestion window does not
   426  		// grow deterministically in response to ACKs.
   427  		//
   428  		// We need to roughly estimate what the cwnd of the sender is
   429  		// based on when we sent the dupacks.
   430  		cwnd := cubicCwnd(cwnd, wMax, packetDropTime, 50*time.Millisecond)
   431  
   432  		packetsExpected := cwnd
   433  		for j := 0; j < packetsExpected; j++ {
   434  			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
   435  			bytesRead += maxPayload
   436  		}
   437  		t.Logf("expected packets received, next trying to receive any extra packets that may come")
   438  
   439  		// If our estimate was correct there should be no more pending packets.
   440  		// Keep reading packets, with a short sleep in between, until none are
   441  		// left, to ensure the sender does not send any unexpected packets.
   442  		unexpectedPackets := 0
   443  		for {
   444  			gotPacket := c.ReceiveNonBlockingAndCheckPacket(data, bytesRead, maxPayload)
   445  			if !gotPacket {
   446  				break
   447  			}
   448  			bytesRead += maxPayload
   449  			unexpectedPackets++
   450  			time.Sleep(1 * time.Millisecond)
   451  		}
   452  		if unexpectedPackets != 0 {
   453  			t.Fatalf("received %d unexpected packets for iteration %d", unexpectedPackets, i)
   454  		}
   455  		// Check that we don't receive any more packets on this iteration.
   456  		// The wait can't be too long or the retransmit timer will fire.
   457  		c.CheckNoPacketTimeout("More packets received than expected for this cwnd (congestion avoidance).", 5*time.Millisecond)
   458  
   459  		// Acknowledge all the data received so far.
   460  		c.SendAck(790, bytesRead)
   461  	}
   462  }
   463  
   464  func TestRetransmit(t *testing.T) {
   465  	maxPayload := 32
   466  	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
   467  	defer c.Cleanup()
   468  
   469  	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)
   470  
   471  	const iterations = 3
   472  	data := make([]byte, maxPayload*(tcp.InitialCwnd<<(iterations+1)))
   473  	for i := range data {
   474  		data[i] = byte(i)
   475  	}
   476  
   477  	// Write all the data in two shots. Packets will only be written at the
   478  	// MTU size though.
   479  	var r bytes.Reader
   480  	r.Reset(data[:len(data)/2])
   481  	if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
   482  		t.Fatalf("Write failed: %s", err)
   483  	}
   484  	r.Reset(data[len(data)/2:])
   485  	if _, err := c.EP.Write(&r, tcpip.WriteOptions{}); err != nil {
   486  		t.Fatalf("Write failed: %s", err)
   487  	}
   488  
   489  	// Do slow start for a few iterations.
   490  	expected := tcp.InitialCwnd
   491  	bytesRead := 0
   492  	for i := 0; i < iterations; i++ {
   493  		expected = tcp.InitialCwnd << uint(i)
   494  		if i > 0 {
   495  			// Acknowledge all the data received so far if not on
   496  			// first iteration.
   497  			c.SendAck(790, bytesRead)
   498  		}
   499  
   500  		// Read all packets expected on this iteration. Don't
   501  		// acknowledge any of them just yet, so that we can measure the
   502  		// congestion window.
   503  		for j := 0; j < expected; j++ {
   504  			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
   505  			bytesRead += maxPayload
   506  		}
   507  
   508  		// Check that we don't receive any more packets on this iteration.
   509  		// The wait can't be too long or the retransmit timer will fire.
   510  		c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
   511  	}
   512  
   513  	// Wait for a timeout and retransmit.
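        	// The first unacked byte is at the start of the last packet train;
        	// once the RTO fires the sender should retransmit starting there.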
   514  	rtxOffset := bytesRead - maxPayload*expected
   515  	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)
   516  
   517  	metricPollFn := func() error {
   518  		if got, want := c.Stack().Stats().TCP.Timeouts.Value(), uint64(1); got != want {
   519  			return fmt.Errorf("got stats.TCP.Timeouts.Value = %d, want = %d", got, want)
   520  		}
   521  
   522  		if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(1); got != want {
   523  			return fmt.Errorf("got stats.TCP.Retransmits.Value = %d, want = %d", got, want)
   524  		}
   525  
   526  		if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.Timeouts.Value(), uint64(1); got != want {
   527  			return fmt.Errorf("got EP SendErrors.Timeouts.Value = %d, want = %d", got, want)
   528  		}
   529  
   530  		if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.Retransmits.Value(), uint64(1); got != want {
   531  			return fmt.Errorf("got EP stats SendErrors.Retransmits.Value = %d, want = %d", got, want)
   532  		}
   533  
   534  		if got, want := c.Stack().Stats().TCP.SlowStartRetransmits.Value(), uint64(1); got != want {
   535  			return fmt.Errorf("got stats.TCP.SlowStartRetransmits.Value = %d, want = %d", got, want)
   536  		}
   537  
   538  		return nil
   539  	}
   540  
   541  	// Poll when checking metrics.
   542  	if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
   543  		t.Error(err)
   544  	}
   545  
   546  	// Acknowledge half of the pending data.
   547  	rtxOffset = bytesRead - expected*maxPayload/2
   548  	c.SendAck(790, rtxOffset)
   549  
   550  	// Receive the remaining data, making sure that acknowledged data is not
   551  	// retransmitted.
   552  	for offset := rtxOffset; offset < len(data); offset += maxPayload {
   553  		c.ReceiveAndCheckPacket(data, offset, maxPayload)
   554  		c.SendAck(790, offset+maxPayload)
   555  	}
   556  
   557  	c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
   558  }