github.com/lightlus/netstack@v1.2.0/tcpip/transport/tcp/tcp_noracedetector_test.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// These tests are flaky when run under the Go race detector: some iterations
// take long enough that the retransmit timer can fire, and the extra
// retransmitted packets then throw off the congestion window measurements.
//
// +build !race

package tcp_test

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/lightlus/netstack/tcpip"
	"github.com/lightlus/netstack/tcpip/buffer"
	"github.com/lightlus/netstack/tcpip/header"
	"github.com/lightlus/netstack/tcpip/transport/tcp"
	"github.com/lightlus/netstack/tcpip/transport/tcp/testing/context"
)

func DisabledTestFastRecovery(t *testing.T) {
	maxPayload := 32
	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
	defer c.Cleanup()

	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)

	const iterations = 7
	data := buffer.NewView(2 * maxPayload * (tcp.InitialCwnd << (iterations + 1)))
	for i := range data {
		data[i] = byte(i)
	}

	// Write all the data in one shot. Packets will only be written at the
	// MTU size though.
	if _, _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil {
		t.Fatalf("Write failed: %v", err)
	}

	// Do slow start for a few iterations.
	expected := tcp.InitialCwnd
	bytesRead := 0
	for i := 0; i < iterations; i++ {
		expected = tcp.InitialCwnd << uint(i)
		if i > 0 {
			// Acknowledge all the data received so far if not on
			// first iteration.
			c.SendAck(790, bytesRead)
		}

		// Read all packets expected on this iteration. Don't
		// acknowledge any of them just yet, so that we can measure the
		// congestion window.
		for j := 0; j < expected; j++ {
			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
			bytesRead += maxPayload
		}

		// Check we don't receive any more packets on this iteration.
		// The wait can't be too long or we'll trigger the sender's
		// retransmit timer.
		c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
	}

	// Send 3 duplicate acks. This should force an immediate retransmit of
	// the pending packet and put the sender into fast recovery.
	rtxOffset := bytesRead - maxPayload*expected
	for i := 0; i < 3; i++ {
		c.SendAck(790, rtxOffset)
	}

	// Receive the retransmitted packet.
	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)

	if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(1); got != want {
		t.Errorf("got stats.TCP.FastRetransmit.Value = %v, want = %v", got, want)
	}

	if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(1); got != want {
		t.Errorf("got stats.TCP.Retransmits.Value = %v, want = %v", got, want)
	}

	if got, want := c.Stack().Stats().TCP.FastRecovery.Value(), uint64(1); got != want {
		t.Errorf("got stats.TCP.FastRecovery.Value = %v, want = %v", got, want)
	}

	// Now send 7 more duplicate acks. Each of these should cause a window
	// inflation by 1 and cause the sender to send an extra packet.
	for i := 0; i < 7; i++ {
		c.SendAck(790, rtxOffset)
	}

	recover := bytesRead

	// Ensure no new packets arrive.
	c.CheckNoPacketTimeout("More packets received than expected during recovery after dupacks for this cwnd.",
		50*time.Millisecond)

	// Acknowledge half of the pending data.
	rtxOffset = bytesRead - expected*maxPayload/2
	c.SendAck(790, rtxOffset)

	// Receive the retransmit due to partial ack.
	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)

	if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(2); got != want {
		t.Errorf("got stats.TCP.FastRetransmit.Value = %v, want = %v", got, want)
	}

	if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(2); got != want {
		t.Errorf("got stats.TCP.Retransmits.Value = %v, want = %v", got, want)
	}

	// Receive the 10 extra packets that should have been released due to
	// the congestion window inflation in recovery.
	for i := 0; i < 10; i++ {
		c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
		bytesRead += maxPayload
	}

	// A partial ACK during recovery should reduce the congestion window by
	// the number of packets acked. Since we had "expected" packets
	// outstanding before sending the partial ACK and we acked expected/2 of
	// them, the cwnd and the outstanding count should now be expected/2 + 10
	// (7 from the later dupacks plus 3 from the original dupacks that
	// triggered fast recovery). This means the sender should not send any
	// more packets until we ack this one.
	c.CheckNoPacketTimeout("More packets received than expected during recovery after partial ack for this cwnd.",
		50*time.Millisecond)

	// Acknowledge all pending data up to the recover point.
	c.SendAck(790, recover)

	// At this point, the cwnd should reset to expected/2 and there are 10
	// packets outstanding.
	//
	// NOTE: Technically netstack is incorrect in that we adjust the cwnd on
	// the same segment that takes us out of recovery. But because of that
	// the actual cwnd at exit of recovery will be expected/2 + 1, as we
	// acked a cwnd's worth of packets, which increases the cwnd further by
	// 1 in congestion avoidance.
	//
	// In the first iteration, since there are already 10 packets
	// outstanding, we expect to get expected/2 + 1 - 10 packets. Subsequent
	// iterations will send us expected/2 + 1 + 1 packets, growing by one
	// each iteration.
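	//
	// As a hedged worked example (assuming tcp.InitialCwnd is 10 in this
	// stack): expected is 10 << 6 = 640 at this point, so the first train
	// after recovery should be 640/2 + 1 - 10 = 311 packets.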
	expected = expected/2 + 1 - 10
	for i := 0; i < iterations; i++ {
		// Read all packets expected on this iteration. Don't
		// acknowledge any of them just yet, so that we can measure the
		// congestion window.
		for j := 0; j < expected; j++ {
			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
			bytesRead += maxPayload
		}

		// Check we don't receive any more packets on this iteration.
		// The wait can't be too long or we'll trigger the sender's
		// retransmit timer.
		c.CheckNoPacketTimeout(fmt.Sprintf("More packets received (after deflation) than expected %d for this cwnd.", expected), 50*time.Millisecond)

		// Acknowledge all the data received so far.
		c.SendAck(790, bytesRead)

		// In congestion avoidance, the packet trains increase by 1 in
		// each iteration.
		if i == 0 {
			// After the first iteration we expect to get the full
			// congestion window worth of packets in every
			// iteration.
			expected += 10
		}
		expected++
	}
}
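
// newRenoRecoverySketch is not part of the original test: it is a hedged,
// self-contained sketch of the NewReno-style window accounting that the
// assertions above rely on. On the third dupack the sender retransmits,
// sets ssthresh to half the outstanding window, and inflates cwnd to
// ssthresh + 3; each further dupack inflates cwnd by one more packet,
// releasing one extra segment.
func newRenoRecoverySketch() {
	cwnd := 640          // outstanding window at the time of loss, in packets (illustrative)
	ssthresh := cwnd / 2 // halved on entering fast recovery

	cwnd = ssthresh + 3 // the 3 dupacks that triggered recovery inflate the window
	cwnd += 7           // 7 more dupacks release 7 extra packets, as checked above

	fmt.Printf("ssthresh=%d cwnd=%d\n", ssthresh, cwnd)
}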

func DisabledTestExponentialIncreaseDuringSlowStart(t *testing.T) {
	maxPayload := 32
	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
	defer c.Cleanup()

	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)

	const iterations = 7
	data := buffer.NewView(maxPayload * (tcp.InitialCwnd << (iterations + 1)))
	for i := range data {
		data[i] = byte(i)
	}

	// Write all the data in one shot. Packets will only be written at the
	// MTU size though.
	if _, _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil {
		t.Fatalf("Write failed: %v", err)
	}

	expected := tcp.InitialCwnd
	bytesRead := 0
	for i := 0; i < iterations; i++ {
		// Read all packets expected on this iteration. Don't
		// acknowledge any of them just yet, so that we can measure the
		// congestion window.
		for j := 0; j < expected; j++ {
			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
			bytesRead += maxPayload
		}

		// Check we don't receive any more packets on this iteration.
		// The wait can't be too long or we'll trigger the sender's
		// retransmit timer.
		c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)

		// Acknowledge all the data received so far.
		c.SendAck(790, bytesRead)

		// Double the number of expected packets for the next iteration.
		expected *= 2
	}
}
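
// slowStartSchedule is a hedged sketch (not part of the original test) of
// the packet counts the loop above expects: acknowledging a full window in
// slow start doubles the congestion window, so iteration i should deliver
// tcp.InitialCwnd << i packets.
func slowStartSchedule(iterations int) []int {
	counts := make([]int, iterations)
	for i := range counts {
		counts[i] = tcp.InitialCwnd << uint(i) // doubles every iteration
	}
	return counts
}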

func DisabledTestCongestionAvoidance(t *testing.T) {
	maxPayload := 32
	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
	defer c.Cleanup()

	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)

	const iterations = 7
	data := buffer.NewView(2 * maxPayload * (tcp.InitialCwnd << (iterations + 1)))
	for i := range data {
		data[i] = byte(i)
	}

	// Write all the data in one shot. Packets will only be written at the
	// MTU size though.
	if _, _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil {
		t.Fatalf("Write failed: %v", err)
	}

	// Do slow start for a few iterations.
	expected := tcp.InitialCwnd
	bytesRead := 0
	for i := 0; i < iterations; i++ {
		expected = tcp.InitialCwnd << uint(i)
		if i > 0 {
			// Acknowledge all the data received so far if not on
			// first iteration.
			c.SendAck(790, bytesRead)
		}

		// Read all packets expected on this iteration. Don't
		// acknowledge any of them just yet, so that we can measure the
		// congestion window.
		for j := 0; j < expected; j++ {
			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
			bytesRead += maxPayload
		}

		// Check we don't receive any more packets on this iteration.
		// The wait can't be too long or we'll trigger the sender's
		// retransmit timer.
		c.CheckNoPacketTimeout("More packets received than expected for this cwnd (slow start phase).", 50*time.Millisecond)
	}

	// Don't acknowledge the first packet of the last packet train. Let's
	// wait for them to time out, which will trigger a restart of slow
	// start, and initialization of ssthresh to cwnd/2.
	rtxOffset := bytesRead - maxPayload*expected
	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)

	// Acknowledge all the data received so far.
	c.SendAck(790, bytesRead)

	// This part is tricky: when the timeout happened, we had "expected"
	// packets pending, cwnd reset to 1, and ssthresh set to expected/2.
	// By acknowledging "expected" packets, the slow-start part will
	// increase cwnd to expected/2 (which "consumes" expected/2-1 of the
	// acknowledgements), then the congestion avoidance part will consume
	// an extra expected/2 acks to take cwnd to expected/2 + 1. One ack
	// remains in the "ack count" (which will cause cwnd to be incremented
	// once it reaches cwnd acks).
	//
	// So we're straight into congestion avoidance with cwnd set to
	// expected/2 + 1.
	//
	// Check that packet trains of cwnd packets are sent, and that cwnd is
	// incremented by 1 after we acknowledge each packet train.
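	//
	// As a hedged worked example (assuming tcp.InitialCwnd is 10 in this
	// stack): expected is 10 << 6 = 640 at the timeout, so the first
	// congestion avoidance train should be 640/2 + 1 = 321 packets.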
	expected = expected/2 + 1
	for i := 0; i < iterations; i++ {
		// Read all packets expected on this iteration. Don't
		// acknowledge any of them just yet, so that we can measure the
		// congestion window.
		for j := 0; j < expected; j++ {
			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
			bytesRead += maxPayload
		}

		// Check we don't receive any more packets on this iteration.
		// The wait can't be too long or we'll trigger the sender's
		// retransmit timer.
		c.CheckNoPacketTimeout("More packets received than expected for this cwnd (congestion avoidance phase).", 50*time.Millisecond)

		// Acknowledge all the data received so far.
		c.SendAck(790, bytesRead)

		// In congestion avoidance, the packet trains increase by 1 in
		// each iteration.
		expected++
	}
}
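
// congestionAvoidanceSchedule is a hedged sketch (not part of the original
// test) of the additive-increase schedule checked above: the sender leaves
// the timeout with cwnd = expected/2 + 1, and each fully-acknowledged
// window then grows cwnd by exactly one packet.
func congestionAvoidanceSchedule(start, iterations int) []int {
	counts := make([]int, iterations)
	cwnd := start
	for i := range counts {
		counts[i] = cwnd
		cwnd++ // +1 packet per acknowledged window in congestion avoidance
	}
	return counts
}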

// cubicCwnd returns an estimate of a cubic window given the original cwnd,
// wMax, the last congestion event time, and the smoothed RTT.
func cubicCwnd(origCwnd int, wMax int, congEventTime time.Time, sRTT time.Duration) int {
	cwnd := float64(origCwnd)
	// We wait 50ms between each iteration, so the sRTT as computed by cubic
	// should be close to 50ms.
	elapsed := (time.Since(congEventTime) + sRTT).Seconds()
	k := math.Cbrt(float64(wMax) * 0.3 / 0.7)
	wtRTT := 0.4*math.Pow(elapsed-k, 3) + float64(wMax)
	cwnd += (wtRTT - cwnd) / cwnd
	return int(cwnd)
}
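
// cubicCwndExample is a hedged usage sketch for cubicCwnd; the values are
// illustrative, not taken from a real run. It estimates the sender's window
// some time after a congestion event, following the cubic curve
// W(t) = C*(t-K)^3 + wMax that cubicCwnd approximates.
func cubicCwndExample() {
	congEventTime := time.Now().Add(-250 * time.Millisecond) // event ~250ms ago
	w := cubicCwnd(448, 640, congEventTime, 50*time.Millisecond)
	fmt.Printf("estimated cubic cwnd: %d packets\n", w)
}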

func TestCubicCongestionAvoidance(t *testing.T) {
	maxPayload := 32
	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
	defer c.Cleanup()

	enableCUBIC(t, c)

	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)

	const iterations = 7
	data := buffer.NewView(2 * maxPayload * (tcp.InitialCwnd << (iterations + 1)))

	for i := range data {
		data[i] = byte(i)
	}

	// Write all the data in one shot. Packets will only be written at the
	// MTU size though.
	if _, _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil {
		t.Fatalf("Write failed: %v", err)
	}

	// Do slow start for a few iterations.
	expected := tcp.InitialCwnd
	bytesRead := 0
	for i := 0; i < iterations; i++ {
		expected = tcp.InitialCwnd << uint(i)
		if i > 0 {
			// Acknowledge all the data received so far if not on
			// first iteration.
			c.SendAck(790, bytesRead)
		}

		// Read all packets expected on this iteration. Don't
		// acknowledge any of them just yet, so that we can measure the
		// congestion window.
		for j := 0; j < expected; j++ {
			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
			bytesRead += maxPayload
		}

		// Check we don't receive any more packets on this iteration.
		// The wait can't be too long or we'll trigger the sender's
		// retransmit timer.
		c.CheckNoPacketTimeout("More packets received than expected for this cwnd (during slow-start phase).", 50*time.Millisecond)
	}

	// Don't acknowledge the first packet of the last packet train. Let's
	// wait for them to time out, which will trigger a restart of slow
	// start, and initialization of ssthresh to cwnd * 0.7.
	rtxOffset := bytesRead - maxPayload*expected
	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)

	// Acknowledge all pending data.
	c.SendAck(790, bytesRead)

	// Store the time we sent the ACK. Assuming a 200ms RTO, we estimate
	// that the sender's retransmit timer will fire 200ms from now, sending
	// it back into slow start.
	packetDropTime := time.Now().Add(200 * time.Millisecond)

	// This part is tricky: when the timeout happened, we had "expected"
	// packets pending, cwnd reset to 1, and ssthresh set to expected * 0.7.
	// By acknowledging "expected" packets, the slow-start part will
	// increase cwnd to ssthresh, essentially putting the connection
	// straight into congestion avoidance.
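	//
	// As a hedged worked example (assuming tcp.InitialCwnd is 10 in this
	// stack): expected is 10 << 6 = 640 here, so wMax becomes 640 and the
	// post-event window starts near 0.7 * 640 = 448.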
	wMax := expected
	// Lower expected as per the cubic spec after a congestion event.
	expected = int(float64(expected) * 0.7)
	cwnd := expected
	for i := 0; i < iterations; i++ {
		// Cubic grows the window independently of ACKs: cubic window
		// growth is a function of the time elapsed since the last
		// congestion event. As a result the congestion window does not
		// grow deterministically in response to ACKs.
		//
		// We need to roughly estimate what the cwnd of the sender is
		// based on when we sent the dupacks.
		cwnd := cubicCwnd(cwnd, wMax, packetDropTime, 50*time.Millisecond)

		packetsExpected := cwnd
		for j := 0; j < packetsExpected; j++ {
			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
			bytesRead += maxPayload
		}
		t.Logf("expected packets received, next trying to receive any extra packets that may come")

		// If our estimate was correct there should be no more pending packets.
		// We attempt to read a packet a few times with a short sleep in between
		// to ensure that we don't see the sender send any unexpected packets.
		unexpectedPackets := 0
		for {
			gotPacket := c.ReceiveNonBlockingAndCheckPacket(data, bytesRead, maxPayload)
			if !gotPacket {
				break
			}
			bytesRead += maxPayload
			unexpectedPackets++
			time.Sleep(1 * time.Millisecond)
		}
		if unexpectedPackets != 0 {
			t.Fatalf("received %d unexpected packets for iteration %d", unexpectedPackets, i)
		}
		// Check we don't receive any more packets on this iteration.
		// The wait can't be too long or we'll trigger the sender's
		// retransmit timer.
		c.CheckNoPacketTimeout("More packets received than expected for this cwnd (congestion avoidance).", 5*time.Millisecond)

		// Acknowledge all the data received so far.
		c.SendAck(790, bytesRead)
	}
}

func DisabledTestRetransmit(t *testing.T) {
	maxPayload := 32
	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))
	defer c.Cleanup()

	c.CreateConnected(789, 30000, -1 /* epRcvBuf */)

	const iterations = 7
	data := buffer.NewView(maxPayload * (tcp.InitialCwnd << (iterations + 1)))
	for i := range data {
		data[i] = byte(i)
	}

	// Write all the data in two shots. Packets will only be written at the
	// MTU size though.
	half := data[:len(data)/2]
	if _, _, err := c.EP.Write(tcpip.SlicePayload(half), tcpip.WriteOptions{}); err != nil {
		t.Fatalf("Write failed: %v", err)
	}
	half = data[len(data)/2:]
	if _, _, err := c.EP.Write(tcpip.SlicePayload(half), tcpip.WriteOptions{}); err != nil {
		t.Fatalf("Write failed: %v", err)
	}

	// Do slow start for a few iterations.
	expected := tcp.InitialCwnd
	bytesRead := 0
	for i := 0; i < iterations; i++ {
		expected = tcp.InitialCwnd << uint(i)
		if i > 0 {
			// Acknowledge all the data received so far if not on
			// first iteration.
			c.SendAck(790, bytesRead)
		}

		// Read all packets expected on this iteration. Don't
		// acknowledge any of them just yet, so that we can measure the
		// congestion window.
		for j := 0; j < expected; j++ {
			c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)
			bytesRead += maxPayload
		}

		// Check we don't receive any more packets on this iteration.
		// The wait can't be too long or we'll trigger the sender's
		// retransmit timer.
		c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
	}

	// Wait for a timeout and receive the retransmitted packet.
	rtxOffset := bytesRead - maxPayload*expected
	c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)

	if got, want := c.Stack().Stats().TCP.Timeouts.Value(), uint64(1); got != want {
		t.Errorf("got stats.TCP.Timeouts.Value = %v, want = %v", got, want)
	}

	if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(1); got != want {
		t.Errorf("got stats.TCP.Retransmits.Value = %v, want = %v", got, want)
	}

	if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.Timeouts.Value(), uint64(1); got != want {
		t.Errorf("got EP SendErrors.Timeouts.Value = %v, want = %v", got, want)
	}

	if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.Retransmits.Value(), uint64(1); got != want {
		t.Errorf("got EP stats SendErrors.Retransmits.Value = %v, want = %v", got, want)
	}

	if got, want := c.Stack().Stats().TCP.SlowStartRetransmits.Value(), uint64(1); got != want {
		t.Errorf("got stats.TCP.SlowStartRetransmits.Value = %v, want = %v", got, want)
	}

	// Acknowledge half of the pending data.
	rtxOffset = bytesRead - expected*maxPayload/2
	c.SendAck(790, rtxOffset)

	// Receive the remaining data, making sure that acknowledged data is not
	// retransmitted.
	for offset := rtxOffset; offset < len(data); offset += maxPayload {
		c.ReceiveAndCheckPacket(data, offset, maxPayload)
		c.SendAck(790, offset+maxPayload)
	}

	c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
}
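
// timeoutRecoverySketch is a hedged sketch (not part of the original test)
// of the RTO behavior asserted above: a retransmit timeout collapses cwnd to
// one segment and sets ssthresh to half the window in flight, so the
// retransmission happens in slow start (hence the SlowStartRetransmits
// counter).
func timeoutRecoverySketch(inFlight int) (cwnd, ssthresh int) {
	ssthresh = inFlight / 2 // halve the outstanding window on timeout
	cwnd = 1                // restart from a single segment in slow start
	return cwnd, ssthresh
}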