github.com/tumi8/quic-go@v0.37.4-tum/noninternal/congestion/cubic_test.go

package congestion

import (
	"math"
	"time"

	"github.com/tumi8/quic-go/noninternal/protocol"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

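// The constants below emulate numConnections Reno flows sharing a single
// CUBIC connection, following the N-connection emulation used by the
// Chromium-derived implementation. As a rough worked example, assuming
// beta = 0.7 and betaLastMax = 0.85 (the values expected in cubic.go;
// adjust if they differ):
//
//	nConnectionBeta        = (2 - 1 + 0.7) / 2         = 0.85
//	nConnectionBetaLastMax = (2 - 1 + 0.85) / 2        = 0.925
//	nConnectionAlpha       = 3 * 4 * (1 - 0.85) / 1.85 ≈ 0.97
//
// so a loss shrinks the window to 85% and each ack adds roughly
// nConnectionAlpha * MSS^2 / cwnd bytes in the TCP-friendly region.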
const (
	numConnections         uint32  = 2
	nConnectionBeta        float32 = (float32(numConnections) - 1 + beta) / float32(numConnections)
	nConnectionBetaLastMax float32 = (float32(numConnections) - 1 + betaLastMax) / float32(numConnections)
	nConnectionAlpha       float32 = 3 * float32(numConnections) * float32(numConnections) * (1 - nConnectionBeta) / (1 + nConnectionBeta)
	maxCubicTimeInterval           = 30 * time.Millisecond
)

var _ = Describe("Cubic", func() {
	var (
		clock mockClock
		cubic *Cubic
	)

	BeforeEach(func() {
		clock = mockClock{}
		cubic = NewCubic(&clock)
		cubic.SetNumConnections(int(numConnections))
	})

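	// renoCwnd mirrors the byte-counting Reno growth expected in the
	// TCP-friendly region: every ack adds nConnectionAlpha * MSS^2 / cwnd
	// bytes, so a full window of acks increases cwnd by about
	// nConnectionAlpha * MSS in total.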
	renoCwnd := func(currentCwnd protocol.ByteCount) protocol.ByteCount {
		return currentCwnd + protocol.ByteCount(float32(maxDatagramSize)*nConnectionAlpha*float32(maxDatagramSize)/float32(currentCwnd))
	}

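	// cubicConvexCwnd reproduces the fixed-point arithmetic of the convex
	// (above-origin) CUBIC curve: the elapsed time plus one RTT is converted
	// to units of 1/1024 s, and the window grows by 410 * t^3 * MSS >> 40
	// bytes, i.e. roughly 0.4 * t^3 segments, since 410/1024 ≈ 0.4 is the
	// CUBIC scaling constant C.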
	cubicConvexCwnd := func(initialCwnd protocol.ByteCount, rtt, elapsedTime time.Duration) protocol.ByteCount {
		offset := protocol.ByteCount((elapsedTime+rtt)/time.Microsecond) << 10 / 1000000
		deltaCongestionWindow := 410 * offset * offset * offset * maxDatagramSize >> 40
		return initialCwnd + deltaCongestionWindow
	}

	It("works above origin (with tighter bounds)", func() {
		// Convex growth.
		const rttMin = 100 * time.Millisecond
		const rttMinS = float32(rttMin/time.Millisecond) / 1000.0
		currentCwnd := 10 * maxDatagramSize
		initialCwnd := currentCwnd

		clock.Advance(time.Millisecond)
		initialTime := clock.Now()
		expectedFirstCwnd := renoCwnd(currentCwnd)
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, initialTime)
		Expect(expectedFirstCwnd).To(Equal(currentCwnd))

		// Normal TCP phase.
		// The maximum number of expected reno RTTs can be calculated by
		// finding the point where the cubic curve and the reno curve meet.
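		// Sketch of that calculation: after n RTTs Reno has added about
		// nConnectionAlpha*n segments while the cubic curve has added about
		// 0.4*(n*rttMinS)^3 segments, so the curves meet where
		// nConnectionAlpha*n = 0.4*(n*rttMinS)^3, giving
		// n = sqrt(nConnectionAlpha / (0.4 * rttMinS^3)); the -2 keeps the
		// loop safely inside the Reno-dominated region.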
		maxRenoRtts := int(math.Sqrt(float64(nConnectionAlpha/(0.4*rttMinS*rttMinS*rttMinS))) - 2)
		for i := 0; i < maxRenoRtts; i++ {
			// Alternatively, we expect it to increase by one, every time we
			// receive current_cwnd/Alpha acks back.  (This is another way of
			// saying we expect cwnd to increase by approximately Alpha once
			// we receive current_cwnd number of acks back).
			numAcksThisEpoch := int(float32(currentCwnd/maxDatagramSize) / nConnectionAlpha)

			initialCwndThisEpoch := currentCwnd
			for n := 0; n < numAcksThisEpoch; n++ {
				// Call once per ACK.
				expectedNextCwnd := renoCwnd(currentCwnd)
				currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
				Expect(currentCwnd).To(Equal(expectedNextCwnd))
			}
			// Our byte-wise Reno implementation is an estimate.  We expect
			// the cwnd to increase by approximately one MSS every
			// cwnd/kDefaultTCPMSS/Alpha acks, but it may be off by as much as
			// half a packet for smaller values of current_cwnd.
			cwndChangeThisEpoch := currentCwnd - initialCwndThisEpoch
			Expect(cwndChangeThisEpoch).To(BeNumerically("~", maxDatagramSize, maxDatagramSize/2))
			clock.Advance(100 * time.Millisecond)
		}

		for i := 0; i < 54; i++ {
			maxAcksThisEpoch := currentCwnd / maxDatagramSize
			interval := time.Duration(100*1000/maxAcksThisEpoch) * time.Microsecond
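			// Spreading maxAcksThisEpoch acks evenly over ~100ms means each
			// outer iteration models roughly one rttMin's worth of acks.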
			for n := 0; n < int(maxAcksThisEpoch); n++ {
				clock.Advance(interval)
				currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
				expectedCwnd := cubicConvexCwnd(initialCwnd, rttMin, clock.Now().Sub(initialTime))
				// If we allow per-ack updates, every update is a small cubic update.
				Expect(currentCwnd).To(Equal(expectedCwnd))
			}
		}
		expectedCwnd := cubicConvexCwnd(initialCwnd, rttMin, clock.Now().Sub(initialTime))
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		Expect(currentCwnd).To(Equal(expectedCwnd))
	})

	It("works above the origin with fine grained cubing", func() {
		// Start the test with an artificially large cwnd to prevent Reno
		// from overtaking cubic.
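		// (With a 1000-packet window, the Reno term contributes only about
		// nConnectionAlpha*maxDatagramSize/1000 bytes per ack, well under 1%
		// of a packet, so the cubic target dictates the growth below.)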
		currentCwnd := 1000 * maxDatagramSize
		initialCwnd := currentCwnd
		rttMin := 100 * time.Millisecond
		clock.Advance(time.Millisecond)
		initialTime := clock.Now()

		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		clock.Advance(600 * time.Millisecond)
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())

		// We expect the algorithm to perform only non-zero, fine-grained cubic
		// increases on every ack in this case.
		for i := 0; i < 100; i++ {
			clock.Advance(10 * time.Millisecond)
			expectedCwnd := cubicConvexCwnd(initialCwnd, rttMin, clock.Now().Sub(initialTime))
			nextCwnd := cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
			// Make sure we are performing cubic increases.
			Expect(nextCwnd).To(Equal(expectedCwnd))
			// Make sure that these are non-zero, less-than-packet sized increases.
			Expect(nextCwnd).To(BeNumerically(">", currentCwnd))
			cwndDelta := nextCwnd - currentCwnd
			Expect(maxDatagramSize / 10).To(BeNumerically(">", cwndDelta))
			currentCwnd = nextCwnd
		}
	})

	It("handles per ack updates", func() {
		// Start the test with a large cwnd and RTT, to force the first
		// increase to be a cubic increase.
		initialCwndPackets := 150
		currentCwnd := protocol.ByteCount(initialCwndPackets) * maxDatagramSize
		rttMin := 350 * time.Millisecond

		// Initialize the epoch.
		clock.Advance(time.Millisecond)
		// Keep track of the growth of the reno-equivalent cwnd.
		rCwnd := renoCwnd(currentCwnd)
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		initialCwnd := currentCwnd

		// Simulate the return of cwnd packets in less than
		// MaxCubicTimeInterval() time.
		maxAcks := int(float32(initialCwndPackets) / nConnectionAlpha)
		interval := maxCubicTimeInterval / time.Duration(maxAcks+1)
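		// (Assuming beta = 0.7 in cubic.go, nConnectionAlpha ≈ 0.97, so
		// maxAcks is about 154 and interval about 190µs; all of these acks
		// therefore arrive within a single 30ms maxCubicTimeInterval.)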

		// In this scenario, the first increase is dictated by the cubic
		// equation, but it is less than one byte, so the cwnd doesn't
		// change.  Normally, without per-ack increases, any cwnd plateau
		// will cause the cwnd to be pinned for MaxCubicTimeInterval().  If
		// we enable per-ack updates, the cwnd will continue to grow,
		// regardless of the temporary plateau.
		clock.Advance(interval)
		rCwnd = renoCwnd(rCwnd)
		Expect(cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())).To(Equal(currentCwnd))
		for i := 1; i < maxAcks; i++ {
			clock.Advance(interval)
			nextCwnd := cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
			rCwnd = renoCwnd(rCwnd)
			// The window should increase on every ack.
			Expect(nextCwnd).To(BeNumerically(">", currentCwnd))
			Expect(nextCwnd).To(Equal(rCwnd))
			currentCwnd = nextCwnd
		}

		// After all the acks are returned from the epoch, we expect the
		// cwnd to have increased by nearly one packet.  (Not exactly one
		// packet, because our byte-wise Reno algorithm is always a slight
		// under-estimation).  Without per-ack updates, the current_cwnd
		// would otherwise be unchanged.
		minimumExpectedIncrease := maxDatagramSize * 9 / 10
		Expect(currentCwnd).To(BeNumerically(">", initialCwnd+minimumExpectedIncrease))
	})

	It("handles loss events", func() {
		rttMin := 100 * time.Millisecond
		currentCwnd := 422 * maxDatagramSize
		expectedCwnd := renoCwnd(currentCwnd)
		// Initialize the state.
		clock.Advance(time.Millisecond)
		Expect(cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())).To(Equal(expectedCwnd))

		// On the first loss, the last max congestion window is set to the
		// congestion window before the loss.
		preLossCwnd := currentCwnd
		Expect(cubic.lastMaxCongestionWindow).To(BeZero())
		expectedCwnd = protocol.ByteCount(float32(currentCwnd) * nConnectionBeta)
		Expect(cubic.CongestionWindowAfterPacketLoss(currentCwnd)).To(Equal(expectedCwnd))
		Expect(cubic.lastMaxCongestionWindow).To(Equal(preLossCwnd))
		currentCwnd = expectedCwnd

		// On the second loss, the current congestion window has not yet
		// reached the last max congestion window.  The last max congestion
		// window will be reduced by an additional backoff factor to allow
		// for competition.
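		// Worked through with the assumed beta = 0.7 / betaLastMax = 0.85
		// (derived factors 0.85 and 0.925): the window drops to 85% of its
		// pre-loss value while lastMaxCongestionWindow is set to 92.5% of it,
		// leaving the new window below the origin of the cubic curve.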
		preLossCwnd = currentCwnd
		expectedCwnd = protocol.ByteCount(float32(currentCwnd) * nConnectionBeta)
		Expect(cubic.CongestionWindowAfterPacketLoss(currentCwnd)).To(Equal(expectedCwnd))
		currentCwnd = expectedCwnd
		Expect(preLossCwnd).To(BeNumerically(">", cubic.lastMaxCongestionWindow))
		expectedLastMax := protocol.ByteCount(float32(preLossCwnd) * nConnectionBetaLastMax)
		Expect(cubic.lastMaxCongestionWindow).To(Equal(expectedLastMax))
		Expect(expectedCwnd).To(BeNumerically("<", cubic.lastMaxCongestionWindow))
		// Simulate an increase, and check that we are below the origin.
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		Expect(cubic.lastMaxCongestionWindow).To(BeNumerically(">", currentCwnd))

		// On the final loss, simulate the condition where the congestion
		// window had a chance to grow nearly back to the last max
		// congestion window.
		currentCwnd = cubic.lastMaxCongestionWindow - 1
		preLossCwnd = currentCwnd
		expectedCwnd = protocol.ByteCount(float32(currentCwnd) * nConnectionBeta)
		Expect(cubic.CongestionWindowAfterPacketLoss(currentCwnd)).To(Equal(expectedCwnd))
		expectedLastMax = preLossCwnd
		Expect(cubic.lastMaxCongestionWindow).To(Equal(expectedLastMax))
	})

	It("works below origin", func() {
		// Concave growth.
		rttMin := 100 * time.Millisecond
		currentCwnd := 422 * maxDatagramSize
		expectedCwnd := renoCwnd(currentCwnd)
		// Initialize the state.
		clock.Advance(time.Millisecond)
		Expect(cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())).To(Equal(expectedCwnd))

		expectedCwnd = protocol.ByteCount(float32(currentCwnd) * nConnectionBeta)
		Expect(cubic.CongestionWindowAfterPacketLoss(currentCwnd)).To(Equal(expectedCwnd))
		currentCwnd = expectedCwnd
		// First update after loss to initialize the epoch.
		currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		// Cubic phase.
		for i := 0; i < 40; i++ {
			clock.Advance(100 * time.Millisecond)
			currentCwnd = cubic.CongestionWindowAfterAck(maxDatagramSize, currentCwnd, rttMin, clock.Now())
		}
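		// This expected value appears to come from the Chromium test this
		// file mirrors, which assumes a 1460-byte MSS; it is rescaled here to
		// the package's maxDatagramSize.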
		expectedCwnd = 553632 * maxDatagramSize / 1460
		Expect(currentCwnd).To(Equal(expectedCwnd))
	})
})