github.com/lightlus/netstack@v1.2.0/tcpip/transport/tcp/tcp_sack_test.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tcp_test

import (
	"fmt"
	"log"
	"reflect"
	"testing"
	"time"

	"github.com/lightlus/netstack/tcpip"
	"github.com/lightlus/netstack/tcpip/buffer"
	"github.com/lightlus/netstack/tcpip/header"
	"github.com/lightlus/netstack/tcpip/seqnum"
	"github.com/lightlus/netstack/tcpip/stack"
	"github.com/lightlus/netstack/tcpip/transport/tcp"
	"github.com/lightlus/netstack/tcpip/transport/tcp/testing/context"
)

// createConnectedWithSACKPermittedOption creates and connects c.ep with the
// SACKPermitted option enabled if the stack in the context has SACK support
// enabled.
func createConnectedWithSACKPermittedOption(c *context.Context) *context.RawEndpoint {
	return c.CreateConnectedWithOptions(header.TCPSynOptions{SACKPermitted: c.SACKEnabled()})
}

// createConnectedWithSACKAndTS creates and connects c.ep with the SACK & TS
// options enabled if the stack in the context has SACK and TS enabled.
func createConnectedWithSACKAndTS(c *context.Context) *context.RawEndpoint {
	return c.CreateConnectedWithOptions(header.TCPSynOptions{SACKPermitted: c.SACKEnabled(), TS: true})
}

func setStackSACKPermitted(t *testing.T, c *context.Context, enable bool) {
	t.Helper()
	if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(enable)); err != nil {
		t.Fatalf("c.s.SetTransportProtocolOption(tcp.ProtocolNumber, SACKEnabled(%v)) = %v", enable, err)
	}
}

// TestSackPermittedConnect establishes a connection with the SACK option
// enabled.
func TestSackPermittedConnect(t *testing.T) {
	for _, sackEnabled := range []bool{false, true} {
		t.Run(fmt.Sprintf("stack.sackEnabled: %v", sackEnabled), func(t *testing.T) {
			c := context.New(t, defaultMTU)
			defer c.Cleanup()

			setStackSACKPermitted(t, c, sackEnabled)
			rep := createConnectedWithSACKPermittedOption(c)
			data := []byte{1, 2, 3}

			rep.SendPacket(data, nil)
			savedSeqNum := rep.NextSeqNum
			rep.VerifyACKNoSACK()

			// Make an out of order packet and send it.
			rep.NextSeqNum += 3
			sackBlocks := []header.SACKBlock{
				{rep.NextSeqNum, rep.NextSeqNum.Add(seqnum.Size(len(data)))},
			}
			rep.SendPacket(data, nil)

			// Restore the saved sequence number so that the
			// VerifyXXX calls use the right sequence number for
			// checking ACK numbers.
			rep.NextSeqNum = savedSeqNum
			if sackEnabled {
				rep.VerifyACKHasSACK(sackBlocks)
			} else {
				rep.VerifyACKNoSACK()
			}

			// Send the missing segment.
			rep.SendPacket(data, nil)
			// The ACK should contain the cumulative ACK for all 9
			// bytes sent and no SACK blocks.
			rep.NextSeqNum += 3
			// Check that no SACK block is returned in the ACK.
			rep.VerifyACKNoSACK()
		})
	}
}
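
// The test above expects the receiver to advertise exactly one SACK block
// covering the out-of-order segment it just queued. That expectation can be
// written down independently of the test harness; a minimal sketch, assuming
// the usual half-open [Start, End) convention (sackBlockForSegment is a
// hypothetical helper, not part of the netstack API):
func sackBlockForSegment(seq seqnum.Value, payloadLen int) header.SACKBlock {
	// The block starts at the segment's sequence number and ends just past
	// its last byte, mirroring the sackBlocks literal built in the test.
	return header.SACKBlock{Start: seq, End: seq.Add(seqnum.Size(payloadLen))}
}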

// TestSackDisabledConnect establishes a connection with the SACK option
// disabled and verifies that no SACKs are sent for out of order segments.
func TestSackDisabledConnect(t *testing.T) {
	for _, sackEnabled := range []bool{false, true} {
		t.Run(fmt.Sprintf("sackEnabled: %v", sackEnabled), func(t *testing.T) {
			c := context.New(t, defaultMTU)
			defer c.Cleanup()

			setStackSACKPermitted(t, c, sackEnabled)

			rep := c.CreateConnectedWithOptions(header.TCPSynOptions{})

			data := []byte{1, 2, 3}

			rep.SendPacket(data, nil)
			savedSeqNum := rep.NextSeqNum
			rep.VerifyACKNoSACK()

			// Make an out of order packet and send it.
			rep.NextSeqNum += 3
			rep.SendPacket(data, nil)

			// The ACK should contain the older sequence number and
			// no SACK blocks.
			rep.NextSeqNum = savedSeqNum
			rep.VerifyACKNoSACK()

			// Send the missing segment.
			rep.SendPacket(data, nil)
			// The ACK should contain the cumulative ACK for all 9
			// bytes sent and no SACK blocks.
			rep.NextSeqNum += 3
			// Check that no SACK block is returned in the ACK.
			rep.VerifyACKNoSACK()
		})
	}
}

// TestSackPermittedAccept accepts and establishes a connection with the
// SACKPermitted option enabled if the connection request specifies the
// SACKPermitted option. In case of SYN cookies SACK should be disabled as we
// don't encode the SACK information in the cookie.
func TestSackPermittedAccept(t *testing.T) {
	type testCase struct {
		cookieEnabled bool
		sackPermitted bool
		wndScale      int
		wndSize       uint16
	}

	testCases := []testCase{
		{true, false, -1, 0xffff}, // When cookie is used window scaling is disabled.
		{false, true, 5, 0x8000},  // 0x8000 * 2^5 = 1<<20 = 1MB window (the default).
	}
	savedSynCountThreshold := tcp.SynRcvdCountThreshold
	defer func() {
		tcp.SynRcvdCountThreshold = savedSynCountThreshold
	}()
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("test: %#v", tc), func(t *testing.T) {
			if tc.cookieEnabled {
				tcp.SynRcvdCountThreshold = 0
			} else {
				tcp.SynRcvdCountThreshold = savedSynCountThreshold
			}
			for _, sackEnabled := range []bool{false, true} {
				t.Run(fmt.Sprintf("test stack.sackEnabled: %v", sackEnabled), func(t *testing.T) {
					c := context.New(t, defaultMTU)
					defer c.Cleanup()
					setStackSACKPermitted(t, c, sackEnabled)

					rep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS, SACKPermitted: tc.sackPermitted})
					// Now verify no SACK blocks are
					// received when sack is disabled.
					data := []byte{1, 2, 3}
					rep.SendPacket(data, nil)
					rep.VerifyACKNoSACK()

					savedSeqNum := rep.NextSeqNum

					// Make an out of order packet and send
					// it.
					rep.NextSeqNum += 3
					sackBlocks := []header.SACKBlock{
						{rep.NextSeqNum, rep.NextSeqNum.Add(seqnum.Size(len(data)))},
					}
					rep.SendPacket(data, nil)

					// The ACK should contain the older
					// sequence number.
					rep.NextSeqNum = savedSeqNum
					if sackEnabled && tc.sackPermitted {
						rep.VerifyACKHasSACK(sackBlocks)
					} else {
						rep.VerifyACKNoSACK()
					}

					// Send the missing segment.
					rep.SendPacket(data, nil)
					// The ACK should contain the cumulative
					// ACK for all 9 bytes sent and no SACK
					// blocks.
					rep.NextSeqNum += 3
					// Check that no SACK block is returned
					// in the ACK.
					rep.VerifyACKNoSACK()
				})
			}
		})
	}
}
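
// The wndScale/wndSize pairs in the accept test tables encode the usual TCP
// window scaling arithmetic: the advertised 16-bit window is shifted left by
// the negotiated scale, so 0x8000 << 5 == 1 << 20 (1MB), while the SYN-cookie
// case advertises the unscaled 0xffff. A minimal sketch of that relationship
// (effectiveWindow is illustrative only, not part of the test helpers):
func effectiveWindow(wndSize uint16, wndScale int) uint64 {
	if wndScale < 0 {
		// A negative scale in the tables above means window scaling was
		// not negotiated, so the advertised window is used as-is.
		return uint64(wndSize)
	}
	return uint64(wndSize) << uint(wndScale)
}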

// TestSackDisabledAccept accepts and establishes a connection with
// the SACKPermitted option disabled and verifies that no SACKs are
// sent for out of order packets.
func TestSackDisabledAccept(t *testing.T) {
	type testCase struct {
		cookieEnabled bool
		wndScale      int
		wndSize       uint16
	}

	testCases := []testCase{
		{true, -1, 0xffff}, // When cookie is used window scaling is disabled.
		{false, 5, 0x8000}, // 0x8000 * 2^5 = 1<<20 = 1MB window (the default).
	}
	savedSynCountThreshold := tcp.SynRcvdCountThreshold
	defer func() {
		tcp.SynRcvdCountThreshold = savedSynCountThreshold
	}()
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("test: %#v", tc), func(t *testing.T) {
			if tc.cookieEnabled {
				tcp.SynRcvdCountThreshold = 0
			} else {
				tcp.SynRcvdCountThreshold = savedSynCountThreshold
			}
			for _, sackEnabled := range []bool{false, true} {
				t.Run(fmt.Sprintf("test: sackEnabled: %v", sackEnabled), func(t *testing.T) {
					c := context.New(t, defaultMTU)
					defer c.Cleanup()
					setStackSACKPermitted(t, c, sackEnabled)

					rep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS})

					// Now verify no SACK blocks are
					// received when sack is disabled.
					data := []byte{1, 2, 3}
					rep.SendPacket(data, nil)
					rep.VerifyACKNoSACK()
					savedSeqNum := rep.NextSeqNum

					// Make an out of order packet and send
					// it.
					rep.NextSeqNum += 3
					rep.SendPacket(data, nil)

					// The ACK should contain the older
					// sequence number and no SACK blocks.
					rep.NextSeqNum = savedSeqNum
					rep.VerifyACKNoSACK()

					// Send the missing segment.
					rep.SendPacket(data, nil)
					// The ACK should contain the cumulative
					// ACK for all 9 bytes sent and no SACK
					// blocks.
					rep.NextSeqNum += 3
					// Check that no SACK block is returned
					// in the ACK.
					rep.VerifyACKNoSACK()
				})
			}
		})
	}
}
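
// Both accept tests above force SYN-cookie mode by dropping
// tcp.SynRcvdCountThreshold to zero and restoring it afterwards. A hedged
// sketch of that pattern as a reusable helper (forceSynCookies is a
// hypothetical helper, not part of the test suite); a test would use it as
// defer forceSynCookies(t, tc.cookieEnabled)():
func forceSynCookies(t *testing.T, enable bool) func() {
	t.Helper()
	saved := tcp.SynRcvdCountThreshold
	if enable {
		// With a threshold of zero every incoming SYN is handled with a
		// cookie, which is why window scaling and SACK are not expected
		// to be negotiated in the cookie test cases.
		tcp.SynRcvdCountThreshold = 0
	}
	return func() { tcp.SynRcvdCountThreshold = saved }
}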

func TestUpdateSACKBlocks(t *testing.T) {
	testCases := []struct {
		segStart   seqnum.Value
		segEnd     seqnum.Value
		rcvNxt     seqnum.Value
		sackBlocks []header.SACKBlock
		updated    []header.SACKBlock
	}{
		// Trivial cases where current SACK block list is empty and we
		// have an out of order delivery.
		{10, 11, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 11}}},
		{10, 12, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 12}}},
		{10, 20, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 20}}},

		// Cases where current SACK block list is not empty and we have
		// an out of order delivery. Tests that the updated SACK block
		// list has the first block as the one that contains the new
		// SACK block representing the segment that was just delivered.
		{10, 11, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 11}, {12, 20}}},
		{24, 30, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{24, 30}, {12, 20}}},
		{24, 30, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{24, 30}, {12, 20}, {32, 40}}},

		// Ensure that we only retain header.MaxSACKBlocks and drop the
		// oldest one if adding a new block exceeds
		// header.MaxSACKBlocks.
		{24, 30, 9,
			[]header.SACKBlock{{12, 20}, {32, 40}, {42, 50}, {52, 60}, {62, 70}, {72, 80}},
			[]header.SACKBlock{{24, 30}, {12, 20}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}},

		// Cases where segment extends an existing SACK block.
		{10, 12, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 20}}},
		{10, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 22}}},
		{10, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 22}}},
		{15, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{12, 22}}},
		{15, 25, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{12, 25}}},
		{11, 25, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{11, 25}}},
		{10, 12, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 20}, {32, 40}}},
		{10, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 22}, {32, 40}}},
		{10, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 22}, {32, 40}}},
		{15, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{12, 22}, {32, 40}}},
		{15, 25, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{12, 25}, {32, 40}}},
		{11, 25, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{11, 25}, {32, 40}}},

		// Cases where segment contains rcvNxt.
		{10, 20, 15, []header.SACKBlock{{20, 30}, {40, 50}}, []header.SACKBlock{{40, 50}}},
	}

	for _, tc := range testCases {
		var sack tcp.SACKInfo
		copy(sack.Blocks[:], tc.sackBlocks)
		sack.NumBlocks = len(tc.sackBlocks)
		tcp.UpdateSACKBlocks(&sack, tc.segStart, tc.segEnd, tc.rcvNxt)
		if got, want := sack.Blocks[:sack.NumBlocks], tc.updated; !reflect.DeepEqual(got, want) {
			t.Errorf("UpdateSACKBlocks(%v, %v, %v, %v), got: %v, want: %v", tc.sackBlocks, tc.segStart, tc.segEnd, tc.rcvNxt, got, want)
		}
	}
}

func TestTrimSackBlockList(t *testing.T) {
	testCases := []struct {
		rcvNxt     seqnum.Value
		sackBlocks []header.SACKBlock
		trimmed    []header.SACKBlock
	}{
		// Simple cases where we trim whole entries.
		{2, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}},
		{21, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{22, 30}, {32, 40}}},
		{31, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{32, 40}}},
		{40, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{}},
		// Cases where we need to update a block.
		{12, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{12, 20}, {22, 30}, {32, 40}}},
		{23, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{23, 30}, {32, 40}}},
		{33, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{33, 40}}},
		{41, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{}},
	}
	for _, tc := range testCases {
		var sack tcp.SACKInfo
		copy(sack.Blocks[:], tc.sackBlocks)
		sack.NumBlocks = len(tc.sackBlocks)
		tcp.TrimSACKBlockList(&sack, tc.rcvNxt)
		if got, want := sack.Blocks[:sack.NumBlocks], tc.trimmed; !reflect.DeepEqual(got, want) {
			t.Errorf("TrimSackBlockList(%v, %v), got: %v, want: %v", tc.sackBlocks, tc.rcvNxt, got, want)
		}
	}
}
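
// The trim semantics exercised above can be summarized independently of the
// netstack internals: any block wholly below rcvNxt disappears, and a block
// straddling rcvNxt is clipped to start at rcvNxt. A minimal sketch of that
// rule (trimBlocks is illustrative only, not the tcp package's
// implementation):
func trimBlocks(blocks []header.SACKBlock, rcvNxt seqnum.Value) []header.SACKBlock {
	trimmed := make([]header.SACKBlock, 0, len(blocks))
	for _, b := range blocks {
		if !rcvNxt.LessThan(b.End) {
			// rcvNxt >= End: the whole block has been cumulatively
			// acknowledged, so it is dropped.
			continue
		}
		if b.Start.LessThan(rcvNxt) {
			// The block straddles rcvNxt; keep only the part that is
			// still above the cumulative ACK point.
			b.Start = rcvNxt
		}
		trimmed = append(trimmed, b)
	}
	return trimmed
}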

func TestSACKRecovery(t *testing.T) {
	const maxPayload = 10
	// See: tcp.makeOptions for why tsOptionSize is set to 12 here (the
	// 10-byte timestamp option is padded to a 4-byte multiple, i.e. 12
	// bytes).
	const tsOptionSize = 12
	// Enabling SACK means the payload size is reduced to account
	// for the extra space required for the TCP options.
	//
	// We increase the MTU by 40 bytes to account for SACK and Timestamp
	// options.
	const maxTCPOptionSize = 40

	c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxTCPOptionSize+maxPayload))
	defer c.Cleanup()

	c.Stack().AddTCPProbe(func(s stack.TCPEndpointState) {
		// We use log.Printf instead of t.Logf here because this probe
		// can fire even when the test function has finished. This is
		// because closing the endpoint in cleanup() does not mean the
		// actual worker loop terminates immediately as it still has to
		// do a full TCP shutdown. But this test can finish running
		// before the shutdown is done. Using t.Logf in such a case
		// causes the test to panic due to logging after the test has
		// finished.
		log.Printf("state: %+v\n", s)
	})
	setStackSACKPermitted(t, c, true)
	createConnectedWithSACKAndTS(c)

	const iterations = 7
	data := buffer.NewView(2 * maxPayload * (tcp.InitialCwnd << (iterations + 1)))
	for i := range data {
		data[i] = byte(i)
	}

	// Write all the data in one shot. Packets will only be written at the
	// MTU size though.
	if _, _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil {
		t.Fatalf("Write failed: %v", err)
	}

	// Do slow start for a few iterations.
	expected := tcp.InitialCwnd
	bytesRead := 0
	for i := 0; i < iterations; i++ {
		expected = tcp.InitialCwnd << uint(i)
		if i > 0 {
			// Acknowledge all the data received so far if not on
			// first iteration.
			c.SendAck(790, bytesRead)
		}

		// Read all packets expected on this iteration. Don't
		// acknowledge any of them just yet, so that we can measure the
		// congestion window.
		for j := 0; j < expected; j++ {
			c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
			bytesRead += maxPayload
		}

		// Check that we don't receive any more packets on this
		// iteration. The timeout can't be too large or we'll trigger a
		// retransmit timeout.
		c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond)
	}
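
	// At this point the sender has completed the last slow start iteration
	// with a congestion window of tcp.InitialCwnd << (iterations-1)
	// segments (10, 20, 40, ..., 640 per the arithmetic spelled out further
	// below), all of which have been read above but not yet acknowledged.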

	// Send 3 duplicate acks. This should force an immediate retransmit of
	// the pending packet and put the sender into fast recovery.
	rtxOffset := bytesRead - maxPayload*expected
	start := c.IRS.Add(seqnum.Size(rtxOffset) + 30 + 1)
	end := start.Add(10)
	for i := 0; i < 3; i++ {
		c.SendAckWithSACK(790, rtxOffset, []header.SACKBlock{{start, end}})
		end = end.Add(10)
	}

	// Receive the retransmitted packet.
	c.ReceiveAndCheckPacketWithOptions(data, rtxOffset, maxPayload, tsOptionSize)

	tcpStats := c.Stack().Stats().TCP
	stats := []struct {
		stat *tcpip.StatCounter
		name string
		want uint64
	}{
		{tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 1},
		{tcpStats.Retransmits, "stats.TCP.Retransmits", 1},
		{tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
		{tcpStats.FastRecovery, "stats.TCP.FastRecovery", 0},
	}
	for _, s := range stats {
		if got, want := s.stat.Value(), s.want; got != want {
			t.Errorf("got %s.Value() = %v, want = %v", s.name, got, want)
		}
	}

	// Now send 7 more duplicate ACKs. In SACK TCP dupAcks do not cause
	// window inflation and sending of packets is completely handled by the
	// SACK Recovery algorithm. We should see no packets being released, as
	// the cwnd at this point after entering recovery should be half of the
	// outstanding number of packets in flight.
	for i := 0; i < 7; i++ {
		c.SendAckWithSACK(790, rtxOffset, []header.SACKBlock{{start, end}})
		end = end.Add(10)
	}

	recover := bytesRead

	// Ensure no new packets arrive.
	c.CheckNoPacketTimeout("More packets received than expected during recovery after dupacks for this cwnd.", 50*time.Millisecond)

	// Acknowledge half of the pending data. This along with the 10 sacked
	// segments above should reduce the outstanding below the current
	// congestion window allowing the sender to transmit data.
	rtxOffset = bytesRead - expected*maxPayload/2

	// Now send a partial ACK with a SACK block that indicates that the next
	// 3 segments are lost and we have received 6 segments after the lost
	// segments. This should cause the sender to immediately transmit all 3
	// segments in response to this ACK unlike in FastRecovery where only 1
	// segment is retransmitted per ACK.
	start = c.IRS.Add(seqnum.Size(rtxOffset) + 30 + 1)
	end = start.Add(60)
	c.SendAckWithSACK(790, rtxOffset, []header.SACKBlock{{start, end}})

	// At this point, we acked expected/2 packets, we SACKED 6 packets and
	// 3 segments were considered lost due to the SACK block we sent.
	//
	// So total packets outstanding can be calculated as follows after 7
	// iterations of slow start -> 10/20/40/80/160/320/640. So expected
	// should be 640 at start, then we went to recover at which point the
	// cwnd should be set to 320 + 3 (for the 3 dupAcks which have left the
	// network).
	// Outstanding at this point after acking half the window
	// (320 packets) will be:
	//    outstanding = 640-320-6(due to SACK block)-3 = 311
	//
	// The last 3 is due to the fact that the first 3 packets after
	// rtxOffset will be considered lost due to the SACK blocks sent.

	// Receive the retransmit due to the partial ack.
	c.ReceiveAndCheckPacketWithOptions(data, rtxOffset, maxPayload, tsOptionSize)

	// Receive the 2 extra packets that should have been retransmitted as
	// those should be considered lost and immediately retransmitted based
	// on the SACK information in the previous ACK sent above.
	for i := 0; i < 2; i++ {
		c.ReceiveAndCheckPacketWithOptions(data, rtxOffset+maxPayload*(i+1), maxPayload, tsOptionSize)
	}

	// Now we should get 9 more new unsent packets as the cwnd is 323 and
	// outstanding is 311.
	for i := 0; i < 9; i++ {
		c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
		bytesRead += maxPayload
	}
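
	// Before checking the retransmit counters below, note how they add up:
	// one segment was fast retransmitted on the third duplicate ACK, and
	// three more (the segment at rtxOffset plus the two extra ones above)
	// were retransmitted in response to the SACK information carried by the
	// partial ACK, for a total of four retransmits.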

	// In SACK recovery only the first segment is fast retransmitted when
	// entering recovery.
	if got, want := c.Stack().Stats().TCP.FastRetransmit.Value(), uint64(1); got != want {
		t.Errorf("got stats.TCP.FastRetransmit.Value = %v, want = %v", got, want)
	}

	if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.FastRetransmit.Value(), uint64(1); got != want {
		t.Errorf("got EP stats SendErrors.FastRetransmit = %v, want = %v", got, want)
	}

	if got, want := c.Stack().Stats().TCP.Retransmits.Value(), uint64(4); got != want {
		t.Errorf("got stats.TCP.Retransmits.Value = %v, want = %v", got, want)
	}

	if got, want := c.EP.Stats().(*tcp.Stats).SendErrors.Retransmits.Value(), uint64(4); got != want {
		t.Errorf("got EP stats Stats.SendErrors.Retransmits = %v, want = %v", got, want)
	}

	c.CheckNoPacketTimeout("More packets received than expected during recovery after partial ack for this cwnd.", 50*time.Millisecond)

	// Acknowledge all pending data up to the recover point.
	c.SendAck(790, recover)

	// At this point, the cwnd should reset to expected/2 and there are 9
	// packets outstanding.
	//
	// In the first iteration, since there are 9 packets outstanding, we
	// expect to get expected/2 - 9 packets. Subsequent iterations will send
	// us expected/2 + 1 (per iteration).
	expected = expected/2 - 9
	for i := 0; i < iterations; i++ {
		// Read all packets expected on this iteration. Don't
		// acknowledge any of them just yet, so that we can measure the
		// congestion window.
		for j := 0; j < expected; j++ {
			c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
			bytesRead += maxPayload
		}
		// Check that we don't receive any more packets on this
		// iteration. The timeout can't be too large or we'll trigger a
		// retransmit timeout.
		c.CheckNoPacketTimeout(fmt.Sprintf("More packets received (after deflation) than expected %d for this cwnd and iteration: %d.", expected, i), 50*time.Millisecond)

		// Acknowledge all the data received so far.
		c.SendAck(790, bytesRead)

		// In congestion avoidance, the packet train increases by 1 in
		// each iteration.
		if i == 0 {
			// After the first iteration we expect to get the full
			// congestion window worth of packets in every
			// iteration.
			expected += 9
		}
		expected++
	}
}
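
// The growth pattern asserted in the final loop above can be written down
// directly: the first post-recovery iteration delivers cwnd/2 - 9 new
// segments (9 are still in flight), and each later iteration delivers one
// more segment than the previous one as congestion avoidance grows the
// window. A sketch of that schedule under those assumptions
// (expectedPacketSchedule is illustrative only, not part of the test suite):
func expectedPacketSchedule(cwndAtRecovery, inFlight, iterations int) []int {
	schedule := make([]int, 0, iterations)
	expected := cwndAtRecovery/2 - inFlight
	for i := 0; i < iterations; i++ {
		schedule = append(schedule, expected)
		if i == 0 {
			// After the first iteration the in-flight segments have
			// been acknowledged, so the full window is available
			// again.
			expected += inFlight
		}
		expected++
	}
	return schedule
}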