github.com/flowerwrong/netstack@v0.0.0-20191009141956-e5848263af28/tcpip/link/sharedmem/sharedmem_test.go (about) 1 // Copyright 2018 The gVisor Authors. 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 // +build linux 16 17 package sharedmem 18 19 import ( 20 "bytes" 21 "io/ioutil" 22 "math/rand" 23 "os" 24 "strings" 25 "sync" 26 "syscall" 27 "testing" 28 "time" 29 30 "github.com/FlowerWrong/netstack/tcpip" 31 "github.com/FlowerWrong/netstack/tcpip/buffer" 32 "github.com/FlowerWrong/netstack/tcpip/header" 33 "github.com/FlowerWrong/netstack/tcpip/link/sharedmem/pipe" 34 "github.com/FlowerWrong/netstack/tcpip/link/sharedmem/queue" 35 "github.com/FlowerWrong/netstack/tcpip/stack" 36 ) 37 38 const ( 39 localLinkAddr = "\xde\xad\xbe\xef\x56\x78" 40 remoteLinkAddr = "\xde\xad\xbe\xef\x12\x34" 41 42 queueDataSize = 1024 * 1024 43 queuePipeSize = 4096 44 ) 45 46 type queueBuffers struct { 47 data []byte 48 rx pipe.Tx 49 tx pipe.Rx 50 } 51 52 func initQueue(t *testing.T, q *queueBuffers, c *QueueConfig) { 53 // Prepare tx pipe. 54 b, err := getBuffer(c.TxPipeFD) 55 if err != nil { 56 t.Fatalf("getBuffer failed: %v", err) 57 } 58 q.tx.Init(b) 59 60 // Prepare rx pipe. 61 b, err = getBuffer(c.RxPipeFD) 62 if err != nil { 63 t.Fatalf("getBuffer failed: %v", err) 64 } 65 q.rx.Init(b) 66 67 // Get data slice. 68 q.data, err = getBuffer(c.DataFD) 69 if err != nil { 70 t.Fatalf("getBuffer failed: %v", err) 71 } 72 } 73 74 func (q *queueBuffers) cleanup() { 75 syscall.Munmap(q.tx.Bytes()) 76 syscall.Munmap(q.rx.Bytes()) 77 syscall.Munmap(q.data) 78 } 79 80 type packetInfo struct { 81 addr tcpip.LinkAddress 82 proto tcpip.NetworkProtocolNumber 83 vv buffer.VectorisedView 84 } 85 86 type testContext struct { 87 t *testing.T 88 ep *endpoint 89 txCfg QueueConfig 90 rxCfg QueueConfig 91 txq queueBuffers 92 rxq queueBuffers 93 94 packetCh chan struct{} 95 mu sync.Mutex 96 packets []packetInfo 97 } 98 99 func newTestContext(t *testing.T, mtu, bufferSize uint32, addr tcpip.LinkAddress) *testContext { 100 var err error 101 c := &testContext{ 102 t: t, 103 packetCh: make(chan struct{}, 1000000), 104 } 105 c.txCfg = createQueueFDs(t, queueSizes{ 106 dataSize: queueDataSize, 107 txPipeSize: queuePipeSize, 108 rxPipeSize: queuePipeSize, 109 sharedDataSize: 4096, 110 }) 111 112 c.rxCfg = createQueueFDs(t, queueSizes{ 113 dataSize: queueDataSize, 114 txPipeSize: queuePipeSize, 115 rxPipeSize: queuePipeSize, 116 sharedDataSize: 4096, 117 }) 118 119 initQueue(t, &c.txq, &c.txCfg) 120 initQueue(t, &c.rxq, &c.rxCfg) 121 122 ep, err := New(mtu, bufferSize, addr, c.txCfg, c.rxCfg) 123 if err != nil { 124 t.Fatalf("New failed: %v", err) 125 } 126 127 c.ep = ep.(*endpoint) 128 c.ep.Attach(c) 129 130 return c 131 } 132 133 func (c *testContext) DeliverNetworkPacket(_ stack.LinkEndpoint, remoteLinkAddr, localLinkAddr tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) { 134 c.mu.Lock() 135 c.packets = append(c.packets, packetInfo{ 136 addr: remoteLinkAddr, 137 proto: proto, 138 vv: vv.Clone(nil), 139 }) 140 c.mu.Unlock() 141 142 c.packetCh <- struct{}{} 143 } 144 145 func (c *testContext) cleanup() { 146 c.ep.Close() 147 closeFDs(&c.txCfg) 148 closeFDs(&c.rxCfg) 149 c.txq.cleanup() 150 c.rxq.cleanup() 151 } 152 153 func (c *testContext) waitForPackets(n int, to <-chan time.Time, errorStr string) { 154 for i := 0; i < n; i++ { 155 select { 156 case <-c.packetCh: 157 case <-to: 158 c.t.Fatalf(errorStr) 159 } 160 } 161 } 162 163 func (c *testContext) pushRxCompletion(size uint32, bs []queue.RxBuffer) { 164 b := c.rxq.rx.Push(queue.RxCompletionSize(len(bs))) 165 queue.EncodeRxCompletion(b, size, 0) 166 for i := range bs { 167 queue.EncodeRxCompletionBuffer(b, i, queue.RxBuffer{ 168 Offset: bs[i].Offset, 169 Size: bs[i].Size, 170 ID: bs[i].ID, 171 }) 172 } 173 } 174 175 func randomFill(b []byte) { 176 for i := range b { 177 b[i] = byte(rand.Intn(256)) 178 } 179 } 180 181 func shuffle(b []int) { 182 for i := len(b) - 1; i >= 0; i-- { 183 j := rand.Intn(i + 1) 184 b[i], b[j] = b[j], b[i] 185 } 186 } 187 188 func createFile(t *testing.T, size int64, initQueue bool) int { 189 tmpDir := os.Getenv("TEST_TMPDIR") 190 if tmpDir == "" { 191 tmpDir = os.Getenv("TMPDIR") 192 } 193 f, err := ioutil.TempFile(tmpDir, "sharedmem_test") 194 if err != nil { 195 t.Fatalf("TempFile failed: %v", err) 196 } 197 defer f.Close() 198 syscall.Unlink(f.Name()) 199 200 if initQueue { 201 // Write the "slot-free" flag in the initial queue. 202 _, err := f.WriteAt([]byte{0, 0, 0, 0, 0, 0, 0, 0x80}, 0) 203 if err != nil { 204 t.Fatalf("WriteAt failed: %v", err) 205 } 206 } 207 208 fd, err := syscall.Dup(int(f.Fd())) 209 if err != nil { 210 t.Fatalf("Dup failed: %v", err) 211 } 212 213 if err := syscall.Ftruncate(fd, size); err != nil { 214 syscall.Close(fd) 215 t.Fatalf("Ftruncate failed: %v", err) 216 } 217 218 return fd 219 } 220 221 func closeFDs(c *QueueConfig) { 222 syscall.Close(c.DataFD) 223 syscall.Close(c.EventFD) 224 syscall.Close(c.TxPipeFD) 225 syscall.Close(c.RxPipeFD) 226 syscall.Close(c.SharedDataFD) 227 } 228 229 type queueSizes struct { 230 dataSize int64 231 txPipeSize int64 232 rxPipeSize int64 233 sharedDataSize int64 234 } 235 236 func createQueueFDs(t *testing.T, s queueSizes) QueueConfig { 237 fd, _, err := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, 0, 0) 238 if err != 0 { 239 t.Fatalf("eventfd failed: %v", error(err)) 240 } 241 242 return QueueConfig{ 243 EventFD: int(fd), 244 DataFD: createFile(t, s.dataSize, false), 245 TxPipeFD: createFile(t, s.txPipeSize, true), 246 RxPipeFD: createFile(t, s.rxPipeSize, true), 247 SharedDataFD: createFile(t, s.sharedDataSize, false), 248 } 249 } 250 251 // TestSimpleSend sends 1000 packets with random header and payload sizes, 252 // then checks that the right payload is received on the shared memory queues. 253 func TestSimpleSend(t *testing.T) { 254 c := newTestContext(t, 20000, 1500, localLinkAddr) 255 defer c.cleanup() 256 257 // Prepare route. 258 r := stack.Route{ 259 RemoteLinkAddress: remoteLinkAddr, 260 } 261 262 for iters := 1000; iters > 0; iters-- { 263 func() { 264 // Prepare and send packet. 265 n := rand.Intn(10000) 266 hdr := buffer.NewPrependable(n + int(c.ep.MaxHeaderLength())) 267 hdrBuf := hdr.Prepend(n) 268 randomFill(hdrBuf) 269 270 n = rand.Intn(10000) 271 buf := buffer.NewView(n) 272 randomFill(buf) 273 274 proto := tcpip.NetworkProtocolNumber(rand.Intn(0x10000)) 275 if err := c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), proto); err != nil { 276 t.Fatalf("WritePacket failed: %v", err) 277 } 278 279 // Receive packet. 280 desc := c.txq.tx.Pull() 281 pi := queue.DecodeTxPacketHeader(desc) 282 if pi.Reserved != 0 { 283 t.Fatalf("Reserved value is non-zero: 0x%x", pi.Reserved) 284 } 285 contents := make([]byte, 0, pi.Size) 286 for i := 0; i < pi.BufferCount; i++ { 287 bi := queue.DecodeTxBufferHeader(desc, i) 288 contents = append(contents, c.txq.data[bi.Offset:][:bi.Size]...) 289 } 290 c.txq.tx.Flush() 291 292 defer func() { 293 // Tell the endpoint about the completion of the write. 294 b := c.txq.rx.Push(8) 295 queue.EncodeTxCompletion(b, pi.ID) 296 c.txq.rx.Flush() 297 }() 298 299 // Check the ethernet header. 300 ethTemplate := make(header.Ethernet, header.EthernetMinimumSize) 301 ethTemplate.Encode(&header.EthernetFields{ 302 SrcAddr: localLinkAddr, 303 DstAddr: remoteLinkAddr, 304 Type: proto, 305 }) 306 if got := contents[:header.EthernetMinimumSize]; !bytes.Equal(got, []byte(ethTemplate)) { 307 t.Fatalf("Bad ethernet header in packet: got %x, want %x", got, ethTemplate) 308 } 309 310 // Compare contents skipping the ethernet header added by the 311 // endpoint. 312 merged := append(hdrBuf, buf...) 313 if uint32(len(contents)) < pi.Size { 314 t.Fatalf("Sum of buffers is less than packet size: %v < %v", len(contents), pi.Size) 315 } 316 contents = contents[:pi.Size][header.EthernetMinimumSize:] 317 318 if !bytes.Equal(contents, merged) { 319 t.Fatalf("Buffers are different: got %x (%v bytes), want %x (%v bytes)", contents, len(contents), merged, len(merged)) 320 } 321 }() 322 } 323 } 324 325 // TestPreserveSrcAddressInSend calls WritePacket once with LocalLinkAddress 326 // set in Route (using much of the same code as TestSimpleSend), then checks 327 // that the encoded ethernet header received includes the correct SrcAddr. 328 func TestPreserveSrcAddressInSend(t *testing.T) { 329 c := newTestContext(t, 20000, 1500, localLinkAddr) 330 defer c.cleanup() 331 332 newLocalLinkAddress := tcpip.LinkAddress(strings.Repeat("0xFE", 6)) 333 // Set both remote and local link address in route. 334 r := stack.Route{ 335 RemoteLinkAddress: remoteLinkAddr, 336 LocalLinkAddress: newLocalLinkAddress, 337 } 338 339 // WritePacket panics given a prependable with anything less than 340 // the minimum size of the ethernet header. 341 hdr := buffer.NewPrependable(header.EthernetMinimumSize) 342 343 proto := tcpip.NetworkProtocolNumber(rand.Intn(0x10000)) 344 if err := c.ep.WritePacket(&r, nil /* gso */, hdr, buffer.VectorisedView{}, proto); err != nil { 345 t.Fatalf("WritePacket failed: %v", err) 346 } 347 348 // Receive packet. 349 desc := c.txq.tx.Pull() 350 pi := queue.DecodeTxPacketHeader(desc) 351 if pi.Reserved != 0 { 352 t.Fatalf("Reserved value is non-zero: 0x%x", pi.Reserved) 353 } 354 contents := make([]byte, 0, pi.Size) 355 for i := 0; i < pi.BufferCount; i++ { 356 bi := queue.DecodeTxBufferHeader(desc, i) 357 contents = append(contents, c.txq.data[bi.Offset:][:bi.Size]...) 358 } 359 c.txq.tx.Flush() 360 361 defer func() { 362 // Tell the endpoint about the completion of the write. 363 b := c.txq.rx.Push(8) 364 queue.EncodeTxCompletion(b, pi.ID) 365 c.txq.rx.Flush() 366 }() 367 368 // Check that the ethernet header contains the expected SrcAddr. 369 ethTemplate := make(header.Ethernet, header.EthernetMinimumSize) 370 ethTemplate.Encode(&header.EthernetFields{ 371 SrcAddr: newLocalLinkAddress, 372 DstAddr: remoteLinkAddr, 373 Type: proto, 374 }) 375 if got := contents[:header.EthernetMinimumSize]; !bytes.Equal(got, []byte(ethTemplate)) { 376 t.Fatalf("Bad ethernet header in packet: got %x, want %x", got, ethTemplate) 377 } 378 } 379 380 // TestFillTxQueue sends packets until the queue is full. 381 func TestFillTxQueue(t *testing.T) { 382 c := newTestContext(t, 20000, 1500, localLinkAddr) 383 defer c.cleanup() 384 385 // Prepare to send a packet. 386 r := stack.Route{ 387 RemoteLinkAddress: remoteLinkAddr, 388 } 389 390 buf := buffer.NewView(100) 391 392 // Each packet is uses no more than 40 bytes, so write that many packets 393 // until the tx queue if full. 394 ids := make(map[uint64]struct{}) 395 for i := queuePipeSize / 40; i > 0; i-- { 396 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 397 398 if err := c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), header.IPv4ProtocolNumber); err != nil { 399 t.Fatalf("WritePacket failed unexpectedly: %v", err) 400 } 401 402 // Check that they have different IDs. 403 desc := c.txq.tx.Pull() 404 pi := queue.DecodeTxPacketHeader(desc) 405 if _, ok := ids[pi.ID]; ok { 406 t.Fatalf("ID (%v) reused", pi.ID) 407 } 408 ids[pi.ID] = struct{}{} 409 } 410 411 // Next attempt to write must fail. 412 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 413 if want, err := tcpip.ErrWouldBlock, c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), header.IPv4ProtocolNumber); err != want { 414 t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want) 415 } 416 } 417 418 // TestFillTxQueueAfterBadCompletion sends a bad completion, then sends packets 419 // until the queue is full. 420 func TestFillTxQueueAfterBadCompletion(t *testing.T) { 421 c := newTestContext(t, 20000, 1500, localLinkAddr) 422 defer c.cleanup() 423 424 // Send a bad completion. 425 queue.EncodeTxCompletion(c.txq.rx.Push(8), 1) 426 c.txq.rx.Flush() 427 428 // Prepare to send a packet. 429 r := stack.Route{ 430 RemoteLinkAddress: remoteLinkAddr, 431 } 432 433 buf := buffer.NewView(100) 434 435 // Send two packets so that the id slice has at least two slots. 436 for i := 2; i > 0; i-- { 437 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 438 if err := c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), header.IPv4ProtocolNumber); err != nil { 439 t.Fatalf("WritePacket failed unexpectedly: %v", err) 440 } 441 } 442 443 // Complete the two writes twice. 444 for i := 2; i > 0; i-- { 445 pi := queue.DecodeTxPacketHeader(c.txq.tx.Pull()) 446 447 queue.EncodeTxCompletion(c.txq.rx.Push(8), pi.ID) 448 queue.EncodeTxCompletion(c.txq.rx.Push(8), pi.ID) 449 c.txq.rx.Flush() 450 } 451 c.txq.tx.Flush() 452 453 // Each packet is uses no more than 40 bytes, so write that many packets 454 // until the tx queue if full. 455 ids := make(map[uint64]struct{}) 456 for i := queuePipeSize / 40; i > 0; i-- { 457 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 458 if err := c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), header.IPv4ProtocolNumber); err != nil { 459 t.Fatalf("WritePacket failed unexpectedly: %v", err) 460 } 461 462 // Check that they have different IDs. 463 desc := c.txq.tx.Pull() 464 pi := queue.DecodeTxPacketHeader(desc) 465 if _, ok := ids[pi.ID]; ok { 466 t.Fatalf("ID (%v) reused", pi.ID) 467 } 468 ids[pi.ID] = struct{}{} 469 } 470 471 // Next attempt to write must fail. 472 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 473 if want, err := tcpip.ErrWouldBlock, c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), header.IPv4ProtocolNumber); err != want { 474 t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want) 475 } 476 } 477 478 // TestFillTxMemory sends packets until the we run out of shared memory. 479 func TestFillTxMemory(t *testing.T) { 480 const bufferSize = 1500 481 c := newTestContext(t, 20000, bufferSize, localLinkAddr) 482 defer c.cleanup() 483 484 // Prepare to send a packet. 485 r := stack.Route{ 486 RemoteLinkAddress: remoteLinkAddr, 487 } 488 489 buf := buffer.NewView(100) 490 491 // Each packet is uses up one buffer, so write as many as possible until 492 // we fill the memory. 493 ids := make(map[uint64]struct{}) 494 for i := queueDataSize / bufferSize; i > 0; i-- { 495 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 496 if err := c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), header.IPv4ProtocolNumber); err != nil { 497 t.Fatalf("WritePacket failed unexpectedly: %v", err) 498 } 499 500 // Check that they have different IDs. 501 desc := c.txq.tx.Pull() 502 pi := queue.DecodeTxPacketHeader(desc) 503 if _, ok := ids[pi.ID]; ok { 504 t.Fatalf("ID (%v) reused", pi.ID) 505 } 506 ids[pi.ID] = struct{}{} 507 c.txq.tx.Flush() 508 } 509 510 // Next attempt to write must fail. 511 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 512 err := c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), header.IPv4ProtocolNumber) 513 if want := tcpip.ErrWouldBlock; err != want { 514 t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want) 515 } 516 } 517 518 // TestFillTxMemoryWithMultiBuffer sends packets until the we run out of 519 // shared memory for a 2-buffer packet, but still with room for a 1-buffer 520 // packet. 521 func TestFillTxMemoryWithMultiBuffer(t *testing.T) { 522 const bufferSize = 1500 523 c := newTestContext(t, 20000, bufferSize, localLinkAddr) 524 defer c.cleanup() 525 526 // Prepare to send a packet. 527 r := stack.Route{ 528 RemoteLinkAddress: remoteLinkAddr, 529 } 530 531 buf := buffer.NewView(100) 532 533 // Each packet is uses up one buffer, so write as many as possible 534 // until there is only one buffer left. 535 for i := queueDataSize/bufferSize - 1; i > 0; i-- { 536 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 537 if err := c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), header.IPv4ProtocolNumber); err != nil { 538 t.Fatalf("WritePacket failed unexpectedly: %v", err) 539 } 540 541 // Pull the posted buffer. 542 c.txq.tx.Pull() 543 c.txq.tx.Flush() 544 } 545 546 // Attempt to write a two-buffer packet. It must fail. 547 { 548 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 549 uu := buffer.NewView(bufferSize).ToVectorisedView() 550 if want, err := tcpip.ErrWouldBlock, c.ep.WritePacket(&r, nil /* gso */, hdr, uu, header.IPv4ProtocolNumber); err != want { 551 t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want) 552 } 553 } 554 555 // Attempt to write the one-buffer packet again. It must succeed. 556 { 557 hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) 558 if err := c.ep.WritePacket(&r, nil /* gso */, hdr, buf.ToVectorisedView(), header.IPv4ProtocolNumber); err != nil { 559 t.Fatalf("WritePacket failed unexpectedly: %v", err) 560 } 561 } 562 } 563 564 func pollPull(t *testing.T, p *pipe.Rx, to <-chan time.Time, errStr string) []byte { 565 t.Helper() 566 567 for { 568 b := p.Pull() 569 if b != nil { 570 return b 571 } 572 573 select { 574 case <-time.After(10 * time.Millisecond): 575 case <-to: 576 t.Fatal(errStr) 577 } 578 } 579 } 580 581 // TestSimpleReceive completes 1000 different receives with random payload and 582 // random number of buffers. It checks that the contents match the expected 583 // values. 584 func TestSimpleReceive(t *testing.T) { 585 const bufferSize = 1500 586 c := newTestContext(t, 20000, bufferSize, localLinkAddr) 587 defer c.cleanup() 588 589 // Check that buffers have been posted. 590 limit := c.ep.rx.q.PostedBuffersLimit() 591 for i := uint64(0); i < limit; i++ { 592 timeout := time.After(2 * time.Second) 593 bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for all buffers to be posted")) 594 595 if want := i * bufferSize; want != bi.Offset { 596 t.Fatalf("Bad posted offset: got %v, want %v", bi.Offset, want) 597 } 598 599 if want := i; want != bi.ID { 600 t.Fatalf("Bad posted ID: got %v, want %v", bi.ID, want) 601 } 602 603 if bufferSize != bi.Size { 604 t.Fatalf("Bad posted bufferSize: got %v, want %v", bi.Size, bufferSize) 605 } 606 } 607 c.rxq.tx.Flush() 608 609 // Create a slice with the indices 0..limit-1. 610 idx := make([]int, limit) 611 for i := range idx { 612 idx[i] = i 613 } 614 615 // Complete random packets 1000 times. 616 for iters := 1000; iters > 0; iters-- { 617 timeout := time.After(2 * time.Second) 618 // Prepare a random packet. 619 shuffle(idx) 620 n := 1 + rand.Intn(10) 621 bufs := make([]queue.RxBuffer, n) 622 contents := make([]byte, bufferSize*n-rand.Intn(500)) 623 randomFill(contents) 624 for i := range bufs { 625 j := idx[i] 626 bufs[i].Size = bufferSize 627 bufs[i].Offset = uint64(bufferSize * j) 628 bufs[i].ID = uint64(j) 629 630 copy(c.rxq.data[bufs[i].Offset:][:bufferSize], contents[i*bufferSize:]) 631 } 632 633 // Push completion. 634 c.pushRxCompletion(uint32(len(contents)), bufs) 635 c.rxq.rx.Flush() 636 syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0}) 637 638 // Wait for packet to be received, then check it. 639 c.waitForPackets(1, time.After(5*time.Second), "Timeout waiting for packet") 640 c.mu.Lock() 641 rcvd := []byte(c.packets[0].vv.First()) 642 c.packets = c.packets[:0] 643 c.mu.Unlock() 644 645 if contents := contents[header.EthernetMinimumSize:]; !bytes.Equal(contents, rcvd) { 646 t.Fatalf("Unexpected buffer contents: got %x, want %x", rcvd, contents) 647 } 648 649 // Check that buffers have been reposted. 650 for i := range bufs { 651 bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffers to be reposted")) 652 if bi != bufs[i] { 653 t.Fatalf("Unexpected buffer reposted: got %x, want %x", bi, bufs[i]) 654 } 655 } 656 c.rxq.tx.Flush() 657 } 658 } 659 660 // TestRxBuffersReposted tests that rx buffers get reposted after they have been 661 // completed. 662 func TestRxBuffersReposted(t *testing.T) { 663 const bufferSize = 1500 664 c := newTestContext(t, 20000, bufferSize, localLinkAddr) 665 defer c.cleanup() 666 667 // Receive all posted buffers. 668 limit := c.ep.rx.q.PostedBuffersLimit() 669 buffers := make([]queue.RxBuffer, 0, limit) 670 for i := limit; i > 0; i-- { 671 timeout := time.After(2 * time.Second) 672 buffers = append(buffers, queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for all buffers"))) 673 } 674 c.rxq.tx.Flush() 675 676 // Check that all buffers are reposted when individually completed. 677 for i := range buffers { 678 timeout := time.After(2 * time.Second) 679 // Complete the buffer. 680 c.pushRxCompletion(buffers[i].Size, buffers[i:][:1]) 681 c.rxq.rx.Flush() 682 syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0}) 683 684 // Wait for it to be reposted. 685 bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted")) 686 if bi != buffers[i] { 687 t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[i]) 688 } 689 } 690 c.rxq.tx.Flush() 691 692 // Check that all buffers are reposted when completed in pairs. 693 for i := 0; i < len(buffers)/2; i++ { 694 timeout := time.After(2 * time.Second) 695 // Complete with two buffers. 696 c.pushRxCompletion(2*bufferSize, buffers[2*i:][:2]) 697 c.rxq.rx.Flush() 698 syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0}) 699 700 // Wait for them to be reposted. 701 for j := 0; j < 2; j++ { 702 bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted")) 703 if bi != buffers[2*i+j] { 704 t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[2*i+j]) 705 } 706 } 707 } 708 c.rxq.tx.Flush() 709 } 710 711 // TestReceivePostingIsFull checks that the endpoint will properly handle the 712 // case when a received buffer cannot be immediately reposted because it hasn't 713 // been pulled from the tx pipe yet. 714 func TestReceivePostingIsFull(t *testing.T) { 715 const bufferSize = 1500 716 c := newTestContext(t, 20000, bufferSize, localLinkAddr) 717 defer c.cleanup() 718 719 // Complete first posted buffer before flushing it from the tx pipe. 720 first := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for first buffer to be posted")) 721 c.pushRxCompletion(first.Size, []queue.RxBuffer{first}) 722 c.rxq.rx.Flush() 723 syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0}) 724 725 // Check that packet is received. 726 c.waitForPackets(1, time.After(time.Second), "Timeout waiting for completed packet") 727 728 // Complete another buffer. 729 second := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for second buffer to be posted")) 730 c.pushRxCompletion(second.Size, []queue.RxBuffer{second}) 731 c.rxq.rx.Flush() 732 syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0}) 733 734 // Check that no packet is received yet, as the worker is blocked trying 735 // to repost. 736 select { 737 case <-time.After(500 * time.Millisecond): 738 case <-c.packetCh: 739 t.Fatalf("Unexpected packet received") 740 } 741 742 // Flush tx queue, which will allow the first buffer to be reposted, 743 // and the second completion to be pulled. 744 c.rxq.tx.Flush() 745 syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0}) 746 747 // Check that second packet completes. 748 c.waitForPackets(1, time.After(time.Second), "Timeout waiting for second completed packet") 749 } 750 751 // TestCloseWhileWaitingToPost closes the endpoint while it is waiting to 752 // repost a buffer. Make sure it backs out. 753 func TestCloseWhileWaitingToPost(t *testing.T) { 754 const bufferSize = 1500 755 c := newTestContext(t, 20000, bufferSize, localLinkAddr) 756 cleaned := false 757 defer func() { 758 if !cleaned { 759 c.cleanup() 760 } 761 }() 762 763 // Complete first posted buffer before flushing it from the tx pipe. 764 bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for initial buffer to be posted")) 765 c.pushRxCompletion(bi.Size, []queue.RxBuffer{bi}) 766 c.rxq.rx.Flush() 767 syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0}) 768 769 // Wait for packet to be indicated. 770 c.waitForPackets(1, time.After(time.Second), "Timeout waiting for completed packet") 771 772 // Cleanup and wait for worker to complete. 773 c.cleanup() 774 cleaned = true 775 c.ep.Wait() 776 }