github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/tcpip/link/sharedmem/server_tx.go (about)

     1  // Copyright 2021 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  //go:build linux
    16  // +build linux
    17  
    18  package sharedmem
    19  
    20  import (
    21  	"golang.org/x/sys/unix"
    22  	"github.com/nicocha30/gvisor-ligolo/pkg/atomicbitops"
    23  	"github.com/nicocha30/gvisor-ligolo/pkg/buffer"
    24  	"github.com/nicocha30/gvisor-ligolo/pkg/cleanup"
    25  	"github.com/nicocha30/gvisor-ligolo/pkg/eventfd"
    26  	"github.com/nicocha30/gvisor-ligolo/pkg/tcpip/link/sharedmem/pipe"
    27  	"github.com/nicocha30/gvisor-ligolo/pkg/tcpip/link/sharedmem/queue"
    28  	"github.com/nicocha30/gvisor-ligolo/pkg/tcpip/stack"
    29  )
    30  
// serverTx represents the server end of the sharedmem queue and is used to send
// packets to the peer in the buffers posted by the peer in the fillPipe.
type serverTx struct {
	// fillPipe represents the receive end of the pipe that carries the RxBuffers
	// posted by the peer.
	fillPipe pipe.Rx

	// completionPipe represents the transmit end of the pipe that carries the
	// descriptors for filled RxBuffers.
	completionPipe pipe.Tx

	// data represents the buffer area where the packet payload is held.
	data []byte

	// eventFD is used to notify the peer when fill requests are fulfilled.
	eventFD eventfd.Eventfd

	// sharedData is the memory region used to enable/disable notifications.
	sharedData []byte

	// sharedEventFDState is the memory region in sharedData used to enable/disable
	// notifications on eventFD.
	sharedEventFDState *atomicbitops.Uint32
}
    55  
    56  // init initializes all tstate needed by the serverTx queue based on the
    57  // information provided.
    58  //
    59  // The caller always retains ownership of all file descriptors passed in. The
    60  // queue implementation will duplicate any that it may need in the future.
    61  func (s *serverTx) init(c *QueueConfig) error {
    62  	// Map in all buffers.
    63  	fillPipeMem, err := getBuffer(c.TxPipeFD)
    64  	if err != nil {
    65  		return err
    66  	}
    67  	cu := cleanup.Make(func() { unix.Munmap(fillPipeMem) })
    68  	defer cu.Clean()
    69  
    70  	completionPipeMem, err := getBuffer(c.RxPipeFD)
    71  	if err != nil {
    72  		return err
    73  	}
    74  	cu.Add(func() { unix.Munmap(completionPipeMem) })
    75  
    76  	data, err := getBuffer(c.DataFD)
    77  	if err != nil {
    78  		return err
    79  	}
    80  	cu.Add(func() { unix.Munmap(data) })
    81  
    82  	sharedData, err := getBuffer(c.SharedDataFD)
    83  	if err != nil {
    84  		return err
    85  	}
    86  	cu.Add(func() { unix.Munmap(sharedData) })
    87  
    88  	// Duplicate the eventFD so that caller can close it but we can still
    89  	// use it.
    90  	efd, err := c.EventFD.Dup()
    91  	if err != nil {
    92  		return err
    93  	}
    94  	cu.Add(func() { efd.Close() })
    95  
    96  	cu.Release()
    97  
    98  	s.fillPipe.Init(fillPipeMem)
    99  	s.completionPipe.Init(completionPipeMem)
   100  	s.data = data
   101  	s.eventFD = efd
   102  	s.sharedData = sharedData
   103  	s.sharedEventFDState = sharedDataPointer(sharedData)
   104  
   105  	return nil
   106  }
   107  
   108  func (s *serverTx) cleanup() {
   109  	unix.Munmap(s.fillPipe.Bytes())
   110  	unix.Munmap(s.completionPipe.Bytes())
   111  	unix.Munmap(s.data)
   112  	unix.Munmap(s.sharedData)
   113  	s.eventFD.Close()
   114  }
   115  
   116  // acquireBuffers acquires enough buffers to hold all the data in views or
   117  // returns nil if not enough buffers are currently available.
   118  func (s *serverTx) acquireBuffers(pktBuffer buffer.Buffer, buffers []queue.RxBuffer) (acquiredBuffers []queue.RxBuffer) {
   119  	acquiredBuffers = buffers[:0]
   120  	wantBytes := int(pktBuffer.Size())
   121  	for wantBytes > 0 {
   122  		var b []byte
   123  		if b = s.fillPipe.Pull(); b == nil {
   124  			s.fillPipe.Abort()
   125  			return nil
   126  		}
   127  		rxBuffer := queue.DecodeRxBufferHeader(b)
   128  		acquiredBuffers = append(acquiredBuffers, rxBuffer)
   129  		wantBytes -= int(rxBuffer.Size)
   130  	}
   131  	return acquiredBuffers
   132  }
   133  
   134  // fillPacket copies the data in the provided views into buffers pulled from the
   135  // fillPipe and returns a slice of RxBuffers that contain the copied data as
   136  // well as the total number of bytes copied.
   137  //
   138  // To avoid allocations the filledBuffers are appended to the buffers slice
   139  // which will be grown as required. This method takes ownership of pktBuffer.
   140  func (s *serverTx) fillPacket(pktBuffer buffer.Buffer, buffers []queue.RxBuffer) (filledBuffers []queue.RxBuffer, totalCopied uint32) {
   141  	bufs := s.acquireBuffers(pktBuffer, buffers)
   142  	if bufs == nil {
   143  		pktBuffer.Release()
   144  		return nil, 0
   145  	}
   146  	br := pktBuffer.AsBufferReader()
   147  	defer br.Close()
   148  
   149  	for i := 0; br.Len() > 0 && i < len(bufs); i++ {
   150  		buf := bufs[i]
   151  		copied, err := br.Read(s.data[buf.Offset:][:buf.Size])
   152  		buf.Size = uint32(copied)
   153  		// Copy the packet into the posted buffer.
   154  		totalCopied += bufs[i].Size
   155  		if err != nil {
   156  			return bufs, totalCopied
   157  		}
   158  	}
   159  	return bufs, totalCopied
   160  }
   161  
   162  func (s *serverTx) transmit(pkt stack.PacketBufferPtr) bool {
   163  	buffers := make([]queue.RxBuffer, 8)
   164  	buffers, totalCopied := s.fillPacket(pkt.ToBuffer(), buffers)
   165  	if totalCopied == 0 {
   166  		// drop the packet as not enough buffers were probably available
   167  		// to send.
   168  		return false
   169  	}
   170  	b := s.completionPipe.Push(queue.RxCompletionSize(len(buffers)))
   171  	if b == nil {
   172  		return false
   173  	}
   174  	queue.EncodeRxCompletion(b, totalCopied, 0 /* reserved */)
   175  	for i := 0; i < len(buffers); i++ {
   176  		queue.EncodeRxCompletionBuffer(b, i, buffers[i])
   177  	}
   178  	s.completionPipe.Flush()
   179  	s.fillPipe.Flush()
   180  	return true
   181  }
   182  
   183  func (s *serverTx) notificationsEnabled() bool {
   184  	// notifications are considered to be enabled unless explicitly disabled.
   185  	return s.sharedEventFDState.Load() != queue.EventFDDisabled
   186  }
   187  
   188  func (s *serverTx) notify() {
   189  	if s.notificationsEnabled() {
   190  		s.eventFD.Notify()
   191  	}
   192  }