github.com/sagernet/gvisor@v0.0.0-20240428053021-e691de28565f/pkg/xdp/rxqueue.go (about)

     1  // Copyright 2022 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  //go:build amd64 || arm64
    16  // +build amd64 arm64
    17  
    18  package xdp
    19  
    20  import (
    21  	"golang.org/x/sys/unix"
    22  	"github.com/sagernet/gvisor/pkg/atomicbitops"
    23  )
    24  
// The RXQueue is how the kernel tells a process which buffers are full with
// incoming packets.
//
// RXQueue is not thread-safe and requires external synchronization.
type RXQueue struct {
	// mem is the mmap'd area shared with the kernel. Many other fields of
	// this struct point into mem.
	mem []byte

	// ring is the actual ring buffer. It is a list of XDP descriptors
	// pointing to incoming packets.
	//
	// len(ring) must be a power of 2, so that mask (below) can be used
	// for cheap modular indexing.
	ring []unix.XDPDesc

	// mask is used whenever indexing into ring. It is always len(ring)-1.
	// It prevents index out of bounds errors while allowing the producer
	// and consumer pointers to repeatedly "overflow" and loop back around
	// the ring.
	mask uint32

	// producer points to the shared atomic value that indicates the last
	// produced descriptor. Only the kernel updates this value.
	producer *atomicbitops.Uint32

	// consumer points to the shared atomic value that indicates the last
	// consumed descriptor. Only we update this value.
	consumer *atomicbitops.Uint32

	// flags points to the shared atomic value that holds flags for the
	// queue.
	flags *atomicbitops.Uint32

	// Cached values are used to avoid relatively expensive atomic
	// operations. They are used, incremented, and decremented multiple
	// times with non-atomic operations, and then "batch-updated" by
	// reading or writing atomically to synchronize with the kernel.

	// cachedProducer is updated when we atomically read *producer.
	cachedProducer uint32
	// cachedConsumer is used to atomically write *consumer. It tracks
	// entries handed out by Peek but not yet released to the kernel.
	cachedConsumer uint32
}
    68  
    69  // Peek returns the number of packets available to read as well as the index at
    70  // which they start. Peek will only return a packet once, so callers must
    71  // process any received packets.
    72  func (rq *RXQueue) Peek() (nReceived, index uint32) {
    73  	// Get the number of available buffers and update cachedConsumer to
    74  	// reflect that we're going to consume them.
    75  	entries := rq.free()
    76  	index = rq.cachedConsumer
    77  	rq.cachedConsumer += entries
    78  	return entries, index
    79  }
    80  
    81  func (rq *RXQueue) free() uint32 {
    82  	// Return any buffers we know about without incurring an atomic
    83  	// operation if possible.
    84  	entries := rq.cachedProducer - rq.cachedConsumer
    85  	// If we're not aware of any RX'd packets, refresh the producer pointer
    86  	// to see whether the kernel enqueued anything.
    87  	if entries == 0 {
    88  		rq.cachedProducer = rq.producer.Load()
    89  		entries = rq.cachedProducer - rq.cachedConsumer
    90  	}
    91  	return entries
    92  }
    93  
    94  // Release notifies the kernel that we have consumed nDone packets.
    95  func (rq *RXQueue) Release(nDone uint32) {
    96  	// We don't have to use an atomic add because only we update this; the
    97  	// kernel just reads it.
    98  	rq.consumer.Store(rq.consumer.RacyLoad() + nDone)
    99  }
   100  
   101  // Get gets the descriptor at index.
   102  func (rq *RXQueue) Get(index uint32) unix.XDPDesc {
   103  	// Use mask to avoid overflowing and loop back around the ring.
   104  	return rq.ring[index&rq.mask]
   105  }