github.com/sagernet/gvisor@v0.0.0-20240428053021-e691de28565f/pkg/xdp/xdp_unsafe.go

// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64 || arm64
// +build amd64 arm64

package xdp

import (
	"fmt"
	"reflect"
	"unsafe"

	"github.com/sagernet/gvisor/pkg/atomicbitops"
	"golang.org/x/sys/unix"
)

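// registerUMEM installs the UMEM region on the AF_XDP socket fd via the
// XDP_UMEM_REG socket option, making the shared packet buffer visible to
// the kernel.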
func registerUMEM(fd int, reg unix.XDPUmemReg) error {
	if _, _, errno := unix.Syscall6(unix.SYS_SETSOCKOPT, uintptr(fd), unix.SOL_XDP, unix.XDP_UMEM_REG, uintptr(unsafe.Pointer(&reg)), unsafe.Sizeof(reg), 0); errno != 0 {
		return fmt.Errorf("failed to setsockopt(XDP_UMEM_REG): errno %d", errno)
	}
	return nil
}

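// getOffsets queries the XDP_MMAP_OFFSETS socket option, which reports
// where the producer, consumer, descriptor, and flags fields of each ring
// live within their mmap'd regions.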
func getOffsets(fd int) (unix.XDPMmapOffsets, error) {
	var off unix.XDPMmapOffsets
	// size is passed as the optlen and checked afterwards to ensure the
	// kernel wrote back the expected number of bytes.
	size := unsafe.Sizeof(off)
	if _, _, errno := unix.Syscall6(unix.SYS_GETSOCKOPT, uintptr(fd), unix.SOL_XDP, unix.XDP_MMAP_OFFSETS, uintptr(unsafe.Pointer(&off)), uintptr(unsafe.Pointer(&size)), 0); errno != 0 {
		return unix.XDPMmapOffsets{}, fmt.Errorf("failed to getsockopt(XDP_MMAP_OFFSETS): errno %d", errno)
	} else if unsafe.Sizeof(off) != size {
		return unix.XDPMmapOffsets{}, fmt.Errorf("expected optlen of %d, but found %d", unsafe.Sizeof(off), size)
	}
	return off, nil
}

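// sliceBackingPointer returns the address of the first element of slice's
// backing array.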
func sliceBackingPointer(slice []byte) uintptr {
	return uintptr(unsafe.Pointer(&slice[0]))
}

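// sizeOfFillQueueDesc returns the size of a fill queue descriptor, which
// is just a 64-bit UMEM frame address.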
func sizeOfFillQueueDesc() uint64 {
	return uint64(unsafe.Sizeof(uint64(0)))
}

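// sizeOfRXQueueDesc returns the size of an RX queue descriptor
// (unix.XDPDesc).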
func sizeOfRXQueueDesc() uint64 {
	return uint64(unsafe.Sizeof(unix.XDPDesc{}))
}

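// sizeOfCompletionQueueDesc returns the size of a completion queue
// descriptor, which is just a 64-bit UMEM frame address.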
func sizeOfCompletionQueueDesc() uint64 {
	return uint64(unsafe.Sizeof(uint64(0)))
}

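// sizeOfTXQueueDesc returns the size of a TX queue descriptor
// (unix.XDPDesc).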
func sizeOfTXQueueDesc() uint64 {
	return uint64(unsafe.Sizeof(unix.XDPDesc{}))
}

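// init points fq's ring, producer, consumer, and flags fields at the
// appropriate offsets within the mmap'd fill ring region.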
func (fq *FillQueue) init(off unix.XDPMmapOffsets, opts Opts) {
	fillQueueRingHdr := (*reflect.SliceHeader)(unsafe.Pointer(&fq.ring))
	fillQueueRingHdr.Data = uintptr(unsafe.Pointer(&fq.mem[off.Fr.Desc]))
	fillQueueRingHdr.Len = int(opts.NDescriptors)
	fillQueueRingHdr.Cap = fillQueueRingHdr.Len
	fq.producer = (*atomicbitops.Uint32)(unsafe.Pointer(&fq.mem[off.Fr.Producer]))
	fq.consumer = (*atomicbitops.Uint32)(unsafe.Pointer(&fq.mem[off.Fr.Consumer]))
	fq.flags = (*atomicbitops.Uint32)(unsafe.Pointer(&fq.mem[off.Fr.Flags]))
}

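// init points rq's ring, producer, consumer, and flags fields at the
// appropriate offsets within the mmap'd RX ring region and seeds the
// cached producer and consumer indices.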
func (rq *RXQueue) init(off unix.XDPMmapOffsets, opts Opts) {
	rxQueueRingHdr := (*reflect.SliceHeader)(unsafe.Pointer(&rq.ring))
	rxQueueRingHdr.Data = uintptr(unsafe.Pointer(&rq.mem[off.Rx.Desc]))
	rxQueueRingHdr.Len = int(opts.NDescriptors)
	rxQueueRingHdr.Cap = rxQueueRingHdr.Len
	rq.producer = (*atomicbitops.Uint32)(unsafe.Pointer(&rq.mem[off.Rx.Producer]))
	rq.consumer = (*atomicbitops.Uint32)(unsafe.Pointer(&rq.mem[off.Rx.Consumer]))
	rq.flags = (*atomicbitops.Uint32)(unsafe.Pointer(&rq.mem[off.Rx.Flags]))
	// These probably don't have to be atomic, but we're only loading once
	// so better safe than sorry.
	rq.cachedProducer = rq.producer.Load()
	rq.cachedConsumer = rq.consumer.Load()
}

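// init points cq's ring, producer, consumer, and flags fields at the
// appropriate offsets within the mmap'd completion ring region and seeds
// the cached producer and consumer indices.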
func (cq *CompletionQueue) init(off unix.XDPMmapOffsets, opts Opts) {
	completionQueueRingHdr := (*reflect.SliceHeader)(unsafe.Pointer(&cq.ring))
	completionQueueRingHdr.Data = uintptr(unsafe.Pointer(&cq.mem[off.Cr.Desc]))
	completionQueueRingHdr.Len = int(opts.NDescriptors)
	completionQueueRingHdr.Cap = completionQueueRingHdr.Len
	cq.producer = (*atomicbitops.Uint32)(unsafe.Pointer(&cq.mem[off.Cr.Producer]))
	cq.consumer = (*atomicbitops.Uint32)(unsafe.Pointer(&cq.mem[off.Cr.Consumer]))
	cq.flags = (*atomicbitops.Uint32)(unsafe.Pointer(&cq.mem[off.Cr.Flags]))
	// These probably don't have to be atomic, but we're only loading once
	// so better safe than sorry.
	cq.cachedProducer = cq.producer.Load()
	cq.cachedConsumer = cq.consumer.Load()
}

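// init points tq's ring, producer, consumer, and flags fields at the
// appropriate offsets within the mmap'd TX ring region.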
func (tq *TXQueue) init(off unix.XDPMmapOffsets, opts Opts) {
	txQueueRingHdr := (*reflect.SliceHeader)(unsafe.Pointer(&tq.ring))
	txQueueRingHdr.Data = uintptr(unsafe.Pointer(&tq.mem[off.Tx.Desc]))
	txQueueRingHdr.Len = int(opts.NDescriptors)
	txQueueRingHdr.Cap = txQueueRingHdr.Len
	tq.producer = (*atomicbitops.Uint32)(unsafe.Pointer(&tq.mem[off.Tx.Producer]))
	tq.consumer = (*atomicbitops.Uint32)(unsafe.Pointer(&tq.mem[off.Tx.Consumer]))
	tq.flags = (*atomicbitops.Uint32)(unsafe.Pointer(&tq.mem[off.Tx.Flags]))
}

// kick notifies the kernel that there are packets to transmit. The
// syscall is skipped unless the kernel has set XDP_RING_NEED_WAKEUP on
// the ring; an empty non-blocking sendmsg on the AF_XDP socket is enough
// to trigger transmission.
func (tq *TXQueue) kick() error {
	if tq.flags.RacyLoad()&unix.XDP_RING_NEED_WAKEUP == 0 {
		return nil
	}

	var msg unix.Msghdr
	if _, _, errno := unix.Syscall6(unix.SYS_SENDMSG, uintptr(tq.sockfd), uintptr(unsafe.Pointer(&msg)), unix.MSG_DONTWAIT|unix.MSG_NOSIGNAL, 0, 0, 0); errno != 0 {
		return fmt.Errorf("failed to kick TX queue via sendmsg: errno %d", errno)
	}
	return nil
}