github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/xdp/xdp_unsafe.go

// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package xdp

import (
	"fmt"
	"reflect"
	"unsafe"

	"github.com/nicocha30/gvisor-ligolo/pkg/atomicbitops"
	"golang.org/x/sys/unix"
)

// registerUMEM registers the UMEM region described by reg with the AF_XDP
// socket fd via the XDP_UMEM_REG socket option.
func registerUMEM(fd int, reg unix.XDPUmemReg) error {
	if _, _, errno := unix.Syscall6(unix.SYS_SETSOCKOPT, uintptr(fd), unix.SOL_XDP, unix.XDP_UMEM_REG, uintptr(unsafe.Pointer(&reg)), unsafe.Sizeof(reg), 0); errno != 0 {
		return fmt.Errorf("failed to setsockopt(XDP_UMEM_REG): errno %d", errno)
	}
	return nil
}

// getOffsets queries the kernel for the mmap offsets of the four XDP rings
// (RX, TX, fill, and completion) associated with fd.
func getOffsets(fd int) (unix.XDPMmapOffsets, error) {
	var off unix.XDPMmapOffsets
	size := unsafe.Sizeof(off)
	if _, _, errno := unix.Syscall6(unix.SYS_GETSOCKOPT, uintptr(fd), unix.SOL_XDP, unix.XDP_MMAP_OFFSETS, uintptr(unsafe.Pointer(&off)), uintptr(unsafe.Pointer(&size)), 0); errno != 0 {
		return unix.XDPMmapOffsets{}, fmt.Errorf("failed to get offsets: %v", errno)
	} else if unsafe.Sizeof(off) != size {
		return unix.XDPMmapOffsets{}, fmt.Errorf("expected optlen of %d, but found %d", unsafe.Sizeof(off), size)
	}
	return off, nil
}
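
// The sketch below is illustrative and not part of the original file: it
// shows how registerUMEM and getOffsets typically fit together when bringing
// up an AF_XDP socket. The chunk size, the UMEM size, and the
// exampleUMEMSetup name itself are assumptions for illustration; the
// XDPUmemReg field names follow golang.org/x/sys/unix.
func exampleUMEMSetup() (int, unix.XDPMmapOffsets, error) {
	fd, err := unix.Socket(unix.AF_XDP, unix.SOCK_RAW, 0)
	if err != nil {
		return -1, unix.XDPMmapOffsets{}, fmt.Errorf("failed to create AF_XDP socket: %v", err)
	}
	// Allocate a page-aligned UMEM region; 2048-byte chunks in a 4 MiB
	// region are assumed values.
	mem, err := unix.Mmap(-1, 0, 4<<20, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
	if err != nil {
		unix.Close(fd)
		return -1, unix.XDPMmapOffsets{}, fmt.Errorf("failed to mmap UMEM: %v", err)
	}
	reg := unix.XDPUmemReg{
		Addr: uint64(sliceBackingPointer(mem)),
		Len:  uint64(len(mem)),
		Size: 2048,
	}
	if err := registerUMEM(fd, reg); err != nil {
		unix.Close(fd)
		return -1, unix.XDPMmapOffsets{}, err
	}
	off, err := getOffsets(fd)
	if err != nil {
		unix.Close(fd)
		return -1, unix.XDPMmapOffsets{}, err
	}
	return fd, off, nil
}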

// sliceBackingPointer returns the address of the first byte of slice, which
// must be non-empty.
func sliceBackingPointer(slice []byte) uintptr {
	return uintptr(unsafe.Pointer(&slice[0]))
}

// The fill and completion rings hold 64-bit UMEM addresses, while the RX and
// TX rings hold full XDP descriptors. These sizes determine how long each
// ring's mmapped region must be.
func sizeOfFillQueueDesc() uint64 {
	return uint64(unsafe.Sizeof(uint64(0)))
}

func sizeOfRXQueueDesc() uint64 {
	return uint64(unsafe.Sizeof(unix.XDPDesc{}))
}

func sizeOfCompletionQueueDesc() uint64 {
	return uint64(unsafe.Sizeof(uint64(0)))
}

func sizeOfTXQueueDesc() uint64 {
	return uint64(unsafe.Sizeof(unix.XDPDesc{}))
}
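
// A hedged sketch, not part of the original file: the sizeOf* helpers above
// feed into the mmap length of each ring, which is the descriptor-array
// offset plus space for every descriptor. exampleMapFillRing and its
// nDescriptors parameter are assumed names; the fill ring's fixed page
// offset is unix.XDP_UMEM_PGOFF_FILL_RING.
func exampleMapFillRing(fd int, off unix.XDPMmapOffsets, nDescriptors uint64) ([]byte, error) {
	length := off.Fr.Desc + nDescriptors*sizeOfFillQueueDesc()
	mem, err := unix.Mmap(fd, unix.XDP_UMEM_PGOFF_FILL_RING, int(length),
		unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED|unix.MAP_POPULATE)
	if err != nil {
		return nil, fmt.Errorf("failed to mmap fill ring: %v", err)
	}
	return mem, nil
}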

// init points fq's ring, producer, consumer, and flags fields into the
// mmapped fill ring at the offsets reported by the kernel.
func (fq *FillQueue) init(off unix.XDPMmapOffsets, opts ReadOnlySocketOpts) {
	fillQueueRingHdr := (*reflect.SliceHeader)(unsafe.Pointer(&fq.ring))
	fillQueueRingHdr.Data = uintptr(unsafe.Pointer(&fq.mem[off.Fr.Desc]))
	fillQueueRingHdr.Len = int(opts.NDescriptors)
	fillQueueRingHdr.Cap = fillQueueRingHdr.Len
	fq.producer = (*atomicbitops.Uint32)(unsafe.Pointer(&fq.mem[off.Fr.Producer]))
	fq.consumer = (*atomicbitops.Uint32)(unsafe.Pointer(&fq.mem[off.Fr.Consumer]))
	fq.flags = (*atomicbitops.Uint32)(unsafe.Pointer(&fq.mem[off.Fr.Flags]))
}

// init points rq's ring, producer, consumer, and flags fields into the
// mmapped RX ring at the offsets reported by the kernel.
func (rq *RXQueue) init(off unix.XDPMmapOffsets, opts ReadOnlySocketOpts) {
	rxQueueRingHdr := (*reflect.SliceHeader)(unsafe.Pointer(&rq.ring))
	rxQueueRingHdr.Data = uintptr(unsafe.Pointer(&rq.mem[off.Rx.Desc]))
	rxQueueRingHdr.Len = int(opts.NDescriptors)
	rxQueueRingHdr.Cap = rxQueueRingHdr.Len
	rq.producer = (*atomicbitops.Uint32)(unsafe.Pointer(&rq.mem[off.Rx.Producer]))
	rq.consumer = (*atomicbitops.Uint32)(unsafe.Pointer(&rq.mem[off.Rx.Consumer]))
	rq.flags = (*atomicbitops.Uint32)(unsafe.Pointer(&rq.mem[off.Rx.Flags]))
	// These probably don't have to be atomic, but we're only loading once,
	// so better safe than sorry.
	rq.cachedProducer = rq.producer.Load()
	rq.cachedConsumer = rq.consumer.Load()
}

// init points cq's ring, producer, consumer, and flags fields into the
// mmapped completion ring at the offsets reported by the kernel.
func (cq *CompletionQueue) init(off unix.XDPMmapOffsets, opts ReadOnlySocketOpts) {
	completionQueueRingHdr := (*reflect.SliceHeader)(unsafe.Pointer(&cq.ring))
	completionQueueRingHdr.Data = uintptr(unsafe.Pointer(&cq.mem[off.Cr.Desc]))
	completionQueueRingHdr.Len = int(opts.NDescriptors)
	completionQueueRingHdr.Cap = completionQueueRingHdr.Len
	cq.producer = (*atomicbitops.Uint32)(unsafe.Pointer(&cq.mem[off.Cr.Producer]))
	cq.consumer = (*atomicbitops.Uint32)(unsafe.Pointer(&cq.mem[off.Cr.Consumer]))
	cq.flags = (*atomicbitops.Uint32)(unsafe.Pointer(&cq.mem[off.Cr.Flags]))
	// These probably don't have to be atomic, but we're only loading once,
	// so better safe than sorry.
	cq.cachedProducer = cq.producer.Load()
	cq.cachedConsumer = cq.consumer.Load()
}

// init points tq's ring, producer, consumer, and flags fields into the
// mmapped TX ring at the offsets reported by the kernel.
func (tq *TXQueue) init(off unix.XDPMmapOffsets, opts ReadOnlySocketOpts) {
	txQueueRingHdr := (*reflect.SliceHeader)(unsafe.Pointer(&tq.ring))
	txQueueRingHdr.Data = uintptr(unsafe.Pointer(&tq.mem[off.Tx.Desc]))
	txQueueRingHdr.Len = int(opts.NDescriptors)
	txQueueRingHdr.Cap = txQueueRingHdr.Len
	tq.producer = (*atomicbitops.Uint32)(unsafe.Pointer(&tq.mem[off.Tx.Producer]))
	tq.consumer = (*atomicbitops.Uint32)(unsafe.Pointer(&tq.mem[off.Tx.Consumer]))
	tq.flags = (*atomicbitops.Uint32)(unsafe.Pointer(&tq.mem[off.Tx.Flags]))
}
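
// A hedged aside, not part of the original file: on Go 1.17+ the
// reflect.SliceHeader pattern above can be expressed with unsafe.Slice
// instead, assuming tq.ring is a []unix.XDPDesc:
//
//	tq.ring = unsafe.Slice((*unix.XDPDesc)(unsafe.Pointer(&tq.mem[off.Tx.Desc])), opts.NDescriptors)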

// kick notifies the kernel that there are packets to transmit, but only when
// the kernel has requested a wakeup via XDP_RING_NEED_WAKEUP.
func (tq *TXQueue) kick() error {
	if tq.flags.RacyLoad()&unix.XDP_RING_NEED_WAKEUP == 0 {
		return nil
	}

	var msg unix.Msghdr
	if _, _, errno := unix.Syscall6(unix.SYS_SENDMSG, uintptr(tq.sockfd), uintptr(unsafe.Pointer(&msg)), unix.MSG_DONTWAIT|unix.MSG_NOSIGNAL, 0, 0, 0); errno != 0 {
		return fmt.Errorf("failed to kick TX queue via sendmsg: errno %d", errno)
	}
	return nil
}
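
// A hedged usage sketch, not part of the original file: a transmit path
// typically writes descriptors into the TX ring, publishes the new producer
// index, and then calls kick so the kernel drains the ring. The enqueue step
// and the exampleAfterEnqueue name are assumed for illustration.
func exampleAfterEnqueue(tq *TXQueue) error {
	// Descriptors are assumed to already be written to tq.ring and the
	// producer index published before this point.
	if err := tq.kick(); err != nil {
		return fmt.Errorf("TX wakeup failed: %v", err)
	}
	return nil
}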