github.com/ttpreport/gvisor-ligolo@v0.0.0-20240123134145-a858404967ba/pkg/xdp/fillqueue.go

// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64 || arm64
// +build amd64 arm64

package xdp

import (
	"github.com/ttpreport/gvisor-ligolo/pkg/atomicbitops"
)

// The FillQueue is how a process tells the kernel which buffers are available
// to be filled by incoming packets.
//
// FillQueue is not thread-safe and requires external synchronization.
type FillQueue struct {
	// mem is the mmap'd area shared with the kernel. Many other fields of
	// this struct point into mem.
	mem []byte

	// ring is the actual ring buffer. It is a list of frame addresses
	// ready for incoming packets.
	//
	// len(ring) must be a power of 2.
	ring []uint64

	// mask is used whenever indexing into ring. It is always len(ring)-1.
	// It prevents index out of bounds errors while allowing the producer
	// and consumer pointers to repeatedly "overflow" and loop back around
	// the ring.
	mask uint32

	// producer points to the shared atomic value that indicates the last
	// produced descriptor. Only we update this value.
	producer *atomicbitops.Uint32

	// consumer points to the shared atomic value that indicates the last
	// consumed descriptor. Only the kernel updates this value.
	consumer *atomicbitops.Uint32

	// flags points to the shared atomic value that holds flags for the
	// queue.
	flags *atomicbitops.Uint32

	// Cached values are used to avoid relatively expensive atomic
	// operations. They are used, incremented, and decremented multiple
	// times with non-atomic operations, and then "batch-updated" by
	// reading or writing atomically to synchronize with the kernel.

	// cachedProducer is used to atomically write *producer.
	cachedProducer uint32
	// cachedConsumer is updated when we atomically read *consumer.
	// cachedConsumer is actually len(ring) larger than the real consumer
	// value. See free() for details.
	cachedConsumer uint32
}

// free returns the number of free descriptors in the fill queue.
func (fq *FillQueue) free(toReserve uint32) uint32 {
	// Try to find free descriptors without incurring an atomic operation.
	//
	// cachedConsumer is always len(fq.ring) larger than the real consumer
	// value. This lets us, in the common case, compute the number of free
	// descriptors simply via fq.cachedConsumer - fq.cachedProducer without
	// also adding len(fq.ring).
	if available := fq.cachedConsumer - fq.cachedProducer; available >= toReserve {
		return available
	}

	// If we didn't already have enough descriptors available, check
	// whether the kernel has returned some to us.
	fq.cachedConsumer = fq.consumer.Load()
	fq.cachedConsumer += uint32(len(fq.ring))
	return fq.cachedConsumer - fq.cachedProducer
}
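// Illustrative note (not in the original file): because the counter
// arithmetic in free above is done entirely in uint32, the result stays
// correct even when the counters wrap around 2^32. For example, assuming
// len(fq.ring) == 8, a real consumer value of 0xFFFFFFFE, and a producer
// that has wrapped to 0x00000001 (three descriptors outstanding),
// cachedConsumer == 0xFFFFFFFE + 8 == 0x00000006, and free returns
// 6 - 1 == 5, exactly the number of empty slots in the 8-entry ring.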
// Notify updates the producer such that it is visible to the kernel.
func (fq *FillQueue) Notify() {
	fq.producer.Store(fq.cachedProducer)
}

// Set sets the fill queue's descriptor at index to addr.
func (fq *FillQueue) Set(index uint32, addr uint64) {
	// Use the mask to wrap the index back around the ring instead of
	// overflowing.
	fq.ring[index&fq.mask] = addr
}

// FillAll posts as many empty buffers as possible for the kernel to fill, then
// notifies the kernel.
//
// +checklocks:umem.mu
func (fq *FillQueue) FillAll(umem *UMEM) {
	// Figure out how many buffers and queue slots are available.
	available := fq.free(umem.nFreeFrames)
	if available == 0 {
		return
	}
	if available > umem.nFreeFrames {
		available = umem.nFreeFrames
	}

	// Fill the queue as much as possible and notify the kernel.
	index := fq.cachedProducer
	fq.cachedProducer += available
	for i := uint32(0); i < available; i++ {
		fq.Set(index+i, umem.AllocFrame())
	}
	fq.Notify()
}
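// exampleRefill is an illustrative sketch, not part of the original file,
// showing the intended calling pattern for FillAll. It assumes umem.mu is a
// plain mutex exposing Lock/Unlock; the +checklocks annotation above requires
// it to be held for the duration of the call.
func exampleRefill(fq *FillQueue, umem *UMEM) {
	// FillAll consumes frames via umem.AllocFrame, so the UMEM lock must
	// be held while it runs.
	umem.mu.Lock()
	defer umem.mu.Unlock()

	// Post every available frame for the kernel to fill with incoming
	// packets; FillAll calls Notify internally to publish the new
	// producer index.
	fq.FillAll(umem)
}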