github.com/ttpreport/gvisor-ligolo@v0.0.0-20240123134145-a858404967ba/pkg/sentry/platform/systrap/context_queue.go

// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package systrap

import (
	"sync/atomic"
)

// LINT.IfChange
const (
	// maxContextQueueEntries is the size of the ringbuffer.
	maxContextQueueEntries uint32 = uint32(maxGuestContexts) + 1
)

type queuedContext struct {
	contextID uint32
	threadID  uint32
}

// contextQueue is a structure shared with each stub thread that is used
// to signal to stub threads which contexts are ready to resume running.
//
// It is a lockless ringbuffer where threads try to police themselves on
// whether they should continue waiting for a context or go to sleep if
// they are unneeded.
type contextQueue struct {
	// start is an index used for taking contexts out of the ringbuffer.
	start uint32
	// end is an index used for putting new contexts into the ringbuffer.
	end uint32

	// numActiveThreads indicates to the sentry how many stubs are running.
	// It is changed only by stub threads.
	numActiveThreads uint32
	// numThreadsToWakeup is the number of threads that the Sentry has
	// asked to wake up. The Sentry increments it and stub threads
	// decrement it.
	numThreadsToWakeup uint32
	// numActiveContexts is the number of running and waiting contexts.
	numActiveContexts uint32
	// numAwakeContexts is the number of awake contexts. It includes all
	// active contexts and contexts that are running in the Sentry.
	numAwakeContexts uint32

	// fastPathDisabledTS is the timestamp of the moment when the stub
	// fast path was last disabled.
	fastPathDisabledTS uint64
	// fastPathFailedInRow is the number of consecutive failed fast-path
	// attempts.
	fastPathFailedInRow uint32
	// fastPathDisabled is set to one while the stub fast path is
	// disabled (see enableFastPath/disableFastPath below).
	fastPathDisabled uint32
	// ringbuffer holds the queued contexts; each element packs a
	// producer index and a context ID (see contextQueueIndexShift).
	ringbuffer [maxContextQueueEntries]uint64
}

const (
	// Each element of the contextQueue ring buffer is the sum of its
	// index shifted left by contextQueueIndexShift (CQ_INDEX_SHIFT in
	// the C code) and the context ID.
	contextQueueIndexShift = 32
)
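
// As an illustrative sketch (not part of the original file): add() below
// tags each element with the index it was written at, which lets a reader
// detect slots that have not yet been rewritten for its index:
//
//	v := (uint64(idx) << contextQueueIndexShift) + uint64(contextID)
//	gotIdx := uint32(v >> contextQueueIndexShift) // producer index
//	gotContextID := uint32(v)                     // low 32 bits
//	// gotIdx == idx only once the slot was filled for this exact index.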

// LINT.ThenChange(./sysmsg/sysmsg_lib.c)

func (q *contextQueue) init() {
	for i := uint32(0); i < maxContextQueueEntries; i++ {
		q.ringbuffer[i] = uint64(invalidContextID)
	}
	// Allow tests to trigger overflows of start and end.
	idx := ^uint32(0) - maxContextQueueEntries*4
	atomic.StoreUint32(&q.start, idx)
	atomic.StoreUint32(&q.end, idx)
	atomic.StoreUint64(&q.fastPathDisabledTS, 0)
	atomic.StoreUint32(&q.fastPathFailedInRow, 0)
	atomic.StoreUint32(&q.numActiveThreads, 0)
	atomic.StoreUint32(&q.numThreadsToWakeup, 0)
	atomic.StoreUint32(&q.numActiveContexts, 0)
	atomic.StoreUint32(&q.numAwakeContexts, 0)
	atomic.StoreUint32(&q.fastPathDisabled, 0)
}

func (q *contextQueue) isEmpty() bool {
	return atomic.LoadUint32(&q.start) == atomic.LoadUint32(&q.end)
}

func (q *contextQueue) queuedContexts() uint32 {
	return (atomic.LoadUint32(&q.end) + maxContextQueueEntries - atomic.LoadUint32(&q.start)) % maxContextQueueEntries
}
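
// A worked example of the arithmetic above (illustrative only, using a
// hypothetical maxContextQueueEntries of 8): start and end are free-running
// counters, so with start == ^uint32(0) and end == 2, unsigned overflow
// gives end - start == 3, and (2 + 8 - ^uint32(0)) % 8 == 3 queued contexts.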

func (q *contextQueue) add(ctx *sharedContext, stubFastPathEnabled bool) uint32 {
	if stubFastPathEnabled {
		q.enableFastPath()
	} else {
		q.disableFastPath()
	}
	contextID := ctx.contextID
	atomic.AddUint32(&q.numActiveContexts, 1)
	next := atomic.AddUint32(&q.end, 1)
	if (next % maxContextQueueEntries) ==
		(atomic.LoadUint32(&q.start) % maxContextQueueEntries) {
		// Should be unreachable: the ringbuffer has one more entry than
		// the maximum number of guest contexts.
		panic("contextQueue is full")
	}
	idx := next - 1
	next = idx % maxContextQueueEntries
	v := (uint64(idx) << contextQueueIndexShift) + uint64(contextID)
	atomic.StoreUint64(&q.ringbuffer[next], v)
	return next // remove me
}
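
// The matching consumer lives in the stub (./sysmsg/sysmsg_lib.c). A Go
// rendition of a pop might look like the following sketch (hypothetical,
// for illustration only; it is not the stub's actual code):
//
//	func (q *contextQueue) pop() (uint32, bool) {
//		for {
//			idx := atomic.LoadUint32(&q.start)
//			if idx == atomic.LoadUint32(&q.end) {
//				return invalidContextID, false // queue is empty
//			}
//			v := atomic.LoadUint64(&q.ringbuffer[idx%maxContextQueueEntries])
//			if uint32(v>>contextQueueIndexShift) != idx {
//				continue // producer has not finished writing this slot
//			}
//			if atomic.CompareAndSwapUint32(&q.start, idx, idx+1) {
//				return uint32(v), true // low 32 bits hold the context ID
//			}
//		}
//	}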

func (q *contextQueue) disableFastPath() {
	atomic.StoreUint32(&q.fastPathDisabled, 1)
}

func (q *contextQueue) enableFastPath() {
	atomic.StoreUint32(&q.fastPathDisabled, 0)
}
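
// Usage sketch (illustrative only; ctx is a *sharedContext created
// elsewhere in this package):
//
//	q.add(ctx, stubFastPathEnabled) // publish ctx to stub threads
//	// If no stub thread picks it up via the fast path, the sentry can
//	// request a wake-up by incrementing numThreadsToWakeup.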