github.com/metacubex/gvisor@v0.0.0-20240320004321-933faba989ec/pkg/sentry/platform/systrap/context_queue.go

// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package systrap

import (
	"sync/atomic"

	"github.com/metacubex/gvisor/pkg/sentry/platform"
)

// LINT.IfChange
const (
	// maxContextQueueEntries is the size of the ringbuffer.
	maxContextQueueEntries uint32 = uint32(maxGuestContexts) + 1
)
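// The extra slot over maxGuestContexts keeps one ringbuffer entry free, so
// start == end can unambiguously mean "empty": even with every guest context
// queued at once, end never legitimately catches up to start.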

type queuedContext struct {
	contextID uint32
	threadID  uint32
}

// contextQueue is a structure shared with each stub thread that is used to
// signal to stub threads which contexts are ready to resume running.
//
// It is a lockless ringbuffer where threads try to police themselves on
// whether they should continue waiting for a context or go to sleep if they
// are not needed.
type contextQueue struct {
	// start is an index used for taking contexts out of the ringbuffer.
	start uint32
	// end is an index used for putting new contexts into the ringbuffer.
	end uint32

	// numActiveThreads indicates to the sentry how many stubs are running.
	// It is changed only by stub threads.
	numActiveThreads uint32
	// numSpinningThreads indicates to the sentry how many stubs are waiting
	// to receive a context from the queue, and are not doing useful work.
	numSpinningThreads uint32
	// numThreadsToWakeup is the number of threads requested by the Sentry
	// to wake up. The Sentry increments it and stub threads decrement it.
	numThreadsToWakeup uint32
	// numActiveContexts is the number of running and waiting contexts.
	numActiveContexts uint32
	// numAwakeContexts is the number of awake contexts. It includes all
	// active contexts and contexts that are running in the Sentry.
	numAwakeContexts uint32

	// fastPathDisabled indicates whether the stub fast path is currently
	// switched off (see enableFastPath/disableFastPath below).
	fastPathDisabled uint32
	// usedFastPath is set by stub threads when a context is received via
	// the fast path; add resets it and reports the usage to fastpath stats.
	usedFastPath uint32
	// ringbuffer holds queued contexts. Each element packs a producer index
	// and a context ID (see contextQueueIndexShift below).
	ringbuffer [maxContextQueueEntries]uint64
}

const (
	// Each element of the contextQueue ringbuffer is the sum of its index
	// shifted by contextQueueIndexShift (CQ_INDEX_SHIFT on the C side) and
	// a context ID.
	contextQueueIndexShift = 32
)
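// For illustration, a consumer would unpack an element v back into its parts
// like this (a sketch, not code from this file):
//
//	idx := uint32(v >> contextQueueIndexShift) // producer index
//	contextID := uint32(v)                     // low 32 bits
//
// The embedded index lets a reader detect a slot that has been claimed by a
// producer but not yet written.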

// LINT.ThenChange(./sysmsg/sysmsg_lib.c)

func (q *contextQueue) init() {
	for i := uint32(0); i < maxContextQueueEntries; i++ {
		q.ringbuffer[i] = uint64(invalidContextID)
	}
	// Allow tests to trigger overflows of start and end.
	idx := ^uint32(0) - maxContextQueueEntries*4
	atomic.StoreUint32(&q.start, idx)
	atomic.StoreUint32(&q.end, idx)
	atomic.StoreUint32(&q.numActiveThreads, 0)
	atomic.StoreUint32(&q.numSpinningThreads, 0)
	atomic.StoreUint32(&q.numThreadsToWakeup, 0)
	atomic.StoreUint32(&q.numActiveContexts, 0)
	atomic.StoreUint32(&q.numAwakeContexts, 0)
	atomic.StoreUint32(&q.fastPathDisabled, 1)
	atomic.StoreUint32(&q.usedFastPath, 0)
}
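// Note that start and end are free-running counters: they are reduced modulo
// maxContextQueueEntries only when indexing into ringbuffer, so wrapping past
// the uint32 maximum (which init deliberately sets up) is expected and safe.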

func (q *contextQueue) isEmpty() bool {
	return atomic.LoadUint32(&q.start) == atomic.LoadUint32(&q.end)
}

func (q *contextQueue) queuedContexts() uint32 {
	return (atomic.LoadUint32(&q.end) + maxContextQueueEntries - atomic.LoadUint32(&q.start)) % maxContextQueueEntries
}
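// For example, with maxContextQueueEntries == 8, start == 13 and end == 16,
// queuedContexts returns (16 + 8 - 13) % 8 == 3. The arithmetic stays correct
// when the free-running counters wrap around the uint32 maximum.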

// add puts the given ctx onto the context queue, and records the state of
// the subprocess after insertion to see whether there are more active stub
// threads or more waiting contexts.
func (q *contextQueue) add(ctx *sharedContext) *platform.ContextError {
	ctx.startWaitingTS = cputicks()

	if fastpath.stubFastPath() {
		q.enableFastPath()
	} else {
		q.disableFastPath()
	}
	contextID := ctx.contextID
	atomic.AddUint32(&q.numActiveContexts, 1)
	next := atomic.AddUint32(&q.end, 1)
	if (next % maxContextQueueEntries) ==
		(atomic.LoadUint32(&q.start) % maxContextQueueEntries) {
		// This is reachable only if shared memory has been corrupted.
		return corruptedSharedMemoryErr("context queue is full, indicates tampering with queue counters")
	}
	idx := next - 1
	next = idx % maxContextQueueEntries
	// Tag the element with its producer index (see contextQueueIndexShift).
	v := (uint64(idx) << contextQueueIndexShift) + uint64(contextID)
	atomic.StoreUint64(&q.ringbuffer[next], v)

	if atomic.SwapUint32(&q.usedFastPath, 0) != 0 {
		fastpath.usedStubFastPath.Store(true)
	}
	return nil
}
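// A minimal sketch of the matching consumer step, assuming the layout above
// (the real consumer is the C code in sysmsg_lib.c; the pos bookkeeping and
// stale-slot handling here are illustrative):
//
//	pos := atomic.AddUint32(&q.start, 1) - 1
//	v := atomic.LoadUint64(&q.ringbuffer[pos%maxContextQueueEntries])
//	if uint32(v>>contextQueueIndexShift) != pos {
//		// The producer has reserved this slot but not written it yet;
//		// spin until the embedded index matches.
//	}
//	contextID := uint32(v)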

func (q *contextQueue) disableFastPath() {
	atomic.StoreUint32(&q.fastPathDisabled, 1)
}

func (q *contextQueue) enableFastPath() {
	atomic.StoreUint32(&q.fastPathDisabled, 0)
}

func (q *contextQueue) fastPathEnabled() bool {
	return atomic.LoadUint32(&q.fastPathDisabled) == 0
}
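// How a stub-side consumer might consult this flag, as a sketch only (the
// real loop lives in sysmsg_lib.c, and maxSpins is a hypothetical bound):
//
//	for spins := 0; q.fastPathEnabled() && spins < maxSpins; spins++ {
//		if !q.isEmpty() {
//			// Try to take a context from the ringbuffer.
//		}
//	}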