github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/sleep/sleep_unsafe.go

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
     15  // Package sleep allows goroutines to efficiently sleep on multiple sources of
     16  // notifications (wakers). It offers O(1) complexity, unlike multi-channel
     17  // selects, which have O(n) complexity (where n is the number of channels) and
     18  // a considerable constant factor.
    19  //
    20  // It is similar to edge-triggered epoll waits, where the user registers each
    21  // object of interest once, and then can repeatedly wait on all of them.
    22  //
    23  // A Waker object is used to wake a sleeping goroutine (G) up, or prevent it
    24  // from going to sleep next. A Sleeper object is used to receive notifications
    25  // from wakers, and if no notifications are available, to optionally sleep until
    26  // one becomes available.
    27  //
    28  // A Waker can be associated with at most one Sleeper, but a Sleeper can be
    29  // associated with multiple Wakers. A Sleeper has a list of asserted (ready)
    30  // wakers; when Fetch() is called repeatedly, elements from this list are
    31  // returned until the list becomes empty in which case the goroutine goes to
    32  // sleep. When Assert() is called on a Waker, it adds itself to the Sleeper's
    33  // asserted list and wakes the G up from its sleep if needed.
    34  //
    35  // Sleeper objects are expected to be used as follows, with just one goroutine
    36  // executing this code:
    37  //
    38  //	// One time set-up.
    39  //	s := sleep.Sleeper{}
    40  //	s.AddWaker(&w1)
    41  //	s.AddWaker(&w2)
    42  //
    43  //	// Called repeatedly.
    44  //	for {
    45  //		switch s.Fetch(true) {
    46  //		case &w1:
    47  //			// Do work triggered by w1 being asserted.
    48  //		case &w2:
    49  //			// Do work triggered by w2 being asserted.
    50  //		}
    51  //	}
    52  //
    53  // And Waker objects are expected to call w.Assert() when they want the sleeper
    54  // to wake up and perform work.
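         //
         // For example, a producer might assert a waker after queueing work for the
         // sleeping goroutine (a sketch; the queue and its mutex are illustrative,
         // not part of this package):
         //
         //	queueMu.Lock()
         //	queue = append(queue, item)
         //	queueMu.Unlock()
         //	w1.Assert() // Wake the sleeper so it drains the queue.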
    55  //
     56  // The notifications are edge-triggered: if Assert() is called on a Waker
     57  // several times before the sleeper has a chance to wake up, the sleeper is
     58  // only notified once and should perform all pending work (alternatively, it
     59  // can call Assert() on the waker again, to ensure that it will wake up again).
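         //
         // Because of this, a handler that could not drain all pending work in one
         // pass can re-assert its own waker (a sketch, extending the loop above;
         // drainAllWork is illustrative, not part of this package):
         //
         //	case &w1:
         //		if !drainAllWork() {
         //			// Work remains; make sure we are woken again.
         //			w1.Assert()
         //		}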
    60  //
     61  // The "unsafeness" here is in the casts to/from unsafe.Pointer, which are safe
     62  // as long as only one type is used for each unsafe.Pointer (which is the case
     63  // here); we should just make sure that this remains the case in the future.
     64  // The use of the unsafe package could be confined to sharedWaker and
     65  // sharedSleeper types that would hold pointers in atomic.Pointers, but the Go
     66  // compiler currently can't optimize these as well (it won't inline their
     67  // method calls), which reduces performance.
    68  package sleep
    69  
    70  import (
    71  	"sync/atomic"
    72  	"unsafe"
    73  
    74  	"github.com/nicocha30/gvisor-ligolo/pkg/sync"
    75  )
    76  
    77  const (
    78  	// preparingG is stored in sleepers to indicate that they're preparing
    79  	// to sleep.
    80  	preparingG = 1
    81  )
    82  
    83  var (
    84  	// assertedSleeper is a sentinel sleeper. A pointer to it is stored in
    85  	// wakers that are asserted.
    86  	assertedSleeper Sleeper
    87  )
    88  
    89  // Sleeper allows a goroutine to sleep and receive wake up notifications from
    90  // Wakers in an efficient way.
    91  //
    92  // This is similar to edge-triggered epoll in that wakers are added to the
    93  // sleeper once and the sleeper can then repeatedly sleep in O(1) time while
    94  // waiting on all wakers.
    95  //
    96  // None of the methods in a Sleeper can be called concurrently. Wakers that have
    97  // been added to a sleeper A can only be added to another sleeper after A.Done()
    98  // returns. These restrictions allow this to be implemented lock-free.
    99  //
   100  // This struct is thread-compatible.
   101  //
   102  // +stateify savable
   103  type Sleeper struct {
   104  	_ sync.NoCopy
   105  
   106  	// sharedList is a "stack" of asserted wakers. They atomically add
   107  	// themselves to the front of this list as they become asserted.
   108  	sharedList unsafe.Pointer `state:".(*Waker)"`
   109  
   110  	// localList is a list of asserted wakers that is only accessible to the
   111  	// waiter, and thus doesn't have to be accessed atomically. When
   112  	// fetching more wakers, the waiter will first go through this list, and
    113  	// only when it's empty will it atomically fetch wakers from
   114  	// sharedList.
   115  	localList *Waker
   116  
   117  	// allWakers is a list with all wakers that have been added to this
   118  	// sleeper. It is used during cleanup to remove associations.
   119  	allWakers *Waker
   120  
   121  	// waitingG holds the G that is sleeping, if any. It is used by wakers
   122  	// to determine which G, if any, they should wake.
   123  	waitingG uintptr `state:"zero"`
   124  }
   125  
   126  // saveSharedList is invoked by stateify.
   127  func (s *Sleeper) saveSharedList() *Waker {
   128  	return (*Waker)(atomic.LoadPointer(&s.sharedList))
   129  }
   130  
   131  // loadSharedList is invoked by stateify.
   132  func (s *Sleeper) loadSharedList(w *Waker) {
   133  	atomic.StorePointer(&s.sharedList, unsafe.Pointer(w))
   134  }
   135  
   136  // AddWaker associates the given waker to the sleeper.
   137  func (s *Sleeper) AddWaker(w *Waker) {
   138  	if w.allWakersNext != nil {
   139  		panic("waker has non-nil allWakersNext; owned by another sleeper?")
   140  	}
   141  	if w.next != nil {
   142  		panic("waker has non-nil next; queued in another sleeper?")
   143  	}
   144  
   145  	// Add the waker to the list of all wakers.
   146  	w.allWakersNext = s.allWakers
   147  	s.allWakers = w
   148  
   149  	// Try to associate the waker with the sleeper. If it's already
   150  	// asserted, we simply enqueue it in the "ready" list.
   151  	for {
   152  		p := (*Sleeper)(atomic.LoadPointer(&w.s))
   153  		if p == &assertedSleeper {
   154  			s.enqueueAssertedWaker(w, true /* wakep */)
   155  			return
   156  		}
   157  
   158  		if atomic.CompareAndSwapPointer(&w.s, usleeper(p), usleeper(s)) {
   159  			return
   160  		}
   161  	}
   162  }
   163  
    164  // nextWaker returns the next waker in the notification list, blocking if
    165  // needed. If wakepOrSleep is true and the operation does not end up sleeping,
    166  // it explicitly wakes a runtime P before returning.
    167  //
    168  // Precondition: wakepOrSleep may be true only if block is true.
   169  //
   170  //go:nosplit
   171  func (s *Sleeper) nextWaker(block, wakepOrSleep bool) *Waker {
   172  	// Attempt to replenish the local list if it's currently empty.
   173  	if s.localList == nil {
   174  		for atomic.LoadPointer(&s.sharedList) == nil {
   175  			// Fail request if caller requested that we
   176  			// don't block.
   177  			if !block {
   178  				return nil
   179  			}
   180  
   181  			// Indicate to wakers that we're about to sleep,
   182  			// this allows them to abort the wait by setting
   183  			// waitingG back to zero (which we'll notice
   184  			// before committing the sleep).
   185  			atomic.StoreUintptr(&s.waitingG, preparingG)
   186  
   187  			// Check if something was queued while we were
   188  			// preparing to sleep. We need this interleaving
   189  			// to avoid missing wake ups.
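         			//
         			// If a waker pushed to sharedList before our
         			// store to waitingG above, it may have seen
         			// waitingG == 0 and skipped waking us; the
         			// re-check below catches that push. If it
         			// pushes after the store, it will swap
         			// waitingG back to 0, so the sleep either
         			// isn't committed or the parked G is woken
         			// via Goready; no wake-up is lost either way.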
   190  			if atomic.LoadPointer(&s.sharedList) != nil {
   191  				atomic.StoreUintptr(&s.waitingG, 0)
   192  				break
   193  			}
   194  
   195  			// Since we are sleeping for sure, we no longer
   196  			// need to wakep once we get a value.
   197  			wakepOrSleep = false
   198  
   199  			// Try to commit the sleep and report it to the
   200  			// tracer as a select.
   201  			//
   202  			// gopark puts the caller to sleep and calls
   203  			// commitSleep to decide whether to immediately
   204  			// wake the caller up or to leave it sleeping.
   205  			const traceEvGoBlockSelect = 24
    206  			// See runtime2.go in the Go runtime package for
    207  			// the meaning of these constant values.
   208  			const waitReasonSelect = 9
   209  			sync.Gopark(commitSleep, unsafe.Pointer(&s.waitingG), sync.WaitReasonSelect, sync.TraceEvGoBlockSelect, 0)
   210  		}
   211  
    212  		// Pull the shared list out and reverse it into the local
    213  		// list. Wakers push themselves onto the front of the shared
    214  		// list (LIFO), so the reversal restores arrival order.
   215  		v := (*Waker)(atomic.SwapPointer(&s.sharedList, nil))
   216  		for v != nil {
   217  			cur := v
   218  			v = v.next
   219  
   220  			cur.next = s.localList
   221  			s.localList = cur
   222  		}
   223  	}
   224  
   225  	// Remove the waker in the front of the list.
   226  	w := s.localList
   227  	s.localList = w.next
   228  
   229  	// Do we need to wake a P?
   230  	if wakepOrSleep {
   231  		sync.Wakep()
   232  	}
   233  
   234  	return w
   235  }
   236  
   237  // commitSleep signals to wakers that the given g is now sleeping. Wakers can
   238  // then fetch it and wake it.
   239  //
   240  // The commit may fail if wakers have been asserted after our last check, in
   241  // which case they will have set s.waitingG to zero.
   242  //
   243  //go:norace
   244  //go:nosplit
   245  func commitSleep(g uintptr, waitingG unsafe.Pointer) bool {
   246  	return sync.RaceUncheckedAtomicCompareAndSwapUintptr((*uintptr)(waitingG), preparingG, g)
   247  }
   248  
   249  // fetch is the backing implementation for Fetch and AssertAndFetch.
   250  //
   251  // Preconditions are the same as nextWaker.
   252  //
   253  //go:nosplit
   254  func (s *Sleeper) fetch(block, wakepOrSleep bool) *Waker {
   255  	for {
   256  		w := s.nextWaker(block, wakepOrSleep)
   257  		if w == nil {
   258  			return nil
   259  		}
   260  
   261  		// Reassociate the waker with the sleeper. If the waker was
   262  		// still asserted we can return it, otherwise try the next one.
   263  		old := (*Sleeper)(atomic.SwapPointer(&w.s, usleeper(s)))
   264  		if old == &assertedSleeper {
   265  			return w
   266  		}
   267  	}
   268  }
   269  
   270  // Fetch fetches the next wake-up notification. If a notification is
   271  // immediately available, the asserted waker is returned immediately.
   272  // Otherwise, the behavior depends on the value of 'block': if true, the
   273  // current goroutine blocks until a notification arrives and returns the
   274  // asserted waker; if false, nil will be returned.
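         //
         // For example, already-asserted wakers can be drained without ever
         // sleeping (a sketch):
         //
         //	for w := s.Fetch(false); w != nil; w = s.Fetch(false) {
         //		// Handle w without blocking.
         //	}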
   275  //
   276  // N.B. This method is *not* thread-safe. Only one goroutine at a time is
   277  // allowed to call this method.
   278  func (s *Sleeper) Fetch(block bool) *Waker {
   279  	return s.fetch(block, false /* wakepOrSleep */)
   280  }
   281  
   282  // AssertAndFetch asserts the given waker and fetches the next wake-up notification.
   283  // Note that this will always be blocking, since there is no value in joining a
   284  // non-blocking operation.
   285  //
   286  // N.B. Like Fetch, this method is *not* thread-safe. This will also yield the current
    287  // P to the next goroutine, avoiding the associated scheduling overhead.
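         //
         // A sketch of a hand-off between two goroutines, each with its own Sleeper
         // (sleeperA/wakerA belong to this goroutine, sleeperB/wakerB to its peer;
         // the names are illustrative):
         //
         //	// Wake the peer, then sleep until it asserts wakerA back.
         //	w := sleeperA.AssertAndFetch(&wakerB)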
   288  //
   289  // +checkescape:all
   290  //
   291  //go:nosplit
   292  func (s *Sleeper) AssertAndFetch(n *Waker) *Waker {
   293  	n.assert(false /* wakep */)
    294  	return s.fetch(true /* block */, true /* wakepOrSleep */)
   295  }
   296  
   297  // Done is used to indicate that the caller won't use this Sleeper anymore. It
   298  // removes the association with all wakers so that they can be safely reused
   299  // by another sleeper after Done() returns.
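         //
         // For example, a waker may only be moved to a different sleeper once the
         // first sleeper is done with it (a sketch; s1, s2 and w are illustrative):
         //
         //	s1.AddWaker(&w)
         //	// ... use s1 ...
         //	s1.Done()
         //	s2.AddWaker(&w) // Safe only after s1.Done() has returned.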
   300  func (s *Sleeper) Done() {
   301  	// Remove all associations that we can, and build a list of the ones we
   302  	// could not. An association can be removed right away from waker w if
   303  	// w.s has a pointer to the sleeper, that is, the waker is not asserted
   304  	// yet. By atomically switching w.s to nil, we guarantee that
   305  	// subsequent calls to Assert() on the waker will not result in it
   306  	// being queued.
   307  	for w := s.allWakers; w != nil; w = s.allWakers {
   308  		next := w.allWakersNext // Before zapping.
   309  		if atomic.CompareAndSwapPointer(&w.s, usleeper(s), nil) {
   310  			w.allWakersNext = nil
   311  			w.next = nil
   312  			s.allWakers = next // Move ahead.
   313  			continue
   314  		}
   315  
    316  		// Dequeue exactly one waker from the list; it may not be this
    317  		// one, but we know this one is in the process of being queued.
    318  		// Leave the dequeued waker asserted, but drop it from our lists.
   319  		if w := s.nextWaker(true, false); w != nil {
   320  			prev := &s.allWakers
   321  			for *prev != w {
   322  				prev = &((*prev).allWakersNext)
   323  			}
   324  			*prev = (*prev).allWakersNext
   325  			w.allWakersNext = nil
   326  			w.next = nil
   327  		}
   328  	}
   329  }
   330  
    331  // enqueueAssertedWaker enqueues an asserted waker onto the "ready" list of
    332  // wakers that want to notify the sleeper.
   333  //
   334  //go:nosplit
   335  func (s *Sleeper) enqueueAssertedWaker(w *Waker, wakep bool) {
   336  	// Add the new waker to the front of the list.
   337  	for {
   338  		v := (*Waker)(atomic.LoadPointer(&s.sharedList))
   339  		w.next = v
   340  		if atomic.CompareAndSwapPointer(&s.sharedList, uwaker(v), uwaker(w)) {
   341  			break
   342  		}
   343  	}
   344  
   345  	// Nothing to do if there isn't a G waiting.
   346  	if atomic.LoadUintptr(&s.waitingG) == 0 {
   347  		return
   348  	}
   349  
   350  	// Signal to the sleeper that a waker has been asserted.
   351  	switch g := atomic.SwapUintptr(&s.waitingG, 0); g {
   352  	case 0, preparingG:
   353  	default:
   354  		// We managed to get a G. Wake it up.
   355  		sync.Goready(g, 0, wakep)
   356  	}
   357  }
   358  
   359  // Waker represents a source of wake-up notifications to be sent to sleepers. A
   360  // waker can be associated with at most one sleeper at a time, and at any given
   361  // time is either in asserted or non-asserted state.
   362  //
   363  // Once asserted, the waker remains so until it is manually cleared or a sleeper
   364  // consumes its assertion (i.e., a sleeper wakes up or is prevented from going
   365  // to sleep due to the waker).
   366  //
   367  // This struct is thread-safe, that is, its methods can be called concurrently
   368  // by multiple goroutines.
   369  //
    370  // Note, it is not safe to copy a Waker, as its fields are modified in place
    371  // (the pointer fields are individually modified with atomic operations).
   372  //
   373  // +stateify savable
   374  type Waker struct {
   375  	_ sync.NoCopy
   376  
   377  	// s is the sleeper that this waker can wake up. Only one sleeper at a
   378  	// time is allowed. This field can have three classes of values:
   379  	// nil -- the waker is not asserted: it either is not associated with
   380  	//     a sleeper, or is queued to a sleeper due to being previously
   381  	//     asserted. This is the zero value.
   382  	// &assertedSleeper -- the waker is asserted.
   383  	// otherwise -- the waker is not asserted, and is associated with the
   384  	//     given sleeper. Once it transitions to asserted state, the
   385  	//     associated sleeper will be woken.
   386  	s unsafe.Pointer `state:".(wakerState)"`
   387  
   388  	// next is used to form a linked list of asserted wakers in a sleeper.
   389  	next *Waker
   390  
   391  	// allWakersNext is used to form a linked list of all wakers associated
   392  	// to a given sleeper.
   393  	allWakersNext *Waker
   394  }
   395  
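         // wakerState is the saved state of a Waker's s pointer, used by stateify
         // (see saveS and loadS below).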
   396  type wakerState struct {
   397  	asserted bool
   398  	other    *Sleeper
   399  }
   400  
   401  // saveS is invoked by stateify.
   402  func (w *Waker) saveS() wakerState {
   403  	s := (*Sleeper)(atomic.LoadPointer(&w.s))
   404  	if s == &assertedSleeper {
   405  		return wakerState{asserted: true}
   406  	}
   407  	return wakerState{other: s}
   408  }
   409  
   410  // loadS is invoked by stateify.
   411  func (w *Waker) loadS(ws wakerState) {
   412  	if ws.asserted {
   413  		atomic.StorePointer(&w.s, unsafe.Pointer(&assertedSleeper))
   414  	} else {
   415  		atomic.StorePointer(&w.s, unsafe.Pointer(ws.other))
   416  	}
   417  }
   418  
   419  // assert is the implementation for Assert.
   420  //
   421  //go:nosplit
   422  func (w *Waker) assert(wakep bool) {
   423  	// Nothing to do if the waker is already asserted. This check allows us
   424  	// to complete this case (already asserted) without any interlocked
   425  	// operations on x86.
   426  	if atomic.LoadPointer(&w.s) == usleeper(&assertedSleeper) {
   427  		return
   428  	}
   429  
   430  	// Mark the waker as asserted, and wake up a sleeper if there is one.
   431  	switch s := (*Sleeper)(atomic.SwapPointer(&w.s, usleeper(&assertedSleeper))); s {
   432  	case nil:
   433  	case &assertedSleeper:
   434  	default:
   435  		s.enqueueAssertedWaker(w, wakep)
   436  	}
   437  }
   438  
   439  // Assert moves the waker to an asserted state, if it isn't asserted yet. When
   440  // asserted, the waker will cause its matching sleeper to wake up.
   441  func (w *Waker) Assert() {
   442  	w.assert(true /* wakep */)
   443  }
   444  
    445  // Clear moves the waker to the non-asserted state and returns whether it was
   446  // asserted before being cleared.
   447  //
   448  // N.B. The waker isn't removed from the "ready" list of a sleeper (if it
   449  // happens to be in one), but the sleeper will notice that it is not asserted
   450  // anymore and won't return it to the caller.
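         //
         // For example (a sketch), a pending notification can be withdrawn before
         // the sleeper observes it:
         //
         //	if w.Clear() {
         //		// The assertion was withdrawn before the sleeper consumed
         //		// it; Fetch will not return this waker for it.
         //	}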
   451  func (w *Waker) Clear() bool {
   452  	// Nothing to do if the waker is not asserted. This check allows us to
   453  	// complete this case (already not asserted) without any interlocked
   454  	// operations on x86.
   455  	if atomic.LoadPointer(&w.s) != usleeper(&assertedSleeper) {
   456  		return false
   457  	}
   458  
   459  	// Try to store nil in the sleeper, which indicates that the waker is
   460  	// not asserted.
   461  	return atomic.CompareAndSwapPointer(&w.s, usleeper(&assertedSleeper), nil)
   462  }
   463  
   464  // IsAsserted returns whether the waker is currently asserted (i.e., if it's
   465  // currently in a state that would cause its matching sleeper to wake up).
   466  func (w *Waker) IsAsserted() bool {
   467  	return (*Sleeper)(atomic.LoadPointer(&w.s)) == &assertedSleeper
   468  }
   469  
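         // usleeper converts a *Sleeper to an unsafe.Pointer for use with the
         // atomic pointer operations above.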
   470  func usleeper(s *Sleeper) unsafe.Pointer {
   471  	return unsafe.Pointer(s)
   472  }
   473  
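         // uwaker converts a *Waker to an unsafe.Pointer for use with the atomic
         // pointer operations above.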
   474  func uwaker(w *Waker) unsafe.Pointer {
   475  	return unsafe.Pointer(w)
   476  }