gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/pkg/sync/seqatomic/generic_seqatomic_unsafe.go

// Copyright 2019 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package seqatomic doesn't exist. This file must be instantiated using the
// go_template_instance rule in tools/go_generics/defs.bzl.
package seqatomic

import (
	"unsafe"

	"gvisor.dev/gvisor/pkg/gohacks"
	"gvisor.dev/gvisor/pkg/sync"
)

// Value is a required type parameter.
type Value struct{}
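
// A hedged illustration, not part of this file: a go_template_instance rule
// substitutes a concrete type for Value and (as we understand the generator;
// this suffix behavior is an assumption, not stated here) appends a suffix to
// the generated identifiers, so instantiating with, e.g., Value = int and
// suffix "Int" would yield SeqAtomicLoadInt and SeqAtomicTryLoadInt in the
// generated package.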

// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
// with any writer critical sections in seq.
//
//go:nosplit
func SeqAtomicLoad(seq *sync.SeqCount, ptr *Value) Value {
	for {
		if val, ok := SeqAtomicTryLoad(seq, seq.BeginRead(), ptr); ok {
			return val
		}
	}
}
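
// exampleLoadStore is a hedged sketch, not part of the upstream template: it
// shows the intended pairing of SeqAtomicLoad with SeqAtomicStore (defined
// below). The reader spins inside SeqAtomicLoad until it observes a copy that
// no writer critical section overlapped, so it never sees a torn Value and
// never takes a lock.
func exampleLoadStore(seq *sync.SeqCount, shared *Value) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		var v Value
		// Writer side: forces any overlapping reader critical sections to
		// retry their loads.
		SeqAtomicStore(seq, shared, v)
	}()
	// Reader side: returns a consistent copy, retrying internally as needed.
	_ = SeqAtomicLoad(seq, shared)
	<-done
}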

// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
// read would race with a writer critical section, SeqAtomicTryLoad returns
// (unspecified, false).
//
//go:nosplit
func SeqAtomicTryLoad(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Value) (val Value, ok bool) {
	if sync.RaceEnabled {
		// runtime.RaceDisable() doesn't actually stop the race detector, so it
		// can't help us here. Instead, call runtime.memmove directly, which is
		// not instrumented by the race detector.
		gohacks.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
	} else {
		// This is ~40% faster for short reads than going through memmove.
		val = *ptr
	}
	ok = seq.ReadOk(epoch)
	return
}
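
// exampleSnapshotPair is a hedged sketch, not part of the upstream template:
// it shows why SeqAtomicTryLoad takes an explicit epoch. Both values are read
// under the same reader critical section, so if either read raced with a
// writer the pair is retried as a unit, yielding a mutually consistent
// snapshot without locks.
func exampleSnapshotPair(seq *sync.SeqCount, a, b *Value) (Value, Value) {
	for {
		epoch := seq.BeginRead()
		va, okA := SeqAtomicTryLoad(seq, epoch, a)
		vb, okB := SeqAtomicTryLoad(seq, epoch, b)
		if okA && okB {
			return va, vb
		}
	}
}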

// SeqAtomicStore sets *ptr to a copy of val, ensuring that any racing reader
// critical sections are forced to retry.
//
//go:nosplit
func SeqAtomicStore(seq *sync.SeqCount, ptr *Value, val Value) {
	seq.BeginWrite()
	SeqAtomicStoreSeqed(ptr, val)
	seq.EndWrite()
}
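
// A usage note, not part of the upstream file: SeqAtomicStore is simply one
// SeqAtomicStoreSeqed call wrapped in its own writer critical section. When
// several values guarded by the same SeqCount must change together, open one
// critical section manually and call SeqAtomicStoreSeqed for each value, as
// sketched after its definition below.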

// SeqAtomicStoreSeqed sets *ptr to a copy of val.
//
// Preconditions: ptr is protected by a SeqCount that will be in a writer
// critical section throughout the call to SeqAtomicStoreSeqed.
//
//go:nosplit
func SeqAtomicStoreSeqed(ptr *Value, val Value) {
	if sync.RaceEnabled {
		// Mirror the load path: use uninstrumented memmove so the intentional
		// race with reader critical sections is not reported by the race
		// detector; SeqCount provides the actual consistency guarantee.
		gohacks.Memmove(unsafe.Pointer(ptr), unsafe.Pointer(&val), unsafe.Sizeof(val))
	} else {
		*ptr = val
	}
}
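
// exampleStorePair is a hedged sketch, not part of the upstream template: a
// writer updates two values guarded by the same SeqCount inside one writer
// critical section, so concurrent readers either observe both new values or
// retry. This is the use case that motivates exposing SeqAtomicStoreSeqed
// separately from SeqAtomicStore.
func exampleStorePair(seq *sync.SeqCount, a, b *Value, va, vb Value) {
	seq.BeginWrite()
	SeqAtomicStoreSeqed(a, va)
	SeqAtomicStoreSeqed(b, vb)
	seq.EndWrite()
}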