github.com/songzhibin97/go-baseutils@v0.0.2-0.20240302024150-487d8ce9c082/sys/syncx/pool.go (about)

     1  //go:build !race
     2  // +build !race
     3  
     4  package syncx
     5  
     6  import (
     7  	"runtime"
     8  	"sync"
     9  	"sync/atomic"
    10  	"unsafe"
    11  
    12  	"github.com/songzhibin97/go-baseutils/internal/runtimex"
    13  )
    14  
// Pool is a lock-free object pool, an alternative to sync.Pool that
// batches values into fixed-size per-P blocks and optionally opts out
// of GC-driven cleanup (see NoGC).
// A Pool must not be copied after first use.
type Pool struct {
	noCopy noCopy

	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	newSize int32 // incremented every time New is called; read and reset by gc to detect demand
	gcSize  int32 // recommended number of blocks to reclaim on the next gc pass

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
	// NoGC, if true, prevents gc from reclaiming any objects held by
	// this Pool during the periodic cleanup.
	NoGC bool
}
    31  
// noCopy may be embedded into structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
type noCopy struct{}

// Lock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Lock() {}

// Unlock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Unlock() {}
    43  
// blockSize is the number of pooled values held by one block.
const blockSize = 256

// block is a fixed-size batch of pooled values. Whole blocks (not
// individual values) are what travel through the per-P chains.
type block [blockSize]interface{}

// shared indexes the chain of full blocks available to any P.
const shared = 0

// unused indexes the chain of empty blocks kept for reuse by Put.
const unused = 1
    51  
// Local per-P Pool appendix.
type poolLocalInternal struct {
	pidx    int    // idx of private: number of filled slots in the private block
	private *block // Can be used only by the respective P.
	// Local P can pushHead/popHead; any P can popTail.
	// Index 0 (shared) holds full blocks; index 1 (unused) holds empty
	// blocks awaiting reuse.
	shared [2]poolChain
}

// poolLocal pads poolLocalInternal so consecutive array elements never
// share a cache line.
type poolLocal struct {
	poolLocalInternal
	// Prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0 .
	pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
}
    67  
    68  // Put adds x to the pool.
    69  func (p *Pool) Put(x interface{}) {
    70  	if x == nil {
    71  		return
    72  	}
    73  	l, pid := p.pin()
    74  	if l.pidx >= blockSize {
    75  		l.shared[shared].pushHead(l.private)
    76  		l.pidx, l.private = 0, nil
    77  	}
    78  	if l.private == nil {
    79  		l.private, _ = l.shared[unused].popHead()
    80  		if l.private == nil {
    81  			l.private = p.getSlow(pid, unused)
    82  		}
    83  		if l.private == nil {
    84  			l.private = &block{}
    85  		}
    86  	}
    87  	l.private[l.pidx] = x
    88  	l.pidx++
    89  	x = nil
    90  	runtimex.Unpin()
    91  }
    92  
// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() (x interface{}) {
	l, pid := p.pin()
	// Fast path: take the most recently Put value from the private block.
	if l.pidx > 0 {
		l.pidx--
		x = l.private[l.pidx]
		l.private[l.pidx] = nil // drop the reference so the value can be collected
	}
	if x == nil {
		// Private block is empty. Try to pop the head of the local
		// shard. We prefer the head over the tail for temporal
		// locality of reuse.
		var b, _ = l.shared[shared].popHead()
		if b == nil {
			// Nothing local: try to steal a full block from another P.
			b = p.getSlow(pid, shared)
		}
		if b != nil {
			if l.private != nil {
				// The now-empty private block is parked on the unused
				// chain so a later Put can recycle it.
				l.shared[unused].pushHead(l.private)
			}
			// Adopt the full block and take its last slot.
			l.private = b
			l.pidx = blockSize - 1
			x = l.private[l.pidx]
			l.private[l.pidx] = nil
		}
	}
	runtimex.Unpin()
	if x == nil && p.New != nil {
		// Record that the pool ran dry; gc skips reclaiming while
		// newSize keeps growing.
		atomic.AddInt32(&p.newSize, 1)
		x = p.New()
	}
	return x
}
   133  
   134  func (p *Pool) getSlow(pid int, idx int) *block {
   135  	// See the comment in pin regarding ordering of the loads.
   136  	size := atomic.LoadUintptr(&p.localSize) // load-acquire
   137  	locals := p.local                        // load-consume
   138  	// Try to steal one element from other procs.
   139  	for i := 0; i < int(size); i++ {
   140  		l := indexLocal(locals, (pid+i+1)%int(size))
   141  		if x, _ := l.shared[idx].popTail(); x != nil {
   142  			return x
   143  		}
   144  	}
   145  	return nil
   146  }
   147  
// pin pins the current goroutine to P, disables preemption and
// returns poolLocal pool for the P and the P's id.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() (*poolLocal, int) {
	pid := runtimex.Pin()
	// In pinSlow we store to local and then to localSize, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large localSize.
	// We can observe a new/larger local, it is fine (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid), pid
	}
	// Per-P array missing or too small (first use, or GOMAXPROCS grew):
	// fall back to the slow path under allPoolsMu.
	return p.pinSlow()
}
   164  
// pinSlow is the slow path of pin: it (re)allocates the per-P poolLocal
// array under allPoolsMu and returns the slot for the current P.
func (p *Pool) pinSlow() (*poolLocal, int) {
	// Retry under the mutex.
	// Can not lock the mutex while pinned.
	runtimex.Unpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtimex.Pin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		// Someone else grew the array while we were unpinned.
		return indexLocal(l, pid), pid
	}
	if p.local == nil {
		// First use of this pool: register it for periodic cleanup.
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))         // store-release
	return &local[pid], pid
}
   188  
// GC will follow these rules:
// 1. Mark the tag `newSize`, if `newSize` exists this time, skip GC.
// 2. Calculate the current size, mark as `newSize`.
// 3. if `newSize`  < `oldSize`, skip GC.
// 4. GC size is oldSize/2, the real GC is to throw away a few poolLocals directly.
//
// NOTE(review): gc is only invoked from poolCleanup, which runs with
// the world stopped — that is why the plain (non-atomic) reads of
// localSize, local and newSize below look safe; confirm before calling
// it from anywhere else.
func (p *Pool) gc() {
	if p.NoGC {
		return
	}
	// 1. check newSize: if New was called since the last cycle the pool
	// is under demand, so reset the marker and skip reclaiming.
	if p.newSize > 0 {
		p.newSize = 0
		return
	}
	// Sum the blocks currently parked on the shared chains of all Ps.
	var newSize int32
	for i := 0; i < int(p.localSize); i++ {
		l := indexLocal(p.local, i)
		newSize += l.shared[shared].size
	}
	// 2. if new < old; old = new
	if newSize < p.gcSize {
		p.gcSize = newSize
		return
	}
	// 3. if new < procs; return
	if newSize <= int32(p.localSize) {
		p.gcSize = newSize
		return
	}
	// 4. gc old/2: drop whole per-P chains (both shared and unused)
	// until roughly half of the previously recommended size is gone.
	var gcSize int32
	for i := 0; i < int(p.localSize) && gcSize < p.gcSize/2; i++ {
		l := indexLocal(p.local, i)
		gcSize += l.shared[shared].size
		l.shared[shared].size, l.shared[shared].head, l.shared[shared].tail = 0, nil, nil
		l.shared[unused].size, l.shared[unused].head, l.shared[unused].tail = 0, nil, nil
	}
	p.gcSize = newSize - gcSize
}
   228  
var (
	allPoolsMu sync.Mutex
	// period counts poolCleanup invocations; it gates how often the
	// pools' gc actually runs.
	period int
	// allPools is the set of pools that have non-empty primary
	// caches. Protected by either 1) allPoolsMu and pinning or 2)
	// STW.
	allPools []*Pool
)
   237  
   238  // GC will be executed every 4 cycles
   239  func poolCleanup() {
   240  	runtime_poolCleanup()
   241  	period++
   242  	if period&0x4 == 0 {
   243  		return
   244  	}
   245  	// This function is called with the world stopped, at the beginning of a garbage collection.
   246  	// It must not allocate and probably should not call any runtime functions.
   247  
   248  	// Because the world is stopped, no pool user can be in a
   249  	// pinned section (in effect, this has all Ps pinned).
   250  
   251  	// Move primary cache to victim cache.
   252  	for _, p := range allPools {
   253  		p.gc()
   254  	}
   255  }
   256  
// init hooks poolCleanup into the runtime's GC start, the same
// mechanism the runtime uses for sync.Pool.
func init() {
	// FIXME: The linkname here is risky.
	//  If Go renames these func officially, we need to synchronize here, otherwise it may cause OOM.
	runtime_registerPoolCleanup(poolCleanup)
}
   262  
   263  func indexLocal(l unsafe.Pointer, i int) *poolLocal {
   264  	lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
   265  	return (*poolLocal)(lp)
   266  }