github.com/songzhibin97/gkit@v1.2.13/sys/syncx/pool.go (about)

     1  //go:build !race
     2  // +build !race
     3  
     4  package syncx
     5  
     6  import (
     7  	"github.com/songzhibin97/gkit/internal/runtimex"
     8  	"runtime"
     9  	"sync"
    10  	"sync/atomic"
    11  	"unsafe"
    12  )
    13  
// Pool is a sync.Pool-like object cache. Values are stored in
// fixed-size blocks kept in per-P local caches, and the pool's
// GC behavior can be tuned (see NoGC and the gc method).
type Pool struct {
	noCopy noCopy // flags accidental copies to `go vet -copylocks`

	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	newSize int32 // mark every time New is executed; non-zero makes the next gc cycle skip collection
	gcSize  int32 // recommended number of gc; last recorded count of cached shared blocks

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
	// NoGC any objects in this Pool.
	// When true, gc never discards cached blocks from this Pool.
	NoGC bool
}
    30  
// noCopy may be embedded into structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
type noCopy struct{}

// Lock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Lock()   {}

// Unlock is the matching no-op so noCopy satisfies sync.Locker.
func (*noCopy) Unlock() {}
    41  
// blockSize is the number of object slots in one block.
const blockSize = 256

// block is a fixed-size batch of pooled values; the pool moves values
// around block-by-block to amortize per-item list operations.
type block [blockSize]interface{}

// Indexes into poolLocalInternal.shared:
// shared holds full blocks ready to be consumed by Get;
// unused holds drained (empty) blocks kept for reuse by Put.
const shared = 0
const unused = 1
    48  
// Local per-P Pool appendix.
type poolLocalInternal struct {
	pidx    int    // idx of private: next free slot in private (number of filled slots)
	private *block // Can be used only by the respective P.
	// Local P can pushHead/popHead; any P can popTail.
	// Index 0 (shared) holds full blocks, index 1 (unused) holds empty ones;
	// see the shared/unused constants.
	shared [2]poolChain
}

type poolLocal struct {
	poolLocalInternal
	// Prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0 .
	pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
}
    64  
    65  // Put adds x to the pool.
    66  func (p *Pool) Put(x interface{}) {
    67  	if x == nil {
    68  		return
    69  	}
    70  	l, pid := p.pin()
    71  	if l.pidx >= blockSize {
    72  		l.shared[shared].pushHead(l.private)
    73  		l.pidx, l.private = 0, nil
    74  	}
    75  	if l.private == nil {
    76  		l.private, _ = l.shared[unused].popHead()
    77  		if l.private == nil {
    78  			l.private = p.getSlow(pid, unused)
    79  		}
    80  		if l.private == nil {
    81  			l.private = &block{}
    82  		}
    83  	}
    84  	l.private[l.pidx] = x
    85  	l.pidx++
    86  	x = nil
    87  	runtimex.Unpin()
    88  }
    89  
    90  // Get selects an arbitrary item from the Pool, removes it from the
    91  // Pool, and returns it to the caller.
    92  // Get may choose to ignore the pool and treat it as empty.
    93  // Callers should not assume any relation between values passed to Put and
    94  // the values returned by Get.
    95  //
    96  // If Get would otherwise return nil and p.New is non-nil, Get returns
    97  // the result of calling p.New.
    98  func (p *Pool) Get() (x interface{}) {
    99  	l, pid := p.pin()
   100  	if l.pidx > 0 {
   101  		l.pidx--
   102  		x = l.private[l.pidx]
   103  		l.private[l.pidx] = nil
   104  	}
   105  	if x == nil {
   106  		// Try to pop the head of the local shard. We prefer
   107  		// the head over the tail for temporal locality of
   108  		// reuse.
   109  		var b, _ = l.shared[shared].popHead()
   110  		if b == nil {
   111  			b = p.getSlow(pid, shared)
   112  		}
   113  		if b != nil {
   114  			if l.private != nil {
   115  				l.shared[unused].pushHead(l.private)
   116  			}
   117  			l.private = b
   118  			l.pidx = blockSize - 1
   119  			x = l.private[l.pidx]
   120  			l.private[l.pidx] = nil
   121  		}
   122  	}
   123  	runtimex.Unpin()
   124  	if x == nil && p.New != nil {
   125  		atomic.AddInt32(&p.newSize, 1)
   126  		x = p.New()
   127  	}
   128  	return x
   129  }
   130  
   131  func (p *Pool) getSlow(pid int, idx int) *block {
   132  	// See the comment in pin regarding ordering of the loads.
   133  	size := atomic.LoadUintptr(&p.localSize) // load-acquire
   134  	locals := p.local                        // load-consume
   135  	// Try to steal one element from other procs.
   136  	for i := 0; i < int(size); i++ {
   137  		l := indexLocal(locals, (pid+i+1)%int(size))
   138  		if x, _ := l.shared[idx].popTail(); x != nil {
   139  			return x
   140  		}
   141  	}
   142  	return nil
   143  }
   144  
   145  // pin pins the current goroutine to P, disables preemption and
   146  // returns poolLocal pool for the P and the P's id.
   147  // Caller must call runtime_procUnpin() when done with the pool.
   148  func (p *Pool) pin() (*poolLocal, int) {
   149  	pid := runtimex.Pin()
   150  	// In pinSlow we store to local and then to localSize, here we load in opposite order.
   151  	// Since we've disabled preemption, GC cannot happen in between.
   152  	// Thus here we must observe local at least as large localSize.
   153  	// We can observe a newSize/larger local, it is fine (we must observe its zero-initialized-ness).
   154  	s := atomic.LoadUintptr(&p.localSize) // load-acquire
   155  	l := p.local                          // load-consume
   156  	if uintptr(pid) < s {
   157  		return indexLocal(l, pid), pid
   158  	}
   159  	return p.pinSlow()
   160  }
   161  
   162  func (p *Pool) pinSlow() (*poolLocal, int) {
   163  	// Retry under the mutex.
   164  	// Can not lock the mutex while pinned.
   165  	runtimex.Unpin()
   166  	allPoolsMu.Lock()
   167  	defer allPoolsMu.Unlock()
   168  	pid := runtimex.Pin()
   169  	// poolCleanup won't be called while we are pinned.
   170  	s := p.localSize
   171  	l := p.local
   172  	if uintptr(pid) < s {
   173  		return indexLocal(l, pid), pid
   174  	}
   175  	if p.local == nil {
   176  		allPools = append(allPools, p)
   177  	}
   178  	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
   179  	size := runtime.GOMAXPROCS(0)
   180  	local := make([]poolLocal, size)
   181  	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
   182  	atomic.StoreUintptr(&p.localSize, uintptr(size))         // store-release
   183  	return &local[pid], pid
   184  }
   185  
// gc discards cached blocks to shrink the pool. It is called from
// poolCleanup with the world stopped and follows these rules:
//  1. If New ran since the last cycle (`newSize` > 0), the pool is
//     still growing: reset the mark and skip GC.
//  2. Count the full blocks held across all shared lists; if the count
//     shrank below the last recorded `gcSize`, record it and skip GC.
//  3. If the count is no larger than the number of Ps, the pool is
//     already small: record it and skip GC.
//  4. Otherwise throw away whole per-P lists until roughly half of the
//     previously recorded size has been discarded.
func (p *Pool) gc() {
	if p.NoGC {
		return
	}
	// 1. The pool allocated recently; give it another cycle before shrinking.
	if p.newSize > 0 {
		p.newSize = 0
		return
	}
	// Count full blocks currently parked on every P's shared list.
	var newSize int32
	for i := 0; i < int(p.localSize); i++ {
		l := indexLocal(p.local, i)
		newSize += l.shared[shared].size
	}
	// 2. Shrinking on its own: remember the smaller size, no GC needed.
	if newSize < p.gcSize {
		p.gcSize = newSize
		return
	}
	// 3. Already small (at most one block per P): no GC needed.
	if newSize <= int32(p.localSize) {
		p.gcSize = newSize
		return
	}
	// 4. Drop whole per-P lists until about gcSize/2 blocks are freed.
	// NOTE(review): the unused list is cleared without counting toward
	// gcSize — presumably intentional since unused blocks are empty;
	// confirm against poolChain semantics.
	var gcSize int32
	for i := 0; i < int(p.localSize) && gcSize < p.gcSize/2; i++ {
		l := indexLocal(p.local, i)
		gcSize += l.shared[shared].size
		l.shared[shared].size, l.shared[shared].head, l.shared[shared].tail = 0, nil, nil
		l.shared[unused].size, l.shared[unused].head, l.shared[unused].tail = 0, nil, nil
	}
	p.gcSize = newSize - gcSize
}
   225  
var (
	// allPoolsMu serializes pinSlow's slow-path initialization across
	// all Pools (and guards allPools together with pinning).
	allPoolsMu sync.Mutex
	// period counts poolCleanup invocations; used to throttle gc.
	period     int
	// allPools is the set of pools that have non-empty primary
	// caches. Protected by either 1) allPoolsMu and pinning or 2)
	// STW.
	allPools []*Pool
)
   234  
   235  // GC will be executed every 4 cycles
   236  func poolCleanup() {
   237  	runtime_poolCleanup()
   238  	period++
   239  	if period&0x4 == 0 {
   240  		return
   241  	}
   242  	// This function is called with the world stopped, at the beginning of a garbage collection.
   243  	// It must not allocate and probably should not call any runtime functions.
   244  
   245  	// Because the world is stopped, no pool user can be in a
   246  	// pinned section (in effect, this has all Ps pinned).
   247  
   248  	// Move primary cache to victim cache.
   249  	for _, p := range allPools {
   250  		p.gc()
   251  	}
   252  }
   253  
func init() {
	// Hook poolCleanup into the runtime's GC start, the same mechanism
	// sync.Pool uses (via linkname declared elsewhere in this package).
	// FIXME: The linkname here is risky.
	//  If Go renames these func officially, we need to synchronize here, otherwise it may cause OOM.
	runtime_registerPoolCleanup(poolCleanup)
}
   259  
   260  func indexLocal(l unsafe.Pointer, i int) *poolLocal {
   261  	lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
   262  	return (*poolLocal)(lp)
   263  }