github.com/aaabigfish/gopkg@v1.1.0/syncx/pool.go

// Copyright 2021 ByteDance Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !race
// +build !race

package syncx

import (
	"runtime"
	"sync"
	"sync/atomic"
	"unsafe"

	"github.com/aaabigfish/gopkg/internal/runtimex"
)

type Pool struct {
	noCopy noCopy

	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	newSize int32 // incremented each time New is executed; examined and cleared by gc
	gcSize  int32 // estimated number of cached blocks, used to size the next GC

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
	// NoGC, if true, exempts all objects in this Pool from pool GC.
	NoGC bool
}
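
// A minimal usage sketch (illustrative only; the bytes.Buffer element type
// is an assumption for the example, not part of this package):
//
//	var bufPool = syncx.Pool{
//		New: func() interface{} { return new(bytes.Buffer) },
//	}
//
//	func handle() {
//		buf := bufPool.Get().(*bytes.Buffer)
//		buf.Reset()
//		// ... use buf ...
//		bufPool.Put(buf)
//	}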

// noCopy may be embedded into structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
type noCopy struct{}

// Lock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

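// Elements are cached in fixed-size blocks so the per-P chains move whole
// batches of 256 values at a time instead of single elements.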
const blockSize = 256

type block [blockSize]interface{}

const shared = 0
const unused = 1

// Local per-P Pool appendix.
type poolLocalInternal struct {
	pidx    int    // index of the next free slot in private
	private *block // Can be used only by the respective P.
	// Local P can pushHead/popHead; any P can popTail.
	// Index 0 (shared) holds filled blocks, index 1 (unused) holds drained ones.
	shared [2]poolChain
}

type poolLocal struct {
	poolLocalInternal
	// Prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0.
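	// (The pad below rounds the struct size up to a multiple of 128 bytes,
	// so neighboring poolLocals never share a cache line.)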
	pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
}

// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
	if x == nil {
		return
	}
	l, pid := p.pin()
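	// The private block is full: publish it to the local shared queue
	// and start a fresh one.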
	if l.pidx >= blockSize {
		l.shared[shared].pushHead(l.private)
		l.pidx, l.private = 0, nil
	}
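	// Need a writable private block: prefer recycling a drained block from
	// the local unused queue, then steal one from another P, and only
	// allocate as a last resort.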
	if l.private == nil {
		l.private, _ = l.shared[unused].popHead()
		if l.private == nil {
			l.private = p.getSlow(pid, unused)
		}
		if l.private == nil {
			l.private = &block{}
		}
	}
	l.private[l.pidx] = x
	l.pidx++
	x = nil
	runtimex.Unpin()
}

// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() (x interface{}) {
	l, pid := p.pin()
	if l.pidx > 0 {
		l.pidx--
		x = l.private[l.pidx]
		l.private[l.pidx] = nil
	}
	if x == nil {
		// Try to pop the head of the local shard. We prefer
		// the head over the tail for temporal locality of
		// reuse.
		b, _ := l.shared[shared].popHead()
		if b == nil {
			b = p.getSlow(pid, shared)
		}
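		// Install the refilled block as the new private block, recycling
		// the old (now drained) private block into the unused queue.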
		if b != nil {
			if l.private != nil {
				l.shared[unused].pushHead(l.private)
			}
			l.private = b
			l.pidx = blockSize - 1
			x = l.private[l.pidx]
			l.private[l.pidx] = nil
		}
	}
	runtimex.Unpin()
	if x == nil && p.New != nil {
		atomic.AddInt32(&p.newSize, 1)
		x = p.New()
	}
	return x
}

func (p *Pool) getSlow(pid, idx int) *block {
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	locals := p.local                        // load-consume
	// Try to steal one block from other procs.
	for i := 0; i < int(size); i++ {
		l := indexLocal(locals, (pid+i+1)%int(size))
		if x, _ := l.shared[idx].popTail(); x != nil {
			return x
		}
	}
	return nil
}

// pin pins the current goroutine to P, disables preemption and
// returns poolLocal pool for the P and the P's id.
// Caller must call runtimex.Unpin() when done with the pool.
func (p *Pool) pin() (*poolLocal, int) {
	pid := runtimex.Pin()
	// In pinSlow we store to local and then to localSize, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large as localSize.
	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid), pid
	}
	return p.pinSlow()
}
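
// runtimex.Pin/Unpin wrap the runtime's procPin/procUnpin. A plausible
// sketch of internal/runtimex (an assumption; the real declarations live
// in that package):
//
//	//go:linkname procPin runtime.procPin
//	func procPin() int
//
//	//go:linkname procUnpin runtime.procUnpin
//	func procUnpin()
//
//	func Pin() int { return procPin() }
//	func Unpin()   { procUnpin() }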

func (p *Pool) pinSlow() (*poolLocal, int) {
	// Retry under the mutex.
	// Cannot lock the mutex while pinned.
	runtimex.Unpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtimex.Pin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		return indexLocal(l, pid), pid
	}
	if p.local == nil {
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))         // store-release
	return &local[pid], pid
}

// gc follows these rules:
// 1. If New was called since the last cycle (newSize > 0), clear the mark and skip GC.
// 2. Count the blocks currently cached in the shared queues.
// 3. If the count shrank below the previous gcSize, or does not exceed the
//    number of Ps, record it as the new gcSize and skip GC.
// 4. Otherwise GC about gcSize/2 blocks; the real GC just discards whole
//    poolLocals' queues directly.
func (p *Pool) gc() {
	if p.NoGC {
		return
	}
	// Rule 1: New was called since the last cycle; skip this one.
	if p.newSize > 0 {
		p.newSize = 0
		return
	}
	// Rule 2: count the cached blocks.
	var newSize int32
	for i := 0; i < int(p.localSize); i++ {
		l := indexLocal(p.local, i)
		newSize += l.shared[shared].size
	}
	// Rule 3: the pool shrank on its own; just record the new size.
	if newSize < p.gcSize {
		p.gcSize = newSize
		return
	}
	// Rule 3: too small to be worth collecting.
	if newSize <= int32(p.localSize) {
		p.gcSize = newSize
		return
	}
	// Rule 4: discard whole queues until about half of the old gcSize is gone.
	var gcSize int32
	for i := 0; i < int(p.localSize) && gcSize < p.gcSize/2; i++ {
		l := indexLocal(p.local, i)
		gcSize += l.shared[shared].size
		l.shared[shared].size, l.shared[shared].head, l.shared[shared].tail = 0, nil, nil
		l.shared[unused].size, l.shared[unused].head, l.shared[unused].tail = 0, nil, nil
	}
	p.gcSize = newSize - gcSize
}
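
// Worked example (hypothetical numbers): with 8 Ps, a previous gcSize of
// 1000 blocks and a current count of 1200, rules 1-3 do not fire, so rule 4
// clears whole poolLocals until at least 1000/2 = 500 blocks are dropped,
// then records gcSize = 1200 - dropped.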

var (
	allPoolsMu sync.Mutex
	period     int
	// allPools is the set of pools that have non-empty primary
	// caches. Protected by either 1) allPoolsMu and pinning or 2)
	// STW.
	allPools []*Pool
)

// poolCleanup runs the standard library's cleanup on every GC cycle, and
// the size-based gc above once every 4 cycles.
func poolCleanup() {
	runtime_poolCleanup()
	period++
	if period%4 != 0 {
		return
	}
	// This function is called with the world stopped, at the beginning of a garbage collection.
	// It must not allocate and probably should not call any runtime functions.

	// Because the world is stopped, no pool user can be in a
	// pinned section (in effect, this has all Ps pinned).

	// Run the size-based gc for every registered pool.
	for _, p := range allPools {
		p.gc()
	}
}

func init() {
	// FIXME: The linkname here is risky.
	// If Go ever renames these functions upstream, this must be updated in
	// lockstep, otherwise it may cause OOM.
	runtime_registerPoolCleanup(poolCleanup)
}
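
// runtime_registerPoolCleanup and runtime_poolCleanup are expected to be
// linknamed in a sibling file; a plausible sketch (an assumption, not part
// of this file):
//
//	//go:linkname runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
//	func runtime_registerPoolCleanup(cleanup func())
//
//	//go:linkname runtime_poolCleanup sync.poolCleanup
//	func runtime_poolCleanup()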

// indexLocal computes the address of the i-th poolLocal in the array at l.
func indexLocal(l unsafe.Pointer, i int) *poolLocal {
	lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
	return (*poolLocal)(lp)
}