github.com/matrixorigin/matrixone@v1.2.0/pkg/common/mpool/mpool.go

// Copyright 2021 - 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mpool

import (
	"fmt"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/logutil"
	v2 "github.com/matrixorigin/matrixone/pkg/util/metric/v2"
	"github.com/matrixorigin/matrixone/pkg/util/stack"
)

// Mo's extremely simple memory pool.

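// A typical round-trip, as a sketch (error handling elided; a zero cap
// means the pool is bounded only by the global cap):
//
//	mp, err := NewMPool("my-pool", 0, 0)
//	bs, err := mp.Alloc(1024) // len(bs) == 1024
//	mp.Free(bs)
//	DeleteMPool(mp)
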
// Stats
type MPoolStats struct {
	NumAlloc      atomic.Int64 // number of allocations
	NumFree       atomic.Int64 // number of frees
	NumAllocBytes atomic.Int64 // number of bytes allocated
	NumFreeBytes  atomic.Int64 // number of bytes freed
	NumCurrBytes  atomic.Int64 // current number of bytes
	HighWaterMark atomic.Int64 // high water mark
}
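
// RecordAlloc and RecordFree below maintain the invariant
// NumCurrBytes == NumAllocBytes - NumFreeBytes, with HighWaterMark
// tracking the maximum value NumCurrBytes has reached.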

func (s *MPoolStats) Report(tab string) string {
	if s.HighWaterMark.Load() == 0 {
		// empty, reduce noise.
		return ""
	}

	ret := ""
	ret += fmt.Sprintf("%s allocations : %d\n", tab, s.NumAlloc.Load())
	ret += fmt.Sprintf("%s frees : %d\n", tab, s.NumFree.Load())
	ret += fmt.Sprintf("%s alloc bytes : %d\n", tab, s.NumAllocBytes.Load())
	ret += fmt.Sprintf("%s free bytes : %d\n", tab, s.NumFreeBytes.Load())
	ret += fmt.Sprintf("%s current bytes : %d\n", tab, s.NumCurrBytes.Load())
	ret += fmt.Sprintf("%s high water mark : %d\n", tab, s.HighWaterMark.Load())
	return ret
}

func (s *MPoolStats) ReportJson() string {
	if s.HighWaterMark.Load() == 0 {
		return ""
	}
	ret := "{"
	ret += fmt.Sprintf("\"alloc\": %d,", s.NumAlloc.Load())
	ret += fmt.Sprintf("\"free\": %d,", s.NumFree.Load())
	ret += fmt.Sprintf("\"allocBytes\": %d,", s.NumAllocBytes.Load())
	ret += fmt.Sprintf("\"freeBytes\": %d,", s.NumFreeBytes.Load())
	ret += fmt.Sprintf("\"currBytes\": %d,", s.NumCurrBytes.Load())
	ret += fmt.Sprintf("\"highWaterMark\": %d", s.HighWaterMark.Load())
	ret += "}"
	return ret
}

// Update alloc stats, return curr bytes
func (s *MPoolStats) RecordAlloc(tag string, sz int64) int64 {
	s.NumAlloc.Add(1)
	s.NumAllocBytes.Add(sz)
	curr := s.NumCurrBytes.Add(sz)
	hwm := s.HighWaterMark.Load()
	if curr > hwm {
		swapped := s.HighWaterMark.CompareAndSwap(hwm, curr)
		if swapped && curr/GB != hwm/GB {
			logutil.Infof("MPool %s new high watermark\n%s", tag, s.Report("    "))
		}
	}
	return curr
}

// Update free stats, return curr bytes.
func (s *MPoolStats) RecordFree(tag string, sz int64) int64 {
	if sz < 0 {
		logutil.Errorf("Mpool %s free bug, stats: %s", tag, s.Report("    "))
		panic(moerr.NewInternalErrorNoCtx("mpool freed -1"))
	}
	s.NumFree.Add(1)
	s.NumFreeBytes.Add(sz)
	curr := s.NumCurrBytes.Add(-sz)
	if curr < 0 {
		logutil.Errorf("Mpool %s free bug, stats: %s", tag, s.Report("    "))
		panic(moerr.NewInternalErrorNoCtx("mpool freed more bytes than alloc"))
	}
	return curr
}

func (s *MPoolStats) RecordManyFrees(tag string, nfree, sz int64) int64 {
	if sz < 0 {
		logutil.Errorf("Mpool %s free bug, stats: %s", tag, s.Report("    "))
		panic(moerr.NewInternalErrorNoCtx("mpool freed -1"))
	}
	s.NumFree.Add(nfree)
	s.NumFreeBytes.Add(sz)
	curr := s.NumCurrBytes.Add(-sz)
	if curr < 0 {
		logutil.Errorf("Mpool %s free many bug, stats: %s", tag, s.Report("    "))
		panic(moerr.NewInternalErrorNoCtx("mpool freemany freed more bytes than alloc"))
	}
	return curr
}

const (
	NumFixedPool = 5
	kMemHdrSz    = 16
	kStripeSize  = 128
	B            = 1
	KB           = 1024
	MB           = 1024 * KB
	GB           = 1024 * MB
	TB           = 1024 * GB
	PB           = 1024 * TB
)

// Pool element size
var PoolElemSize = [NumFixedPool]int32{64, 128, 256, 512, 1024}

// Memory header, kMemHdrSz bytes.
type memHdr struct {
	poolId       int64
	allocSz      int32
	fixedPoolIdx int8
	guard        [3]uint8
}
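
// Layout (asserted by the init check below): poolId occupies bytes
// 0..7, allocSz bytes 8..11, fixedPoolIdx byte 12, and the guard bytes
// 13..15, for a total of 16 == kMemHdrSz.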

func init() {
	if unsafe.Sizeof(memHdr{}) != kMemHdrSz {
		panic("memory header size assertion failed")
	}
}

func (pHdr *memHdr) SetGuard() {
	pHdr.guard[0] = 0xDE
	pHdr.guard[1] = 0xAD
	pHdr.guard[2] = 0xBF
}

func (pHdr *memHdr) CheckGuard() bool {
	return pHdr.guard[0] == 0xDE && pHdr.guard[1] == 0xAD && pHdr.guard[2] == 0xBF
}

func (pHdr *memHdr) ToSlice(sz, cap int) []byte {
	ptr := unsafe.Add(unsafe.Pointer(pHdr), kMemHdrSz)
	bs := unsafe.Slice((*byte)(ptr), cap)
	return bs[:sz]
}

// pool for fixed elements.  Note that we preconfigure the pool size.
// We should consider implementing some kind of growing logic.
type fixedPool struct {
	m      sync.Mutex
	noLock bool
	fpIdx  int8
	poolId int64
	eleSz  int32
	// holds buffers allocated; not really used in alloc/free,
	// but held here for bookkeeping.
	buf   [][]byte
	flist unsafe.Pointer
}

// Initialize a fixed pool
func (fp *fixedPool) initPool(tag string, poolid int64, idx int, noLock bool) {
	eleSz := PoolElemSize[idx]
	fp.poolId = poolid
	fp.fpIdx = int8(idx)
	fp.noLock = noLock
	fp.eleSz = eleSz
}

func (fp *fixedPool) nextPtr(ptr unsafe.Pointer) unsafe.Pointer {
	iptr := *(*unsafe.Pointer)(unsafe.Add(ptr, kMemHdrSz))
	return iptr
}
func (fp *fixedPool) setNextPtr(ptr unsafe.Pointer, next unsafe.Pointer) {
	iptr := (*unsafe.Pointer)(unsafe.Add(ptr, kMemHdrSz))
	*iptr = next
}
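
// The free list threads its next pointer through the first 8 bytes of
// each element's payload, just past the header; that space is unused
// while the element is free, and alloc re-zeroes the payload before
// handing an element out again.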

func (fp *fixedPool) alloc(sz int32) *memHdr {
	if !fp.noLock {
		fp.m.Lock()
		defer fp.m.Unlock()
	}

	if fp.flist == nil {
		buf := make([]byte, kStripeSize*(fp.eleSz+kMemHdrSz))
		fp.buf = append(fp.buf, buf)
		// return the first one
		ret := (unsafe.Pointer)(&buf[0])
		pHdr := (*memHdr)(ret)
		pHdr.poolId = fp.poolId
		pHdr.allocSz = sz
		pHdr.fixedPoolIdx = fp.fpIdx
		pHdr.SetGuard()

		ptr := unsafe.Add(ret, fp.eleSz+kMemHdrSz)
		// and thread the rest onto the free list
		for i := 1; i < kStripeSize; i++ {
			pHdr := (*memHdr)(ptr)
			pHdr.poolId = fp.poolId
			pHdr.allocSz = -1
			pHdr.fixedPoolIdx = fp.fpIdx
			pHdr.SetGuard()
			fp.setNextPtr(ptr, fp.flist)
			fp.flist = ptr
			ptr = unsafe.Add(ptr, fp.eleSz+kMemHdrSz)
		}
		return (*memHdr)(ret)
	} else {
		ret := fp.flist
		fp.flist = fp.nextPtr(fp.flist)
		pHdr := (*memHdr)(ret)
		pHdr.allocSz = sz
		// Zero the payload; callers expect allocated memory to be zeroed.
		bs := unsafe.Slice((*byte)(unsafe.Add(ret, kMemHdrSz)), fp.eleSz)
		// the compiler will optimize this loop to a memclr
		for i := range bs {
			bs[i] = 0
		}
		return pHdr
	}
}
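
// A stripe allocated above is kStripeSize back-to-back cells of
// kMemHdrSz+eleSz bytes each:
//
//	[hdr|payload][hdr|payload]...[hdr|payload]
//
// e.g. one stripe of the 64-byte pool is 128*(16+64) = 10240 bytes.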

func (fp *fixedPool) free(hdr *memHdr) {
	if hdr.poolId != fp.poolId || hdr.fixedPoolIdx != fp.fpIdx ||
		hdr.allocSz < 0 || hdr.allocSz > fp.eleSz ||
		!hdr.CheckGuard() {
		panic(moerr.NewInternalErrorNoCtx("mpool fixed pool hdr corruption.   Possible double free"))
	}

	if !fp.noLock {
		fp.m.Lock()
		defer fp.m.Unlock()
	}
	ptr := unsafe.Pointer(hdr)
	fp.setNextPtr(ptr, fp.flist)
	fp.flist = ptr
}

type detailInfo struct {
	cnt, bytes int64
}

type mpoolDetails struct {
	mu    sync.Mutex
	alloc map[string]detailInfo
	free  map[string]detailInfo
}

func newMpoolDetails() *mpoolDetails {
	mpd := mpoolDetails{}
	mpd.alloc = make(map[string]detailInfo)
	mpd.free = make(map[string]detailInfo)
	return &mpd
}

func (d *mpoolDetails) recordAlloc(nb int64) {
	f := stack.Caller(2)
	k := fmt.Sprintf("%v", f)
	d.mu.Lock()
	defer d.mu.Unlock()

	info := d.alloc[k]
	info.cnt += 1
	info.bytes += nb
	d.alloc[k] = info
}

func (d *mpoolDetails) recordFree(nb int64) {
	f := stack.Caller(2)
	k := fmt.Sprintf("%v", f)
	d.mu.Lock()
	defer d.mu.Unlock()

	info := d.free[k]
	info.cnt += 1
	info.bytes += nb
	d.free[k] = info
}

func (d *mpoolDetails) reportJson() string {
	d.mu.Lock()
	defer d.mu.Unlock()
	ret := `{"alloc": {`
	allocs := make([]string, 0)
	for k, v := range d.alloc {
		kvs := fmt.Sprintf("\"%s\": [%d, %d]", k, v.cnt, v.bytes)
		allocs = append(allocs, kvs)
	}
	ret += strings.Join(allocs, ",")
	ret += `}, "free": {`
	frees := make([]string, 0)
	for k, v := range d.free {
		kvs := fmt.Sprintf("\"%s\": [%d, %d]", k, v.cnt, v.bytes)
		frees = append(frees, kvs)
	}
	ret += strings.Join(frees, ",")
	ret += "}}"
	return ret
}
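
// The resulting report has this shape (sketch; the keys are caller
// frames from stack.Caller, whose exact rendering depends on
// stack.Frame's %v format):
//
//	{"alloc": {"<frame>": [count, bytes], ...},
//	 "free": {"<frame>": [count, bytes], ...}}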

// The memory pool.
type MPool struct {
	id         int64      // mpool generated, used to look up the MPool
	tag        string     // user supplied, for debug/inspect
	cap        int64      // pool capacity
	stats      MPoolStats // stats
	noFixed    bool
	noLock     bool
	available  int32 // 0: available, 1: unavailable
	inUseCount int32 // number of in-use calls
	pools      [NumFixedPool]fixedPool
	details    *mpoolDetails

	// To remove: this thing is highly unlikely to be of any good use.
	sels *sync.Pool
}

const (
	NoFixed = 1
	NoLock  = 2
)

const (
	Available = iota
	Unavailable
)

func (mp *MPool) PutSels(sels []int64) {
	mp.sels.Put(&sels)
}
func (mp *MPool) GetSels() []int64 {
	ss := mp.sels.Get().(*[]int64)
	return (*ss)[:0]
}

func (mp *MPool) EnableDetailRecording() {
	if mp.details == nil {
		mp.details = newMpoolDetails()
	}
}

func (mp *MPool) DisableDetailRecording() {
	mp.details = nil
}

func (mp *MPool) Stats() *MPoolStats {
	return &mp.stats
}

func (mp *MPool) Cap() int64 {
	if mp.cap == 0 {
		return PB
	}
	return mp.cap
}

func (mp *MPool) destroy() (succeed bool) {
	if !atomic.CompareAndSwapInt32(&mp.available, Available, Unavailable) {
		logutil.Errorf("Mpool %s double destroy", mp.tag)
		return false
	}
	if atomic.LoadInt32(&mp.inUseCount) != 0 {
		logutil.Errorf("Mpool %s was still in use", mp.tag)
		atomic.StoreInt32(&mp.available, Available)
		return false
	}
	if mp.stats.NumAlloc.Load() < mp.stats.NumFree.Load() {
		logutil.Errorf("mp error: %s", mp.stats.Report(""))
	}

	// We do not call each individual fixedPool's destroy
	// because they recorded pooled elements alloc/frees.
	// Those are not reflected in globalStats.
	// Here we just compensate whatever is left over in mp.stats
	// into globalStats.
	globalStats.RecordManyFrees(mp.tag,
		mp.stats.NumAlloc.Load()-mp.stats.NumFree.Load(),
		mp.stats.NumCurrBytes.Load())
	return true
}

// Create a new MPool.   Tag is user supplied, used for debugging/diagnostics.
func NewMPool(tag string, cap int64, flag int) (*MPool, error) {
	start := time.Now()
	defer func() {
		v2.TxnMpoolNewDurationHistogram.Observe(time.Since(start).Seconds())
	}()

	if cap > 0 {
		// simple sanity check
		if cap < 1024*1024 {
			return nil, moerr.NewInternalErrorNoCtx("mpool cap %d too small", cap)
		}
		if cap > GlobalCap() {
			return nil, moerr.NewInternalErrorNoCtx("mpool cap %d too big, global cap %d", cap, globalCap)
		}
	}

	id := atomic.AddInt64(&nextPool, 1)
	var mp MPool
	mp.id = id
	mp.tag = tag
	mp.cap = cap
	mp.noFixed = (flag & NoFixed) != 0
	mp.noLock = (flag & NoLock) != 0

	if !mp.noFixed {
		for i := 0; i < NumFixedPool; i++ {
			mp.pools[i].initPool(mp.tag, mp.id, i, mp.noLock)
		}
	}

	mp.sels = &sync.Pool{
		New: func() any {
			ss := make([]int64, 0, 16)
			return &ss
		},
	}

	globalPools.Store(id, &mp)
	// logutil.Infof("creating mpool %s, cap %d, fixed size %v", tag, cap, sz)
	return &mp, nil
}

func MustNew(tag string) *MPool {
	mp, err := NewMPool(tag, 0, 0)
	if err != nil {
		panic(err)
	}
	return mp
}

func MustNewZero() *MPool {
	return MustNew("must_new_zero")
}

func MustNewNoFixed(tag string) *MPool {
	mp, err := NewMPool(tag, 0, NoFixed)
	if err != nil {
		panic(err)
	}
	return mp
}

func MustNewZeroNoFixed() *MPool {
	return MustNewNoFixed("must_new_zero_no_fixed")
}

func (mp *MPool) Report() string {
	ret := fmt.Sprintf("    mpool stats: %s", mp.Stats().Report("        "))
	return ret
}

func (mp *MPool) ReportJson() string {
	ss := mp.stats.ReportJson()
	if ss == "" {
		return fmt.Sprintf("{\"%s\": \"\"}", mp.tag)
	}
	ret := fmt.Sprintf("{\"%s\": %s", mp.tag, ss)
	if mp.details != nil {
		ret += `, "detailed_alloc": `
		ret += mp.details.reportJson()
	}

	return ret + "}"
}

func (mp *MPool) CurrNB() int64 {
	return mp.stats.NumCurrBytes.Load()
}

func DeleteMPool(mp *MPool) {
	start := time.Now()
	defer func() {
		v2.TxnMpoolDeleteDurationHistogram.Observe(time.Since(start).Seconds())
	}()

	if mp == nil {
		return
	}

	// logutil.Infof("destroy mpool %s, cap %d, stats\n%s", mp.tag, mp.cap, mp.Report())
	if mp.destroy() {
		globalPools.Delete(mp.id)
	}
}

var nextPool int64
var globalCap int64
var globalStats MPoolStats
var globalPools sync.Map
var crossPoolFreeCounter atomic.Int64

func InitCap(cap int64) {
	if cap < GB {
		globalCap = GB
	} else {
		globalCap = cap
	}
}

func TotalCrossPoolFreeCounter() int64 {
	return crossPoolFreeCounter.Load()
}

func GlobalStats() *MPoolStats {
	return &globalStats
}
func GlobalCap() int64 {
	if globalCap == 0 {
		return PB
	}
	return globalCap
}

func sizeToIdx(size int) int {
	for i, sz := range PoolElemSize {
		if int32(size) <= sz {
			return i
		}
	}
	return NumFixedPool
}
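
// For example, sizeToIdx(100) returns 1 (the 128-byte pool), and any
// size above 1024 returns NumFixedPool, i.e. no fixed pool fits.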

func (mp *MPool) Alloc(sz int) ([]byte, error) {
	// reject unexpected alloc size.
	if sz < 0 || sz > GB {
		logutil.Errorf("Invalid alloc size %d: %s", sz, string(debug.Stack()))
		return nil, moerr.NewInternalErrorNoCtx("Invalid alloc size %d", sz)
	}

	if sz == 0 {
		return nil, nil
	}

	if atomic.LoadInt32(&mp.available) == Unavailable {
		return nil, moerr.NewInternalErrorNoCtx("mpool %s unavailable for alloc", mp.tag)
	}

	// update in use count
	atomic.AddInt32(&mp.inUseCount, 1)
	defer atomic.AddInt32(&mp.inUseCount, -1)

	idx := NumFixedPool
	requiredSpaceWithoutHeader := sz
	if !mp.noFixed {
		idx = sizeToIdx(requiredSpaceWithoutHeader)
		if idx < NumFixedPool {
			requiredSpaceWithoutHeader = int(mp.pools[idx].eleSz)
		}
	}

	tempSize := int64(requiredSpaceWithoutHeader + kMemHdrSz)
	gcurr := globalStats.RecordAlloc("global", tempSize)
	if gcurr > GlobalCap() {
		globalStats.RecordFree("global", tempSize)
		return nil, moerr.NewOOMNoCtx()
	}
	mycurr := mp.stats.RecordAlloc(mp.tag, tempSize)
	if mycurr > mp.Cap() {
		mp.stats.RecordFree(mp.tag, tempSize)
		globalStats.RecordFree("global", tempSize)
		return nil, moerr.NewInternalErrorNoCtx("mpool out of space, alloc %d bytes, cap %d", sz, mp.cap)
	}

	// from fixed pool
	if idx < NumFixedPool {
		bs := mp.pools[idx].alloc(int32(requiredSpaceWithoutHeader))
		if mp.details != nil {
			mp.details.recordAlloc(int64(bs.allocSz))
		}
		return bs.ToSlice(sz, int(mp.pools[idx].eleSz)), nil
	}

	return alloc(sz, requiredSpaceWithoutHeader, mp), nil
}
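
// Accounting note: the charged size is the rounded-up element size
// plus the header. For instance, with fixed pools enabled, Alloc(100)
// draws from the 128-byte pool and charges 128+16 = 144 bytes against
// both the pool cap and the global cap, returning a slice with
// len 100 and cap 128.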

func (mp *MPool) Free(bs []byte) {
	if bs == nil || cap(bs) == 0 {
		return
	}
	// re-slice so bs[0] is addressable even if the caller passed a
	// zero-length slice; cap(bs) >= 1 is guaranteed by the check above.
	bs = bs[:1]
	hdr := unsafe.Add((unsafe.Pointer)(&bs[0]), -kMemHdrSz)
	pHdr := (*memHdr)(hdr)

	if !pHdr.CheckGuard() {
		panic(moerr.NewInternalErrorNoCtx("invalid free, mp header corruption"))
	}
	if atomic.LoadInt32(&mp.available) == Unavailable {
		panic(moerr.NewInternalErrorNoCtx("mpool %s unavailable for free", mp.tag))
	}

	// handle cross-pool free.
	if pHdr.poolId != mp.id {
		crossPoolFreeCounter.Add(1)
		otherPool, ok := globalPools.Load(pHdr.poolId)
		if !ok {
			panic(moerr.NewInternalErrorNoCtx("invalid mpool id %d", pHdr.poolId))
		}
		(otherPool.(*MPool)).Free(bs)
		return
	}

	atomic.AddInt32(&mp.inUseCount, 1)
	defer atomic.AddInt32(&mp.inUseCount, -1)
	// double free check
	if atomic.LoadInt32(&pHdr.allocSz) == -1 {
		panic(moerr.NewInternalErrorNoCtx("free size -1, possible double free"))
	}

	recordSize := int64(pHdr.allocSz) + kMemHdrSz
	mp.stats.RecordFree(mp.tag, recordSize)
	globalStats.RecordFree("global", recordSize)
	if mp.details != nil {
		mp.details.recordFree(int64(pHdr.allocSz))
	}

	// free from fixed pool
	if pHdr.fixedPoolIdx < NumFixedPool {
		mp.pools[pHdr.fixedPoolIdx].free(pHdr)
	} else {
		// non fixed pool, just mark it freed
		if !atomic.CompareAndSwapInt32(&pHdr.allocSz, pHdr.allocSz, -1) {
			panic(moerr.NewInternalErrorNoCtx("free size -1, possible double free"))
		}
	}
}

func (mp *MPool) reAlloc(old []byte, sz int) ([]byte, error) {
	if sz <= cap(old) {
		return old[:sz], nil
	}
	ret, err := mp.Alloc(sz)
	if err != nil {
		return nil, err
	}
	copy(ret, old)
	mp.Free(old)
	return ret, nil
}

// alignUp rounds n up to a multiple of a. a must be a power of 2.
func alignUp(n, a int) int {
	return (n + a - 1) &^ (a - 1)
}
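
// For example, alignUp(5000, 4096) = (5000+4095) &^ 4095 = 8192.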

// divRoundUp returns ceil(n / a).
func divRoundUp(n, a int) int {
	// a is generally a power of two. This will get inlined and
	// the compiler will optimize the division.
	return (n + a - 1) / a
}

// roundupsize returns the size of the memory block that mallocgc will
// allocate if you ask for size bytes.
func roundupsize(size int) int {
	if size < _MaxSmallSize {
		if size <= smallSizeMax-8 {
			return int(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
		} else {
			return int(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
		}
	}
	if size+_PageSize < size {
		return size
	}
	return alignUp(size, _PageSize)
}
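
// The size class tables referenced above (_MaxSmallSize, smallSizeMax,
// class_to_size, size_to_class8, size_to_class128, ...) are defined
// elsewhere in this package; they mirror the Go runtime's small-object
// size classes.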

// Grow is like reAlloc, but we try to be a little bit more aggressive on growing
// the slice.
func (mp *MPool) Grow(old []byte, sz int) ([]byte, error) {
	if sz < len(old) {
		return nil, moerr.NewInternalErrorNoCtx("mpool grow actually shrinks, %d, %d", len(old), sz)
	}
	if sz <= cap(old) {
		return old[:sz], nil
	}
	newCap := calculateNewCap(cap(old), sz)

	ret, err := mp.reAlloc(old, newCap)
	if err != nil {
		return old, err
	}
	return ret[:sz], nil
}

// Copied from the Go runtime's slice growth strategy.
func calculateNewCap(oldCap int, requiredSize int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if requiredSize > doublecap {
		newcap = requiredSize
	} else {
		const threshold = 256
		if newcap < threshold {
			newcap = doublecap
		} else {
			for 0 < newcap && newcap < requiredSize {
				newcap += (newcap + 3*threshold) / 4
			}
			if newcap <= 0 {
				newcap = requiredSize
			}
		}
	}
	newcap = roundupsize(newcap)
	return newcap
}
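
// Worked example: calculateNewCap(256, 300). Doubling gives 512 >= 300
// and 256 has already reached the threshold, so the loop grows 256 by
// (256+3*256)/4 = 256 to 512; roundupsize(512) = 512.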

func (mp *MPool) Grow2(old []byte, old2 []byte, sz int) ([]byte, error) {
	len1 := len(old)
	len2 := len(old2)
	if sz < len1+len2 {
		return nil, moerr.NewInternalErrorNoCtx("mpool grow2 actually shrinks, %d+%d, %d", len1, len2, sz)
	}
	ret, err := mp.Grow(old, sz)
	if err != nil {
		return nil, err
	}
	copy(ret[len1:len1+len2], old2)
	return ret, nil
}

/*
func (mp *MPool) Increase(nb int64) error {
	gcurr := globalStats.RecordAlloc("global", nb)
	if gcurr > GlobalCap() {
		globalStats.RecordFree(mp.tag, nb)
		return moerr.NewOOMNoCtx()
	}

	// check if it is under my cap
	mycurr := mp.stats.RecordAlloc(mp.tag, nb)
	if mycurr > mp.Cap() {
		mp.stats.RecordFree(mp.tag, nb)
		return moerr.NewInternalErrorNoCtx("mpool out of space, alloc %d bytes, cap %d", nb, mp.cap)
	}
	return nil
}

func (mp *MPool) Decrease(nb int64) {
	mp.stats.RecordFree(mp.tag, nb)
	globalStats.RecordFree("global", nb)
}
*/

func MakeSliceWithCap[T any](n, cap int, mp *MPool) ([]T, error) {
	var t T
	tsz := unsafe.Sizeof(t)
	bs, err := mp.Alloc(int(tsz) * cap)
	if err != nil {
		return nil, err
	}
	ptr := unsafe.Pointer(&bs[0])
	tptr := (*T)(ptr)
	ret := unsafe.Slice(tptr, cap)
	return ret[:n:cap], nil
}

func MakeSlice[T any](n int, mp *MPool) ([]T, error) {
	return MakeSliceWithCap[T](n, n, mp)
}

func MakeSliceArgs[T any](mp *MPool, args ...T) ([]T, error) {
	ret, err := MakeSlice[T](len(args), mp)
	if err != nil {
		return ret, err
	}
	copy(ret, args)
	return ret, nil
}
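
// Sketch of the typed helpers (the backing bytes come from mp.Alloc,
// so the slice is pool-accounted):
//
//	v, err := MakeSliceArgs(mp, int64(1), 2, 3) // []int64{1, 2, 3}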

// Report memory usage in json.
func ReportMemUsage(tag string) string {
	gstat := fmt.Sprintf("{\"global\":%s}", globalStats.ReportJson())
	if tag == "global" {
		return "[" + gstat + "]"
	}

	var poolStats []string
	if tag == "" {
		poolStats = append(poolStats, gstat)
	}

	gather := func(key, value any) bool {
		mp := value.(*MPool)
		if tag == "" || tag == mp.tag {
			poolStats = append(poolStats, mp.ReportJson())
		}
		return true
	}
	globalPools.Range(gather)

	return "[" + strings.Join(poolStats, ",") + "]"
}

func MPoolControl(tag string, cmd string) string {
	if tag == "" || tag == "global" {
		return "Cannot enable detail on mpool global stats"
	}

	cmdFunc := func(key, value any) bool {
		mp := value.(*MPool)
		if tag == mp.tag {
			switch cmd {
			case "enable_detail":
				mp.EnableDetailRecording()
			case "disable_detail":
				mp.DisableDetailRecording()
			}
		}
		return true
	}

	globalPools.Range(cmdFunc)
	return "ok"
}