github.com/etecs-ru/ristretto@v0.9.1/z/allocator.go

/*
 * Copyright 2020 Dgraph Labs, Inc. and Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package z

import (
	"bytes"
	"fmt"
	"math"
	"math/bits"
	"math/rand"
	"strings"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/dustin/go-humanize"
)

// Allocator amortizes the cost of small allocations by allocating memory in
// bigger chunks. Internally it uses z.Calloc to allocate memory. Once
// allocated, the memory is not moved, so it is safe to unsafe-cast the
// allocated bytes to Go struct pointers. Maintaining a freelist would be slow;
// instead, Allocator only ever allocates memory, with the idea that the entire
// Allocator is eventually released in one call.
type Allocator struct {
	Tag     string
	buffers [][]byte
	compIdx uint64 // Packs the buffer index and the position within that buffer.
	Ref     uint64
	sync.Mutex
}
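
// exampleAllocatorUsage is an illustrative sketch, not part of the original
// API: it shows the intended pattern of carving many small allocations out of
// one arena and releasing the whole arena in a single call.
func exampleAllocatorUsage() {
	a := NewAllocator(1024, "example")
	defer a.Release() // frees every buffer the allocator acquired

	buf := a.Allocate(64)       // 64 bytes carved out of the current chunk
	copy(buf, []byte("hello"))  // safe to write; the memory never moves
	_ = a.Copy([]byte("world")) // convenience helper that copies into the arena
}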

// allocsMu guards allocs, which keeps references to all Allocators so we can
// safely discard them later.
var allocsMu *sync.Mutex

var (
	allocRef       uint64                //nolint:unused,varcheck,gochecknoglobals,lll,deadcode,revive // adopt fork, do not touch it
	allocs         map[uint64]*Allocator //nolint:unused,varcheck,gochecknoglobals,lll,deadcode,revive // adopt fork, do not touch it
	calculatedLog2 []int                 //nolint:unused,varcheck,gochecknoglobals,lll,deadcode,revive // adopt fork, do not touch it
)

func init() {
	allocsMu = new(sync.Mutex)
	allocs = make(map[uint64]*Allocator)

	// Set up a unique Ref per process.
	rand.Seed(time.Now().UnixNano())
	allocRef = uint64(rand.Int63n(1<<16)) << 48

	calculatedLog2 = make([]int, 1025)
	for i := 1; i <= 1024; i++ {
		calculatedLog2[i] = int(math.Log2(float64(i)))
	}
}

// NewAllocator creates an allocator starting with the given size.
func NewAllocator(sz int, tag string) *Allocator {
	ref := atomic.AddUint64(&allocRef, 1)
	// We should not allow a zero-sized page because addBufferAt would run
	// into an infinite loop trying to double the page size.
	if sz < 512 {
		sz = 512
	}
	a := &Allocator{
		Ref:     ref,
		buffers: make([][]byte, 64),
		Tag:     tag,
	}
	l2 := uint64(log2(sz))
	if bits.OnesCount64(uint64(sz)) > 1 {
		l2 += 1
	}
	a.buffers[0] = Calloc(1<<l2, a.Tag)

	allocsMu.Lock()
	allocs[ref] = a
	allocsMu.Unlock()
	return a
}
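
// newAllocatorSizing is an illustrative sketch, not part of the original API:
// it shows how NewAllocator rounds the first buffer up. Sizes below 512 are
// bumped to 512, and any size that is not a power of two is rounded up to the
// next power of two.
func newAllocatorSizing() {
	a := NewAllocator(600, "sizing-example")
	defer a.Release()
	_ = a.Allocated() // 1024: 600 rounded up to the next power of two
}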

// Reset rewinds the allocator so its memory can be reused. It does not free
// or zero out the underlying buffers.
func (a *Allocator) Reset() {
	atomic.StoreUint64(&a.compIdx, 0)
}

// Allocators returns a human-readable summary of all live allocators, grouped by tag.
func Allocators() string {
	allocsMu.Lock()
	tags := make(map[string]uint64)
	num := make(map[string]int)
	for _, ac := range allocs {
		tags[ac.Tag] += ac.Allocated()
		num[ac.Tag] += 1
	}

	var buf bytes.Buffer
	for tag, sz := range tags {
		fmt.Fprintf(&buf, "Tag: %s Num: %d Size: %s . ", tag, num[tag], humanize.IBytes(sz))
	}
	allocsMu.Unlock()
	return buf.String()
}

// String implements fmt.Stringer. It dumps the allocator's buffers and
// current position, which is useful for debugging.
func (a *Allocator) String() string {
	var s strings.Builder
	s.WriteString(fmt.Sprintf("Allocator: %x\n", a.Ref))
	var cum int
	for i, b := range a.buffers {
		cum += len(b)
		if len(b) == 0 {
			break
		}
		s.WriteString(fmt.Sprintf("idx: %d len: %d cum: %d\n", i, len(b), cum))
	}
	pos := atomic.LoadUint64(&a.compIdx)
	bi, pi := parse(pos)
	s.WriteString(fmt.Sprintf("bi: %d pi: %d\n", bi, pi))
	s.WriteString(fmt.Sprintf("Size: %d\n", a.Size()))
	return s.String()
}

// AllocatorFrom returns the allocator corresponding to the given ref, or nil
// if no such allocator exists.
func AllocatorFrom(ref uint64) *Allocator {
	allocsMu.Lock()
	a := allocs[ref]
	allocsMu.Unlock()
	return a
}

// parse splits compIdx into the buffer index (high 32 bits) and the position
// within that buffer (low 32 bits).
func parse(pos uint64) (bufIdx, posIdx int) {
	return int(pos >> 32), int(pos & 0xFFFFFFFF)
}
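
// parseExample is an illustrative sketch, not part of the original API: the
// allocator packs its cursor into a single uint64 so it can be advanced with
// one atomic add, keeping the buffer index in the high 32 bits and the offset
// within that buffer in the low 32 bits.
func parseExample() {
	bufIdx, posIdx := parse(3<<32 | 128)
	_, _ = bufIdx, posIdx // bufIdx == 3, posIdx == 128
}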

// Size returns the size of the allocations so far.
func (a *Allocator) Size() int {
	pos := atomic.LoadUint64(&a.compIdx)
	bi, pi := parse(pos)
	var sz int
	for i, b := range a.buffers {
		if i < bi {
			sz += len(b)
			continue
		}
		sz += pi
		return sz
	}
	panic("Size should not reach here")
}

// log2 returns floor(log2(sz)), using a precomputed table for values up to 1024.
func log2(sz int) int {
	if sz < len(calculatedLog2) {
		return calculatedLog2[sz]
	}
	pow := 10
	sz >>= 10
	for sz > 1 {
		sz >>= 1
		pow++
	}
	return pow
}
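
// log2Example is an illustrative sketch, not part of the original API: values
// up to 1024 come straight from the precomputed table, while larger values
// fall back to the shift loop.
func log2Example() {
	_ = log2(600)  // 9, via calculatedLog2
	_ = log2(4096) // 12, via the shift loop
}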

// Allocated returns the total capacity, in bytes, of all buffers held by the allocator.
func (a *Allocator) Allocated() uint64 {
	var alloc int
	for _, b := range a.buffers {
		alloc += cap(b)
	}
	return uint64(alloc)
}

// TrimTo frees trailing buffers once the cumulative size of the buffers kept
// so far reaches max, bounding the memory retained by a reused allocator.
func (a *Allocator) TrimTo(max int) {
	var alloc int
	for i, b := range a.buffers {
		if len(b) == 0 {
			break
		}
		alloc += len(b)
		if alloc < max {
			continue
		}
		Free(b)
		a.buffers[i] = nil
	}
}

// Release releases all buffers held by the Allocator and removes it from the
// global registry. Remember to make this call to avoid memory leaks.
func (a *Allocator) Release() {
	if a == nil {
		return
	}

	var alloc int
	for _, b := range a.buffers {
		if len(b) == 0 {
			break
		}
		alloc += len(b)
		Free(b)
	}

	allocsMu.Lock()
	delete(allocs, a.Ref)
	allocsMu.Unlock()
}

// maxAlloc is the size limit for a single allocation: 1<<30 bytes (1 GiB).
const maxAlloc = 1 << 30

// MaxAlloc returns the maximum size of a single allocation.
func (a *Allocator) MaxAlloc() int {
	return maxAlloc
}

// nodeAlign is the mask used to align allocations to 8 bytes (the size of a uint64).
const nodeAlign = unsafe.Sizeof(uint64(0)) - 1

// AllocateAligned returns sz bytes whose starting address is 8-byte aligned.
// The returned memory is always zeroed out.
func (a *Allocator) AllocateAligned(sz int) []byte {
	tsz := sz + int(nodeAlign)
	out := a.Allocate(tsz)
	// Allocators are reused, so it's important to zero out the memory allocated
	// here. We don't always zero it out in Allocate, because other callers
	// (see Copy) immediately overwrite the allocated slice anyway.
	ZeroOut(out, 0, len(out))

	addr := uintptr(unsafe.Pointer(&out[0]))
	aligned := (addr + nodeAlign) & ^nodeAlign
	start := int(aligned - addr)

	return out[start : start+sz]
}
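
// alignedExample is an illustrative sketch, not part of the original API: the
// slice returned by AllocateAligned always starts on an 8-byte boundary, so
// it is safe to cast it to a struct whose largest field is a uint64.
func alignedExample(a *Allocator) {
	b := a.AllocateAligned(16)
	addr := uintptr(unsafe.Pointer(&b[0]))
	_ = addr % 8 // always 0: the start of the slice is 8-byte aligned
}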

// Copy copies buf into memory owned by the Allocator and returns the new
// slice. A nil Allocator falls back to a regular Go allocation.
func (a *Allocator) Copy(buf []byte) []byte {
	if a == nil {
		return append([]byte{}, buf...)
	}
	out := a.Allocate(len(buf))
	copy(out, buf)
	return out
}

// addBufferAt ensures that a buffer able to hold at least minSz bytes exists
// at or after bufIdx, allocating a new one (double the size of the previous
// buffer) if necessary. The caller must hold the Allocator's mutex.
func (a *Allocator) addBufferAt(bufIdx, minSz int) {
	for {
		if bufIdx >= len(a.buffers) {
			panic(fmt.Sprintf("Allocator can not allocate more than %d buffers", len(a.buffers)))
		}
		if len(a.buffers[bufIdx]) == 0 {
			break
		}
		if minSz <= len(a.buffers[bufIdx]) {
			// No need to do anything. We already have a buffer which can satisfy minSz.
			return
		}
		bufIdx++
	}
	assert(bufIdx > 0)
	// We need to allocate a new buffer.
	// Make pageSize double the size of the last allocation.
	pageSize := 2 * len(a.buffers[bufIdx-1])
	// Ensure pageSize is at least minSz.
	for pageSize < minSz {
		pageSize *= 2
	}
	// If bigger than maxAlloc, trim to maxAlloc.
	if pageSize > maxAlloc {
		pageSize = maxAlloc
	}

	buf := Calloc(pageSize, a.Tag)
	assert(len(a.buffers[bufIdx]) == 0)
	a.buffers[bufIdx] = buf
}

// Allocate returns a slice of sz bytes carved out of the allocator's current
// buffer, adding a new buffer if the current one is exhausted. The fast path
// is lock-free: each call claims its region with a single atomic add on compIdx.
func (a *Allocator) Allocate(sz int) []byte {
	if a == nil {
		return make([]byte, sz)
	}
	if sz > maxAlloc {
		panic(fmt.Sprintf("Unable to allocate more than %d\n", maxAlloc))
	}
	if sz == 0 {
		return nil
	}
	for {
		pos := atomic.AddUint64(&a.compIdx, uint64(sz))
		bufIdx, posIdx := parse(pos)
		buf := a.buffers[bufIdx]
		if posIdx > len(buf) {
			a.Lock()
			newPos := atomic.LoadUint64(&a.compIdx)
			newBufIdx, _ := parse(newPos)
			if newBufIdx != bufIdx {
				a.Unlock()
				continue
			}
			a.addBufferAt(bufIdx+1, sz)
			// Convert to uint64 before shifting so the buffer index is not
			// shifted out of a 32-bit int.
			atomic.StoreUint64(&a.compIdx, uint64(bufIdx+1)<<32)
			a.Unlock()
			// We added a new buffer. Let's acquire the slice the right way by going back to the top.
			continue
		}
		data := buf[posIdx-sz : posIdx]
		return data
	}
}
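
// concurrentAllocate is an illustrative sketch, not part of the original API:
// many goroutines may call Allocate concurrently, because the fast path only
// performs an atomic add on compIdx; the mutex is taken only when a new
// buffer has to be added.
func concurrentAllocate(a *Allocator) {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1024; j++ {
				_ = a.Allocate(32)
			}
		}()
	}
	wg.Wait()
}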

// AllocatorPool is a pool of reusable Allocators. Allocators sitting idle in
// the pool are released by a background goroutine.
type AllocatorPool struct {
	allocCh chan *Allocator
	closer  *Closer
	numGets int64
}

// NewAllocatorPool returns a pool that holds up to sz allocators for reuse.
func NewAllocatorPool(sz int) *AllocatorPool {
	a := &AllocatorPool{
		allocCh: make(chan *Allocator, sz),
		closer:  NewCloser(1),
	}
	go a.freeupAllocators()
	return a
}

// Get returns a pooled allocator if one is available; otherwise it creates a
// new allocator of the given size.
func (p *AllocatorPool) Get(sz int, tag string) *Allocator {
	if p == nil {
		return NewAllocator(sz, tag)
	}
	atomic.AddInt64(&p.numGets, 1)
	select {
	case alloc := <-p.allocCh:
		alloc.Reset()
		alloc.Tag = tag
		return alloc
	default:
		return NewAllocator(sz, tag)
	}
}

// Return hands an allocator back to the pool, trimming it to 400 MiB first.
// If the pool is already full, the allocator is released instead.
func (p *AllocatorPool) Return(a *Allocator) {
	if a == nil {
		return
	}
	if p == nil {
		a.Release()
		return
	}
	a.TrimTo(400 << 20)

	select {
	case p.allocCh <- a:
		return
	default:
		a.Release()
	}
}
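
// poolExample is an illustrative sketch, not part of the original API: Get
// either reuses a pooled allocator or creates a fresh one, and Return either
// parks the allocator for reuse or releases it when the pool is full.
func poolExample() {
	pool := NewAllocatorPool(2)
	defer pool.Release()

	a := pool.Get(1024, "pool-example")
	_ = a.Allocate(128)
	pool.Return(a) // reset and reused by the next Get, or released
}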

// Release shuts down the pool's background goroutine and releases all pooled
// allocators.
func (p *AllocatorPool) Release() {
	if p == nil {
		return
	}
	p.closer.SignalAndWait()
}

// freeupAllocators runs in the background and releases one pooled allocator
// every two seconds whenever the pool has seen no Get calls since the last tick.
func (p *AllocatorPool) freeupAllocators() {
	defer p.closer.Done()

	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	releaseOne := func() bool {
		select {
		case alloc := <-p.allocCh:
			alloc.Release()
			return true
		default:
			return false
		}
	}

	var last int64
	for {
		select {
		case <-p.closer.HasBeenClosed():
			close(p.allocCh)
			for alloc := range p.allocCh {
				alloc.Release()
			}
			return

		case <-ticker.C:
			gets := atomic.LoadInt64(&p.numGets)
			if gets != last {
				// Some retrievals were made since the last tick, so avoid doing a release.
				last = gets
				continue
			}
			releaseOne()
		}
	}
}