github.com/outcaste-io/ristretto@v0.2.3/z/allocator.go

/*
 * Copyright 2020 Dgraph Labs, Inc. and Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package z

import (
	"bytes"
	"fmt"
	"math"
	"math/bits"
	"math/rand"
	"strings"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/dustin/go-humanize"
)
// Allocator amortizes the cost of small allocations by allocating memory in
// bigger chunks. Internally it uses z.Calloc to allocate memory. Once
// allocated, the memory is not moved, so it is safe to unsafe-cast the
// allocated bytes to Go struct pointers. Maintaining a freelist is slow.
// Instead, Allocator only allocates memory, with the idea that eventually the
// entire Allocator is released in one go.
type Allocator struct {
	sync.Mutex
	compIdx uint64 // Stores bufIdx in the 32 MSBs and posIdx in the 32 LSBs.
	buffers [][]byte
	Ref     uint64
	Tag     string
}

// allocs keeps references to all Allocators, so we can safely discard them later.
var allocsMu *sync.Mutex
var allocRef uint64
var allocs map[uint64]*Allocator
var calculatedLog2 []int

func init() {
	allocsMu = new(sync.Mutex)
	allocs = make(map[uint64]*Allocator)

	// Set up a unique Ref per process.
	rand.Seed(time.Now().UnixNano())
	allocRef = uint64(rand.Int63n(1<<16)) << 48

	calculatedLog2 = make([]int, 1025)
	for i := 1; i <= 1024; i++ {
		calculatedLog2[i] = int(math.Log2(float64(i)))
	}
}

// NewAllocator creates an allocator starting with the given size.
func NewAllocator(sz int, tag string) *Allocator {
	ref := atomic.AddUint64(&allocRef, 1)
	// We should not allow a zero-sized page because addBufferAt will run into
	// an infinite loop trying to double the page size.
	if sz < 512 {
		sz = 512
	}
	a := &Allocator{
		Ref:     ref,
		buffers: make([][]byte, 64),
		Tag:     tag,
	}
	l2 := uint64(log2(sz))
	if bits.OnesCount64(uint64(sz)) > 1 {
		l2 += 1
	}
	a.buffers[0] = Calloc(1<<l2, a.Tag)

	allocsMu.Lock()
	allocs[ref] = a
	allocsMu.Unlock()
	return a
}
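
// A minimal usage sketch (illustrative only; the size hint and tag are
// arbitrary). Allocate hands out byte slices from the arena, and Release
// returns everything in one call:
//
//	a := NewAllocator(1024, "example")
//	defer a.Release()
//	buf := a.Allocate(64)
//	copy(buf, []byte("hello"))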

// Reset rewinds the allocator so its buffers can be reused for fresh allocations.
func (a *Allocator) Reset() {
	atomic.StoreUint64(&a.compIdx, 0)
}

// Allocators returns a human-readable summary of all live allocators, grouped by tag.
func Allocators() string {
	allocsMu.Lock()
	tags := make(map[string]uint64)
	num := make(map[string]int)
	for _, ac := range allocs {
		tags[ac.Tag] += ac.Allocated()
		num[ac.Tag] += 1
	}

	var buf bytes.Buffer
	for tag, sz := range tags {
		fmt.Fprintf(&buf, "Tag: %s Num: %d Size: %s . ", tag, num[tag], humanize.IBytes(sz))
	}
	allocsMu.Unlock()
	return buf.String()
}
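
// For illustration, the summary produced above looks roughly like
// "Tag: skiplist Num: 2 Size: 1.0 KiB . " for each tag (the tag name and
// numbers here are made up).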

// String pretty-prints the allocator's buffers and current position, mainly for debugging.
func (a *Allocator) String() string {
	var s strings.Builder
	s.WriteString(fmt.Sprintf("Allocator: %x\n", a.Ref))
	var cum int
	for i, b := range a.buffers {
		cum += len(b)
		if len(b) == 0 {
			break
		}
		s.WriteString(fmt.Sprintf("idx: %d len: %d cum: %d\n", i, len(b), cum))
	}
	pos := atomic.LoadUint64(&a.compIdx)
	bi, pi := parse(pos)
	s.WriteString(fmt.Sprintf("bi: %d pi: %d\n", bi, pi))
	s.WriteString(fmt.Sprintf("Size: %d\n", a.Size()))
	return s.String()
}

// AllocatorFrom returns the allocator corresponding to the given ref, or nil if
// it has already been released.
func AllocatorFrom(ref uint64) *Allocator {
	allocsMu.Lock()
	a := allocs[ref]
	allocsMu.Unlock()
	return a
}

// parse splits a compIdx value into bufIdx (high 32 bits) and posIdx (low 32 bits).
func parse(pos uint64) (bufIdx, posIdx int) {
	return int(pos >> 32), int(pos & 0xFFFFFFFF)
}
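
// For illustration: a compIdx of 3<<32 | 128 parses back to bufIdx 3 and
// posIdx 128, i.e. the allocator is 128 bytes into its fourth buffer.
//
//	bi, pi := parse(3<<32 | 128) // bi == 3, pi == 128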

// Size returns the size of the allocations so far.
func (a *Allocator) Size() int {
	pos := atomic.LoadUint64(&a.compIdx)
	bi, pi := parse(pos)
	var sz int
	for i, b := range a.buffers {
		if i < bi {
			sz += len(b)
			continue
		}
		sz += pi
		return sz
	}
	panic("Size should not reach here")
}

// log2 returns floor(log2(sz)), using a precomputed table for sz <= 1024.
func log2(sz int) int {
	if sz < len(calculatedLog2) {
		return calculatedLog2[sz]
	}
	pow := 10
	sz >>= 10
	for sz > 1 {
		sz >>= 1
		pow++
	}
	return pow
}
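
// For illustration: log2(9) == 3 and log2(1500) == 10. NewAllocator compensates
// for this rounding down by adding one to l2 when sz is not a power of two, so
// the first buffer is always at least sz bytes.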

// Allocated returns the total number of bytes reserved by the allocator across
// all of its buffers, which is typically larger than Size().
func (a *Allocator) Allocated() uint64 {
	var alloc int
	for _, b := range a.buffers {
		alloc += cap(b)
	}
	return uint64(alloc)
}

// TrimTo frees buffers once their cumulative size reaches max, bounding the
// memory retained by an allocator that is going to be reused.
func (a *Allocator) TrimTo(max int) {
	var alloc int
	for i, b := range a.buffers {
		if len(b) == 0 {
			break
		}
		alloc += len(b)
		if alloc < max {
			continue
		}
		Free(b)
		a.buffers[i] = nil
	}
}
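
// For example, AllocatorPool.Return below calls a.TrimTo(400 << 20), so a pooled
// allocator never holds on to much more than roughly 400 MiB between uses.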

// Release frees all the buffers and removes the allocator from the global
// registry. Remember to make this call to avoid memory leaks.
func (a *Allocator) Release() {
	if a == nil {
		return
	}

	var alloc int
	for _, b := range a.buffers {
		if len(b) == 0 {
			break
		}
		alloc += len(b)
		Free(b)
	}

	allocsMu.Lock()
	delete(allocs, a.Ref)
	allocsMu.Unlock()
}

// maxAlloc caps both the size of a single allocation and the size of a single
// internal buffer at 1 GiB.
const maxAlloc = 1 << 30

// MaxAlloc returns the maximum size of a single allocation.
func (a *Allocator) MaxAlloc() int {
	return maxAlloc
}

// nodeAlign is the alignment mask used by AllocateAligned (8-byte alignment).
const nodeAlign = unsafe.Sizeof(uint64(0)) - 1

// AllocateAligned returns sz bytes whose start address is aligned to 8 bytes,
// so the slice can be safely cast to a struct containing uint64 fields.
func (a *Allocator) AllocateAligned(sz int) []byte {
	tsz := sz + int(nodeAlign)
	out := a.Allocate(tsz)
	// We are reusing allocators. In that case, it's important to zero out the
	// memory allocated here. We don't always zero it out (in Allocate), because
	// other functions would be overwriting the allocated slices immediately
	// anyway (see Copy).
	ZeroOut(out, 0, len(out))

	addr := uintptr(unsafe.Pointer(&out[0]))
	aligned := (addr + nodeAlign) & ^nodeAlign
	start := int(aligned - addr)

	return out[start : start+sz]
}
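
// A hedged sketch of the intended use (the node type here is hypothetical):
// the aligned bytes can be reinterpreted as a pointer to a struct whose fields
// require 8-byte alignment.
//
//	type node struct{ next, value uint64 }
//	b := a.AllocateAligned(int(unsafe.Sizeof(node{})))
//	n := (*node)(unsafe.Pointer(&b[0]))
//	n.value = 42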

// Copy returns a copy of buf backed by the allocator. On a nil *Allocator it
// falls back to an ordinary heap-allocated copy.
func (a *Allocator) Copy(buf []byte) []byte {
	if a == nil {
		return append([]byte{}, buf...)
	}
	out := a.Allocate(len(buf))
	copy(out, buf)
	return out
}
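
// A usage sketch (illustrative only):
//
//	arenaCopy := a.Copy([]byte("data")) // backed by the allocator's buffers
//	var nilAlloc *Allocator
//	heapCopy := nilAlloc.Copy([]byte("data")) // plain Go slice on the heap
//	_, _ = arenaCopy, heapCopy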

// addBufferAt ensures there is a buffer at or after bufIdx that can hold at
// least minSz bytes, allocating a new one (double the previous buffer's size)
// if needed. The caller must hold the allocator's lock.
func (a *Allocator) addBufferAt(bufIdx, minSz int) {
	for {
		if bufIdx >= len(a.buffers) {
			panic(fmt.Sprintf("Allocator cannot allocate more than %d buffers", len(a.buffers)))
		}
		if len(a.buffers[bufIdx]) == 0 {
			break
		}
		if minSz <= len(a.buffers[bufIdx]) {
			// No need to do anything. We already have a buffer which can satisfy minSz.
			return
		}
		bufIdx++
	}
	assert(bufIdx > 0)
	// We need to allocate a new buffer.
	// Make pageSize double the last allocation.
	pageSize := 2 * len(a.buffers[bufIdx-1])
	// Ensure pageSize is at least minSz.
	for pageSize < minSz {
		pageSize *= 2
	}
	// If bigger than maxAlloc, trim to maxAlloc.
	if pageSize > maxAlloc {
		pageSize = maxAlloc
	}

	buf := Calloc(pageSize, a.Tag)
	assert(len(a.buffers[bufIdx]) == 0)
	a.buffers[bufIdx] = buf
}
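
// For illustration: starting from the 512-byte minimum, successive growth steps
// allocate buffers of 1 KiB, 2 KiB, 4 KiB and so on, capped at maxAlloc, so the
// 64-entry buffers slice is far more than is ever needed in practice.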

// Allocate returns a slice of sz bytes carved out of the allocator's buffers,
// growing the arena when the current buffer is exhausted. It is safe for
// concurrent use.
func (a *Allocator) Allocate(sz int) []byte {
	if a == nil {
		return make([]byte, sz)
	}
	if sz > maxAlloc {
		panic(fmt.Sprintf("Unable to allocate more than %d\n", maxAlloc))
	}
	if sz == 0 {
		return nil
	}
	for {
		pos := atomic.AddUint64(&a.compIdx, uint64(sz))
		bufIdx, posIdx := parse(pos)
		buf := a.buffers[bufIdx]
		if posIdx > len(buf) {
			a.Lock()
			newPos := atomic.LoadUint64(&a.compIdx)
			newBufIdx, _ := parse(newPos)
			if newBufIdx != bufIdx {
				a.Unlock()
				continue
			}
			a.addBufferAt(bufIdx+1, sz)
			atomic.StoreUint64(&a.compIdx, uint64((bufIdx+1)<<32))
			a.Unlock()
			// We added a new buffer. Let's acquire the slice the right way by
			// going back to the top.
			continue
		}
		data := buf[posIdx-sz : posIdx]
		return data
	}
}
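
// A concurrency sketch (illustrative only; sizes are arbitrary). Allocate can
// be called from multiple goroutines against the same allocator:
//
//	var wg sync.WaitGroup
//	for i := 0; i < 4; i++ {
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			_ = a.Allocate(128)
//		}()
//	}
//	wg.Wait()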

// AllocatorPool recycles Allocators through a buffered channel and releases
// idle ones from a background goroutine.
type AllocatorPool struct {
	numGets int64
	allocCh chan *Allocator
	closer  *Closer
}

// NewAllocatorPool returns a pool that keeps at most sz allocators around for reuse.
func NewAllocatorPool(sz int) *AllocatorPool {
	a := &AllocatorPool{
		allocCh: make(chan *Allocator, sz),
		closer:  NewCloser(1),
	}
	go a.freeupAllocators()
	return a
}

// Get returns a pooled allocator if one is available, otherwise a new one. A
// nil pool simply creates a fresh allocator.
func (p *AllocatorPool) Get(sz int, tag string) *Allocator {
	if p == nil {
		return NewAllocator(sz, tag)
	}
	atomic.AddInt64(&p.numGets, 1)
	select {
	case alloc := <-p.allocCh:
		alloc.Reset()
		alloc.Tag = tag
		return alloc
	default:
		return NewAllocator(sz, tag)
	}
}

// Return hands an allocator back to the pool, trimming it first; if the pool is
// full (or nil), the allocator is released instead.
func (p *AllocatorPool) Return(a *Allocator) {
	if a == nil {
		return
	}
	if p == nil {
		a.Release()
		return
	}
	a.TrimTo(400 << 20)

	select {
	case p.allocCh <- a:
		return
	default:
		a.Release()
	}
}
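
// A pool usage sketch (illustrative only; the sizes and tag are arbitrary):
//
//	pool := NewAllocatorPool(8)
//	defer pool.Release()
//
//	alloc := pool.Get(1024, "example")
//	buf := alloc.Allocate(64)
//	_ = buf
//	pool.Return(alloc) // recycled via the channel, or released if the pool is full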

// Release shuts down the pool's background goroutine and releases any
// allocators it is still holding.
func (p *AllocatorPool) Release() {
	if p == nil {
		return
	}
	p.closer.SignalAndWait()
}

// freeupAllocators runs in the background, releasing one pooled allocator every
// two seconds while the pool is idle, and draining the pool on shutdown.
func (p *AllocatorPool) freeupAllocators() {
	defer p.closer.Done()

	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	releaseOne := func() bool {
		select {
		case alloc := <-p.allocCh:
			alloc.Release()
			return true
		default:
			return false
		}
	}

	var last int64
	for {
		select {
		case <-p.closer.HasBeenClosed():
			close(p.allocCh)
			for alloc := range p.allocCh {
				alloc.Release()
			}
			return

		case <-ticker.C:
			gets := atomic.LoadInt64(&p.numGets)
			if gets != last {
				// Some retrievals were made since the last tick, so avoid
				// releasing anything this round.
				last = gets
				continue
			}
			releaseOne()
		}
	}
}