// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package memory implements a memory allocator.
//
// Changelog
//
// 2017-10-03 Added alternative, unsafe.Pointer-based API.
//
// Benchmarks
//
// Intel® Core™ i5-4670 CPU @ 3.40GHz × 4
//
//  goos: linux
//  goarch: amd64
//  pkg: github.com/cznic/memory
//  BenchmarkFree16-4           	100000000	        15.3 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkFree32-4           	100000000	        21.3 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkFree64-4           	50000000	        35.9 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkCalloc16-4         	50000000	        26.6 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkCalloc32-4         	50000000	        30.1 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkCalloc64-4         	30000000	        38.1 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkGoCalloc16-4       	50000000	        29.3 ns/op	      16 B/op	       1 allocs/op
//  BenchmarkGoCalloc32-4       	50000000	        30.4 ns/op	      32 B/op	       1 allocs/op
//  BenchmarkGoCalloc64-4       	30000000	        37.9 ns/op	      64 B/op	       1 allocs/op
//  BenchmarkMalloc16-4         	100000000	        15.4 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkMalloc32-4         	100000000	        15.6 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkMalloc64-4         	100000000	        15.9 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkUnsafeFree16-4     	100000000	        14.4 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkUnsafeFree32-4     	100000000	        20.4 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkUnsafeFree64-4     	50000000	        34.1 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkUnsafeCalloc16-4   	50000000	        23.2 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkUnsafeCalloc32-4   	50000000	        28.0 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkUnsafeCalloc64-4   	50000000	        34.1 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkUnsafeMalloc16-4   	100000000	        13.8 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkUnsafeMalloc32-4   	100000000	        14.2 ns/op	       0 B/op	       0 allocs/op
//  BenchmarkUnsafeMalloc64-4   	100000000	        14.0 ns/op	       0 B/op	       0 allocs/op
//  PASS
//  ok  	github.com/cznic/memory	229.054s
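//
// Usage
//
// A minimal sketch of the slice-based API (error handling elided; the zero
// value of Allocator is ready for use):
//
//  var a memory.Allocator
//  b, err := a.Malloc(64) // b is a []byte backed by allocator-owned memory.
//  if err != nil {
//  	panic(err)
//  }
//  copy(b, "hello")
//  if err := a.Free(b); err != nil {
//  	panic(err)
//  }
//  // Close releases any mappings still held by the allocator.
//  if err := a.Close(); err != nil {
//  	panic(err)
//  }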
package memory

import (
	"fmt"
	"os"
	"reflect"
	"unsafe"

	"github.com/cznic/mathutil"
)

const mallocAllign = 16 // Must be >= 16

var (
	headerSize  = roundup(int(unsafe.Sizeof(page{})), mallocAllign)
	maxSlotSize = pageAvail >> 1 // Allocations larger than this get a dedicated page.
	osPageMask  = osPageSize - 1
	osPageSize  = os.Getpagesize()
	pageAvail   = pageSize - headerSize // Payload bytes available in a page after its header.
	pageMask    = pageSize - 1
)

// roundup rounds n up to the nearest multiple of m, i.e.
// if n%m != 0 { n += m - n%m }. m must be a power of 2.
// For example, roundup(20, 16) == 32 and roundup(32, 16) == 32.
func roundup(n, m int) int { return (n + m - 1) &^ (m - 1) }

// node is a free-list link; it is stored in place of a freed slot.
type node struct {
	prev, next *node
}

// page is the header at the start of every memory-mapped region. For shared
// pages, log is the size class (each slot is 1<<log bytes); log == 0 marks a
// dedicated page holding a single large allocation.
type page struct {
	brk  int  // Number of slots handed out from this page so far.
	log  uint // Size class; 0 for dedicated pages.
	size int  // Size of the mapped region in bytes.
	used int  // Number of slots currently allocated.
}

// Allocator allocates and frees memory. Its zero value is ready for use.
type Allocator struct {
	allocs int                 // # of allocs.
	bytes  int                 // Asked from OS.
	cap    [64]int             // Slots per shared page, indexed by size class.
	lists  [64]*node           // Free lists, indexed by size class.
	mmaps  int                 // Asked from OS.
	pages  [64]*page           // Current, not yet full shared page per size class.
	regs   map[*page]struct{}  // All live mappings.
}

// mmap maps a new region of at least size bytes, registers it in a.regs and
// returns its page header.
func (a *Allocator) mmap(size int) (*page, error) {
	p, size, err := mmap(size)
	if err != nil {
		return nil, err
	}

	a.mmaps++
	a.bytes += size
	pg := (*page)(unsafe.Pointer(p))
	if a.regs == nil {
		a.regs = map[*page]struct{}{}
	}
	pg.size = size
	a.regs[pg] = struct{}{}
	return pg, nil
}

// newPage maps a dedicated page for a single allocation of size bytes.
func (a *Allocator) newPage(size int) (*page, error) {
	size += headerSize
	p, err := a.mmap(size)
	if err != nil {
		return nil, err
	}

	p.log = 0
	return p, nil
}

// newSharedPage maps a page that is carved into fixed-size slots of 1<<log
// bytes and makes it the current page for that size class.
func (a *Allocator) newSharedPage(log uint) (*page, error) {
	if a.cap[log] == 0 {
		a.cap[log] = pageAvail / (1 << log)
	}
	size := headerSize + a.cap[log]<<log
	p, err := a.mmap(size)
	if err != nil {
		return nil, err
	}

	a.pages[log] = p
	p.log = log
	return p, nil
}

// unmap unregisters p and returns its memory to the OS.
func (a *Allocator) unmap(p *page) error {
	delete(a.regs, p)
	a.mmaps--
	return unmap(uintptr(unsafe.Pointer(p)), p.size)
}

// UintptrCalloc is like Calloc except it returns an uintptr.
func (a *Allocator) UintptrCalloc(size int) (r uintptr, err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Calloc(%#x) %#x, %v\n", size, r, err)
		}()
	}
	if r, err = a.UintptrMalloc(size); r == 0 || err != nil {
		return 0, err
	}
	b := ((*rawmem)(unsafe.Pointer(r)))[:size]
	for i := range b {
		b[i] = 0
	}
	return r, nil
}

// UintptrFree is like Free except its argument is an uintptr, which must have
// been acquired from UintptrCalloc or UintptrMalloc or UintptrRealloc.
func (a *Allocator) UintptrFree(p uintptr) (err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Free(%#x) %v\n", p, err)
		}()
	}
	if p == 0 {
		return nil
	}

	a.allocs--
	pg := (*page)(unsafe.Pointer(p &^ uintptr(pageMask)))
	log := pg.log
	if log == 0 {
		// Dedicated page: return the whole mapping to the OS.
		a.bytes -= pg.size
		return a.unmap(pg)
	}

	// Shared page: push the slot onto the free list of its size class.
	n := (*node)(unsafe.Pointer(p))
	n.prev = nil
	n.next = a.lists[log]
	if n.next != nil {
		n.next.prev = n
	}
	a.lists[log] = n
	pg.used--
	if pg.used != 0 {
		return nil
	}

	// The page is now empty: unlink all of its slots from the free list and
	// return the mapping to the OS.
	for i := 0; i < pg.brk; i++ {
		n := (*node)(unsafe.Pointer(uintptr(unsafe.Pointer(pg)) + uintptr(headerSize+i<<log)))
		switch {
		case n.prev == nil:
			a.lists[log] = n.next
			if n.next != nil {
				n.next.prev = nil
			}
		case n.next == nil:
			n.prev.next = nil
		default:
			n.prev.next = n.next
			n.next.prev = n.prev
		}
	}

	if a.pages[log] == pg {
		a.pages[log] = nil
	}
	a.bytes -= pg.size
	return a.unmap(pg)
}

// UintptrMalloc is like Malloc except it returns an uintptr.
func (a *Allocator) UintptrMalloc(size int) (r uintptr, err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Malloc(%#x) %#x, %v\n", size, r, err)
		}()
	}
	if size < 0 {
		panic("invalid malloc size")
	}

	if size == 0 {
		return 0, nil
	}

	a.allocs++
	log := uint(mathutil.BitLen(roundup(size, mallocAllign) - 1))
	if 1<<log > maxSlotSize {
		// Too large for a shared slot: give the allocation its own page.
		p, err := a.newPage(size)
		if err != nil {
			return 0, err
		}

		return uintptr(unsafe.Pointer(p)) + uintptr(headerSize), nil
	}

	if a.lists[log] == nil && a.pages[log] == nil {
		if _, err := a.newSharedPage(log); err != nil {
			return 0, err
		}
	}

	if p := a.pages[log]; p != nil {
		// Bump-allocate the next fresh slot from the current shared page.
		p.used++
		p.brk++
		if p.brk == a.cap[log] {
			a.pages[log] = nil
		}
		return uintptr(unsafe.Pointer(p)) + uintptr(headerSize+(p.brk-1)<<log), nil
	}

	// Reuse a previously freed slot from the free list of this size class.
	n := a.lists[log]
	p := (*page)(unsafe.Pointer(uintptr(unsafe.Pointer(n)) &^ uintptr(pageMask)))
	a.lists[log] = n.next
	if n.next != nil {
		n.next.prev = nil
	}
	p.used++
	return uintptr(unsafe.Pointer(n)), nil
}

// UintptrRealloc is like Realloc except its first argument is an uintptr,
// which must have been returned from UintptrCalloc, UintptrMalloc or
// UintptrRealloc.
func (a *Allocator) UintptrRealloc(p uintptr, size int) (r uintptr, err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Realloc(%#x, %#x) %#x, %v\n", p, size, r, err)
		}()
	}
	switch {
	case p == 0:
		return a.UintptrMalloc(size)
	case size == 0 && p != 0:
		return 0, a.UintptrFree(p)
	}

	us := UintptrUsableSize(p)
	if us > size {
		// The existing block is already large enough.
		return p, nil
	}

	if r, err = a.UintptrMalloc(size); err != nil {
		return 0, err
	}

	if us < size {
		size = us
	}
	copy((*rawmem)(unsafe.Pointer(r))[:size], (*rawmem)(unsafe.Pointer(p))[:size])
	return r, a.UintptrFree(p)
}

// UintptrUsableSize is like UsableSize except its argument is an uintptr,
// which must have been returned from UintptrCalloc, UintptrMalloc or
// UintptrRealloc.
func UintptrUsableSize(p uintptr) (r int) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "UsableSize(%#x) %#x\n", p, r)
		}()
	}
	if p == 0 {
		return 0
	}

	return usableSize(p)
}

// usableSize returns the real size of the block at p: the slot size for a
// block on a shared page, or the whole payload for a dedicated page.
func usableSize(p uintptr) (r int) {
	pg := (*page)(unsafe.Pointer(p &^ uintptr(pageMask)))
	if pg.log != 0 {
		return 1 << pg.log
	}

	return pg.size - headerSize
}

// Calloc is like Malloc except the allocated memory is zeroed.
func (a *Allocator) Calloc(size int) (r []byte, err error) {
	p, err := a.UintptrCalloc(size)
	if p == 0 || err != nil { // p == 0 means size == 0; return (nil, nil) like Malloc.
		return nil, err
	}

	sh := (*reflect.SliceHeader)(unsafe.Pointer(&r))
	sh.Cap = usableSize(p)
	sh.Data = p
	sh.Len = size
	return r, nil
}

// Close releases all OS resources used by a and sets it to its zero value.
//
// It's not necessary to Close the Allocator when exiting a process.
func (a *Allocator) Close() (err error) {
	for p := range a.regs {
		if e := a.unmap(p); e != nil && err == nil {
			err = e
		}
	}
	*a = Allocator{}
	return err
}

// Free deallocates memory (as in C.free). The argument of Free must have been
// acquired from Calloc or Malloc or Realloc.
func (a *Allocator) Free(b []byte) (err error) {
	if b = b[:cap(b)]; len(b) == 0 {
		return nil
	}

	return a.UintptrFree(uintptr(unsafe.Pointer(&b[0])))
}

// Malloc allocates size bytes and returns a byte slice of the allocated
// memory. The memory is not initialized. Malloc panics for size < 0 and
// returns (nil, nil) for zero size.
//
// It's ok to reslice the returned slice but the result of appending to it
// cannot be passed to Free or Realloc as it may refer to a different backing
// array afterwards.
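//
// A minimal sketch of the caveat above (whether the append moves the data
// depends on cap(r)):
//
//  r, _ := a.Malloc(10)
//  r = r[:4]           // OK: reslicing keeps the original backing array.
//  r = append(r, 1, 2) // If this grows past cap(r), r is now backed by
//                      // Go-managed memory and must not be passed to Free.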
func (a *Allocator) Malloc(size int) (r []byte, err error) {
	p, err := a.UintptrMalloc(size)
	if p == 0 || err != nil {
		return nil, err
	}

	sh := (*reflect.SliceHeader)(unsafe.Pointer(&r))
	sh.Cap = usableSize(p)
	sh.Data = p
	sh.Len = size
	return r, nil
}

// Realloc changes the size of the backing array of b to size bytes or returns
// an error, if any. The contents will be unchanged in the range from the
// start of the region up to the minimum of the old and new sizes. If the new
// size is larger than the old size, the added memory will not be initialized.
// If b's backing array is of zero size, then the call is equivalent to
// Malloc(size), for all values of size; if size is equal to zero, and b's
// backing array is not of zero size, then the call is equivalent to Free(b).
// Unless b's backing array is of zero size, it must have been returned by an
// earlier call to Malloc, Calloc or Realloc. If the area pointed to was
// moved, a Free(b) is done.
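//
// A short sketch of a typical grow pattern (error handling elided):
//
//  b, _ := a.Malloc(16)
//  b, _ = a.Realloc(b, 32) // The first 16 bytes are preserved; the rest is
//                          // uninitialized. The old block is freed if moved.
//  _ = a.Free(b)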
func (a *Allocator) Realloc(b []byte, size int) (r []byte, err error) {
	var p uintptr
	if b = b[:cap(b)]; len(b) != 0 {
		p = uintptr(unsafe.Pointer(&b[0]))
	}
	if p, err = a.UintptrRealloc(p, size); p == 0 || err != nil {
		return nil, err
	}

	sh := (*reflect.SliceHeader)(unsafe.Pointer(&r))
	sh.Cap = usableSize(p)
	sh.Data = p
	sh.Len = size
	return r, nil
}

// UsableSize reports the size of the memory block allocated at p, which must
// point to the first byte of a slice returned from Calloc, Malloc or Realloc.
// The allocated memory block size can be larger than the size originally
// requested from Calloc, Malloc or Realloc.
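//
// For example (a sketch; the exact size classes are an implementation detail):
//
//  var a memory.Allocator
//  b, _ := a.Malloc(20)           // Served from the 32-byte size class.
//  sz := memory.UsableSize(&b[0]) // sz == 32 == cap(b).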
func UsableSize(p *byte) (r int) { return UintptrUsableSize(uintptr(unsafe.Pointer(p))) }

// UnsafeCalloc is like Calloc except it returns an unsafe.Pointer.
func (a *Allocator) UnsafeCalloc(size int) (r unsafe.Pointer, err error) {
	p, err := a.UintptrCalloc(size)
	if err != nil {
		return nil, err
	}

	return unsafe.Pointer(p), nil
}

// UnsafeFree is like Free except its argument is an unsafe.Pointer, which must
// have been acquired from UnsafeCalloc or UnsafeMalloc or UnsafeRealloc.
func (a *Allocator) UnsafeFree(p unsafe.Pointer) (err error) { return a.UintptrFree(uintptr(p)) }

// UnsafeMalloc is like Malloc except it returns an unsafe.Pointer.
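//
// A short sketch of pairing the unsafe.Pointer-based calls (error handling
// elided):
//
//  p, _ := a.UnsafeMalloc(64)
//  // ... use the 64 bytes at p ...
//  _ = a.UnsafeFree(p)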
func (a *Allocator) UnsafeMalloc(size int) (r unsafe.Pointer, err error) {
	p, err := a.UintptrMalloc(size)
	if err != nil {
		return nil, err
	}

	return unsafe.Pointer(p), nil
}

// UnsafeRealloc is like Realloc except its first argument is an
// unsafe.Pointer, which must have been returned from UnsafeCalloc,
// UnsafeMalloc or UnsafeRealloc.
func (a *Allocator) UnsafeRealloc(p unsafe.Pointer, size int) (r unsafe.Pointer, err error) {
	q, err := a.UintptrRealloc(uintptr(p), size)
	if err != nil {
		return nil, err
	}

	return unsafe.Pointer(q), nil
}

// UnsafeUsableSize is like UsableSize except its argument is an
// unsafe.Pointer, which must have been returned from UnsafeCalloc,
// UnsafeMalloc or UnsafeRealloc.
func UnsafeUsableSize(p unsafe.Pointer) (r int) { return UintptrUsableSize(uintptr(p)) }