github.com/primecitizens/pcz/std@v0.2.1/core/alloc/sbrkalloc/alloc.go (about)

     1  package sbrkalloc
     2  
     3  import (
     4  	"unsafe"
     5  
     6  	"github.com/primecitizens/pcz/std/core/abi"
     7  	"github.com/primecitizens/pcz/std/core/alloc"
     8  	"github.com/primecitizens/pcz/std/core/arch"
     9  	"github.com/primecitizens/pcz/std/core/assert"
    10  	"github.com/primecitizens/pcz/std/core/num"
    11  	"github.com/primecitizens/pcz/std/core/os"
    12  )
    13  
    14  // TODO: need mutex protection once the std supports multi-threading.
    15  
// Package-level heap state, initialized by platform specific init().
// Access is unsynchronized (single-threaded for now).
var (
	_start    uintptr // _start is the start of heap memory
	base      uintptr // base is the base address for allocating new memory chunks (bump pointer)
	end       uintptr // end is the end of heap memory obtained from Sbrk so far
	lastFree  *head   // pointer to the last freed memory chunk (head of the free list)
	lastAlloc *head   // pointer to the last allocated memory chunk, for adjusting size for alignment
)
    24  
const (
	// headSz is the size in bytes of the per-chunk allocation record
	// (head) placed immediately before the user-visible memory.
	headSz = unsafe.Sizeof(head{})
)
    28  
// A head is an allocation record put at the start of allocated memory;
// the caller-usable region begins headSz bytes after it.
type head struct {
	// nextfree links this chunk into the free list.
	//
	// It is nil while the chunk is in use — Free relies on this
	// invariant to detect double frees.
	nextfree *head
	// total sz of the allocated node (including this node)
	sz uintptr
}
    35  
// findVacant walks the free list looking for the smallest chunk able to
// hold sz bytes (best-fit), where sz is the total size including headSz
// as computed by Malloc.
//
// On success it unlinks (or splits) the chunk and returns a pointer to
// the usable memory just past the chunk's head; it returns nil when no
// freed chunk is large enough.
func findVacant(sz uintptr) unsafe.Pointer {
	if lastFree == nil {
		return nil
	}

	cur := lastFree
	// fit is the best (smallest adequate) chunk found so far, fitPrev
	// its predecessor in the free list (nil when fit is the list head),
	// and last trails cur by one node so fitPrev can be recorded.
	var fit, fitPrev, last *head
	for ; cur != nil; cur, last = cur.nextfree, cur {
		// TODO: merge neighbor chunks
		if cur.sz < sz {
			continue
		}

		if fit == nil || fit.sz > cur.sz {
			fit, fitPrev = cur, last
		}

		// an exact fit cannot be improved upon; stop scanning early
		if cur.sz == sz {
			break
		}
	}

	if fit == nil {
		return nil
	}

	if fit.sz-sz > headSz {
		// can host at least one more record in this chunk: split it,
		// keeping the surplus tail on the free list as a new chunk
		frag := (*head)(unsafe.Add(unsafe.Pointer(fit), sz))
		*frag = head{
			nextfree: fit.nextfree,
			sz:       fit.sz - sz,
		}
		fit.sz = sz

		// replace fit with frag at fit's position in the free list
		if fitPrev == nil {
			lastFree = frag
		} else {
			fitPrev.nextfree = frag
		}
	} else {
		// hand out the whole chunk; unlink it from the free list
		if fitPrev == nil {
			lastFree = fit.nextfree
		} else {
			fitPrev.nextfree = fit.nextfree
		}
	}

	// nil nextfree marks the chunk as in-use (see Free's double-free check)
	fit.nextfree = nil
	return unsafe.Pointer(uintptr(unsafe.Pointer(fit)) + headSz)
}
    87  
// T is the type implementing sbrk-style allocation: a bump allocator
// over Sbrk-grown memory with a best-fit free list for recycling.
type T struct{}
    90  
    91  func (*T) Malloc(typ *abi.Type, n uintptr, zeroize bool) unsafe.Pointer {
    92  	if n == 0 || (typ != nil && typ.Size_ == 0) {
    93  		return alloc.ZeroSized()
    94  	}
    95  
    96  	var (
    97  		szReq uintptr // total bytes requested by the caller
    98  		sz    uintptr // total bytes including the head and szReq
    99  	)
   100  	if typ == nil {
   101  		szReq = n
   102  		sz = num.AlignUp(n+headSz, arch.Int64Align)
   103  	} else {
   104  		szReq = n * typ.Size_
   105  		sz = num.AlignUp(n*typ.Size_+headSz, uintptr(max(arch.Int64Align, typ.Align_)))
   106  	}
   107  
   108  	if sz < szReq || sz > os.MaxAlloc {
   109  		assert.Throw("size", "too", "large")
   110  	}
   111  
   112  	if luck := findVacant(sz); luck != nil {
   113  		return luck
   114  	}
   115  
   116  	if end-base < sz {
   117  		end = uintptr(Sbrk(num.AlignUp(sz, arch.DefaultPhysPageSize)))
   118  	}
   119  
   120  	lastAlloc = (*head)(unsafe.Pointer(base))
   121  	*lastAlloc = head{
   122  		nextfree: nil,
   123  		sz:       sz,
   124  	}
   125  
   126  	ptr := unsafe.Pointer(base + headSz)
   127  	base += sz
   128  
   129  	if zeroize {
   130  		clear(unsafe.Slice((*byte)(ptr), szReq))
   131  	}
   132  
   133  	return ptr
   134  }
   135  
   136  func (x *T) Palloc(size uintptr) unsafe.Pointer {
   137  	if size == 0 {
   138  		return alloc.ZeroSized()
   139  	}
   140  
   141  	return x.Malloc(nil, size, true)
   142  }
   143  
   144  // TODO: optimize for freeing last allocation
   145  
   146  func (*T) Free(typ *abi.Type, n uintptr, ptr unsafe.Pointer) alloc.Hint {
   147  	if n == 0 || (typ != nil && typ.Size_ == 0) {
   148  		return 0
   149  	}
   150  
   151  	addr := uintptr(ptr) - headSz
   152  	if addr < _start || addr > (base-headSz) {
   153  		assert.Throw("memory", "not", "managed", "by", "this", "allocator")
   154  	}
   155  
   156  	x := (*head)(unsafe.Pointer(addr))
   157  	if x.nextfree != nil {
   158  		assert.Throw("double", "free")
   159  	}
   160  
   161  	if x.sz == 0 || x.sz > os.MaxAlloc {
   162  		assert.Throw("invalid", "allocation", "record")
   163  	}
   164  
   165  	x.nextfree = lastFree
   166  	lastFree = x
   167  	return 0
   168  }