github.com/pidato/unsafe@v0.1.4/memory/tlsf/heap.go (about)

     1  package tlsf
     2  
     3  import (
     4  	"math"
     5  	"math/bits"
     6  	"unsafe"
     7  )
     8  
     9  // Heap === Heap (Two-Level Segregate Fit) memory allocator ===
    10  //
    11  // Heap is a general purpose dynamic memory allocator specifically designed to meet
    12  // real-time requirements:
    13  //
    14  // 		Bounded Response Time - The worst-case execution time (WCET) of memory allocation
    15  //								and deallocation Has got to be known in advance and be
    16  //								independent of application data. Allocator Has a constant
    17  //								cost O(1).
    18  //
    19  //						 Fast - Additionally to a bounded cost, the allocator Has to be
    20  //								efficient and fast enough. Allocator executes a maximum
    21  //								of 168 processor instructions in a x86 architecture.
    22  //								Depending on the compiler version and optimisation flags,
    23  //								it can be slightly lower or higher.
    24  //
    25  // 		Efficient Memory Use - 	Traditionally, real-time systems run for long periods of
    26  //								time and some (embedded applications), have strong constraints
    27  //								of memory size. Fragmentation can have a significant impact on
    28  //								such systems. It can increase  dramatically, and degrade the
    29  //								system performance. A way to measure this efficiency is the
    30  //								memory fragmentation incurred by the allocator. Allocator has
    31  //								been tested in hundreds of different loads (real-time tasks,
    32  //								general purpose applications, etc.) obtaining an average
    33  //								fragmentation lower than 15 %. The maximum fragmentation
    34  //								measured is lower than 25%.
    35  //
    36  // Memory can be added on demand and is a multiple of 64kb pages. Grow is used to allocate new
    37  // memory to be added to the allocator. Each Grow must provide a contiguous chunk of memory.
    38  // However, the allocator may be comprised of many contiguous chunks which are not contiguous
    39  // of each other. There is not a mechanism for shrinking the memory. Supplied Grow function
    40  // can effectively limit how big the allocator can get. If a zero pointer is returned it will
    41  // cause an out-of-memory situation which is propagated as a nil pointer being returned from
    42  // Alloc. It's up to the application to decide how to handle such scenarios.
    43  //
    44  // see: http://www.gii.upv.es/tlsf/
    45  // see: https://github.com/AssemblyScript/assemblyscript
    46  //
    47  // - `ffs(x)` is equivalent to `ctz(x)` with x != 0
    48  // - `fls(x)` is equivalent to `sizeof(x) * 8 - clz(x) - 1`
    49  //
    50  // ╒══════════════ Block size interpretation (32-bit) ═════════════╕
    51  //    3                   2                   1
    52  //  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0  bits
    53  // ├─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┼─┴─┴─┴─╫─┴─┴─┴─┤
    54  // │ |                    FL                       │ SB = SL + AL  │ ◄─ usize
    55  // └───────────────────────────────────────────────┴───────╨───────┘
    56  // FL: first level, SL: second level, AL: alignment, SB: small block
type Heap struct {
	root      *root   // TLSF control structure, placed inside the managed memory right after this header
	HeapStart uintptr // first address managed by this heap
	HeapEnd   uintptr // one past the last address managed by this heap
	arena     uintptr // NOTE(review): never read or written in this file — confirm purpose
	Grow      Grow    // callback used to obtain more pages; nil disables growth
	Slot      uint8   // NOTE(review): not used within this file — presumably an external slot id
	Stats     // embedded allocation counters and sizes
}
    66  
// Stats provides the metrics of an Allocator
type Stats struct {
	HeapSize        int64   // total bytes currently managed by the heap
	AllocSize       int64   // bytes currently allocated (payloads only)
	PeakAllocSize   int64   // high-water mark of AllocSize
	FreeSize        int64   // bytes currently sitting in free blocks
	Allocs          int32   // number of live allocations
	InitialPages    int32   // pages supplied at Bootstrap time
	ConsecutiveLow  int32   // NOTE(review): never updated in this file — confirm semantics
	ConsecutiveHigh int32   // NOTE(review): never updated in this file — confirm semantics
	Pages           int32   // total 64kb pages currently owned
	Grows           int32   // NOTE(review): never updated in this file — number of Grow calls?
	fragmentation   float32 // cached result of the last Fragmentation() call
}
    81  
    82  func (s *Stats) Fragmentation() float32 {
    83  	if s.HeapSize == 0 || s.PeakAllocSize == 0 {
    84  		return 0
    85  	}
    86  	pct := float64(s.HeapSize-s.PeakAllocSize) / float64(s.HeapSize)
    87  	s.fragmentation = float32(math.Floor(pct*100) / 100)
    88  	return s.fragmentation
    89  }
    90  
// Grow provides the ability to Grow the heap and allocate a contiguous
// chunk of system memory to add to the allocator.
//
// pagesBefore is the heap's current page count and pagesNeeded the minimum
// number of 64kb pages required to satisfy minSize bytes. Implementations
// return the number of pages actually added and the [start, end) address
// range of the new contiguous chunk; returning start == 0 or end == 0
// signals an out-of-memory condition to the caller.
type Grow func(pagesBefore, pagesNeeded int32, minSize uintptr) (pagesAdded int32, start, end uintptr)
    94  
const (
	// PageSize is the granularity the heap grows by (64kb wasm-style pages).
	PageSize = uintptr(64 * 1024)

	// Shift used to index the per-first-level uint32 second-level maps.
	_TLSFAlignU32 = 2
	// All allocation sizes and addresses are aligned to 4 or 8 bytes.
	// 32bit = 2
	// 64bit = 3
	// <expr> = bits.UintSize / 8 / 4 + 1
	_TLSFAlignSizeLog2 uintptr = ((32 << (^uint(0) >> 63)) / 8 / 4) + 1
	_TLSFSizeofPointer         = unsafe.Sizeof(uintptr(0))

	ALBits uint32  = 4 // 16 bytes to fit up to v128
	ALSize uintptr = 1 << uintptr(ALBits)
	ALMask         = ALSize - 1

	// Overhead of a memory manager block.
	BlockOverhead = unsafe.Sizeof(BLOCK{})
	// Block constants. A block must have a minimum size of three pointers so it can hold `prev`,
	// `next` and `back` if free.
	BlockMinSize = ((3*_TLSFSizeofPointer + BlockOverhead + ALMask) & ^ALMask) - BlockOverhead
	// Maximum size of a memory manager block's payload.
	BlockMaxSize = (1 << 30) - BlockOverhead
	//BlockMaxSize = (1 << ((_TLSFAlignSizeLog2 + 1)*10)) - BlockOverhead

	// _TLSFDebug enables the internal consistency assertions (compile-time flag).
	_TLSFDebug = false

	_TLSFSLBits uint32 = 4                    // bits of the second-level index
	_TLSFSLSize uint32 = 1 << _TLSFSLBits     // second-level lists per first level
	_TLSFSBBits        = _TLSFSLBits + ALBits // log2 of the small-block threshold
	_TLSFSBSize uint32 = 1 << _TLSFSBBits     // sizes below this use the linear small-block map
	_TLSFFLBits        = 31 - _TLSFSBBits     // number of first levels

	// [00]: < 256B (SB)  [12]: < 1M
	// [01]: < 512B       [13]: < 2M
	// [02]: < 1K         [14]: < 4M
	// [03]: < 2K         [15]: < 8M
	// [04]: < 4K         [16]: < 16M
	// [05]: < 8K         [17]: < 32M
	// [06]: < 16K        [18]: < 64M
	// [07]: < 32K        [19]: < 128M
	// [08]: < 64K        [20]: < 256M
	// [09]: < 128K       [21]: < 512M
	// [10]: < 256K       [22]: <= 1G - OVERHEAD
	// [11]: < 512K
	// WASM VMs limit to 2GB total (currently), making one 1G block max
	// (or three 512M etc.) due to block overhead

	// Tags stored in otherwise unused alignment bits
	_TLSFFREE     uintptr = 1 << 0 // this block is free
	_TLSFLEFTFREE uintptr = 1 << 1 // the block to the left is free
	TagsMask              = _TLSFFREE | _TLSFLEFTFREE
)
   147  
   148  // Alloc allocates a block of memory that fits the size provided
   149  //goland:noinspection GoVetUnsafePointer
   150  func (a *Heap) Alloc(size uintptr) uintptr {
   151  	if a == nil {
   152  		panic("nil")
   153  	}
   154  	p := uintptr(unsafe.Pointer(a.allocateBlock(size)))
   155  	if p == 0 {
   156  		return 0
   157  	}
   158  	p = p + BlockOverhead
   159  	return p
   160  }
   161  
   162  // AllocZeroed allocates a block of memory that fits the size provided
   163  //goland:noinspection GoVetUnsafePointer
   164  func (a *Heap) AllocZeroed(size uintptr) uintptr {
   165  	p := uintptr(unsafe.Pointer(a.allocateBlock(size)))
   166  	if p == 0 {
   167  		return 0
   168  	}
   169  	p = p + BlockOverhead
   170  	Zero(unsafe.Pointer(p), size)
   171  	return p
   172  }
   173  
   174  // Realloc determines the best way to resize an allocation.
   175  func (a *Heap) Realloc(ptr uintptr, size uintptr) uintptr {
   176  	p := uintptr(unsafe.Pointer(a.moveBlock(checkUsedBlock(ptr), size)))
   177  	if p == 0 {
   178  		return 0
   179  	}
   180  	return p + BlockOverhead
   181  }
   182  
   183  // Free release the allocation back into the free list.
   184  //goland:noinspection GoVetUnsafePointer
   185  func (a *Heap) Free(ptr uintptr) {
   186  	//println("Free", uint(ptr))
   187  	//a.freeBlock((*tlsfBlock)(unsafe.Pointer(ptr - BlockOverhead)))
   188  	a.freeBlock(checkUsedBlock(ptr))
   189  }
   190  
   191  //goland:noinspection GoVetUnsafePointer
   192  func SizeOf(ptr uintptr) uintptr {
   193  	return ((*tlsfBlock)(unsafe.Pointer(ptr - BlockOverhead))).MMInfo & ^TagsMask
   194  }
   195  
// Bootstrap bootstraps the Allocator with the initial block of contiguous memory
// that at least fits the minimum required to fit the bitmap.
//
// The Heap header, the root control structure and the initial free block are
// all carved out of [start, end). grow may be nil, in which case the heap
// can never be extended beyond this initial region.
//goland:noinspection GoVetUnsafePointer
func Bootstrap(start, end uintptr, pages int32, grow Grow) *Heap {
	// Round start up to pointer alignment; the Heap header itself lives here.
	start = (start + unsafe.Alignof(unsafe.Pointer(nil)) - 1) &^ (unsafe.Alignof(unsafe.Pointer(nil)) - 1)

	//if a.T {
	//println("Bootstrap", "pages", pages, uint(start), uint(end), uint(end-start))
	//}
	// init allocator
	a := (*Heap)(unsafe.Pointer(start))
	*a = Heap{
		HeapStart: start,
		HeapEnd:   end,
		Stats: Stats{
			InitialPages: pages,
			Pages:        pages,
		},
		Grow: grow,
	}

	// init root
	// NOTE(review): this aligns `start` first and then adds sizeof(Heap), so
	// the root is only ALSize-aligned if unsafe.Sizeof(Heap{}) happens to be
	// a multiple of ALSize — confirm intended.
	rootOffset := unsafe.Sizeof(Heap{}) + ((start + ALMask) & ^ALMask)
	a.root = (*root)(unsafe.Pointer(rootOffset))
	a.root.init()

	// add initial memory: everything after the root structure becomes the
	// first free block (plus the zero-length tail block).
	a.addMemory(rootOffset+RootSize, end)
	return a
}
   226  
   227  // Memory manager
   228  
   229  // ╒════════════ Memory manager block layout (32-bit) ═════════════╕
   230  //    3                   2                   1
   231  //  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0  bits
   232  // ├─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┤
   233  // │                           MM info                             │ -4
   234  // ╞>ptr═══════════════════════════════════════════════════════════╡
   235  // │                              ...                              │
// BLOCK is the single-word header every block starts with: the payload size
// in the upper bits plus the FREE/LEFTFREE tags in the low alignment bits
// (see TagsMask).
type BLOCK struct {
	MMInfo uintptr
}
   239  
   240  // ╒════════════════════ Block layout (32-bit) ════════════════════╕
   241  //    3                   2                   1
   242  //  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0  bits
   243  // ├─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┼─┼─┤            ┐
   244  // │                          size                             │L│F│ ◄─┐ info   overhead
   245  // ╞>ptr═══════════════════════════════════════════════════════╧═╧═╡   │        ┘
   246  // │                        if free: ◄ prev                        │ ◄─┤ usize
   247  // ├───────────────────────────────────────────────────────────────┤   │
   248  // │                        if free: next ►                        │ ◄─┤
   249  // ├───────────────────────────────────────────────────────────────┤   │
   250  // │                             ...                               │   │ >= 0
   251  // ├───────────────────────────────────────────────────────────────┤   │
   252  // │                        if free: back ▲                        │ ◄─┘
   253  // └───────────────────────────────────────────────────────────────┘ >= MIN SIZE
   254  // F: FREE, L: LEFTFREE
// tlsfBlock is a block header plus the free-list linkage that occupies the
// start of the payload while the block is free.
type tlsfBlock struct {
	BLOCK
	// Previous free block, if any. Only valid if free, otherwise part of payload.
	//prev *Block
	prev uintptr
	// Next free block, if any. Only valid if free, otherwise part of payload.
	//next *Block
	next uintptr

	// If the block is free, there is a 'back'reference at its end pointing at its start.
}
   266  
// Gets the left block of a block. Only valid if the left block is free:
// it reads the 'back' reference the free left block stored in its own last
// word, which sits immediately before this block's header.
func (block *tlsfBlock) getFreeLeft() *tlsfBlock {
	return *(**tlsfBlock)(unsafe.Pointer(uintptr(unsafe.Pointer(block)) - _TLSFSizeofPointer))
}
   271  
// Gets the right block of a block by advancing to the right by its size.
// Always lands on a valid header because each memory region is terminated
// by a zero-length used tail block (see addMemory).
func (block *tlsfBlock) getRight() *tlsfBlock {
	return (*tlsfBlock)(unsafe.Pointer(uintptr(unsafe.Pointer(block)) + BlockOverhead + (block.MMInfo & ^TagsMask)))
}
   276  
   277  // ╒═════════════════════ Root layout (32-bit) ════════════════════╕
   278  //    3                   2                   1
   279  //  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0  bits
   280  // ├─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┤          ┐
   281  // │        0        |           flMap                            S│ ◄────┐
   282  // ╞═══════════════════════════════════════════════════════════════╡      │
   283  // │                           slMap[0] S                          │ ◄─┐  │
   284  // ├───────────────────────────────────────────────────────────────┤   │  │
   285  // │                           slMap[1]                            │ ◄─┤  │
   286  // ├───────────────────────────────────────────────────────────────┤  uint32 │
   287  // │                           slMap[22]                           │ ◄─┘  │
   288  // ╞═══════════════════════════════════════════════════════════════╡    usize
   289  // │                            head[0]                            │ ◄────┤
   290  // ├───────────────────────────────────────────────────────────────┤      │
   291  // │                              ...                              │ ◄────┤
   292  // ├───────────────────────────────────────────────────────────────┤      │
   293  // │                           head[367]                           │ ◄────┤
   294  // ╞═══════════════════════════════════════════════════════════════╡      │
   295  // │                             tail                              │ ◄────┘
   296  // └───────────────────────────────────────────────────────────────┘   SIZE   ┘
   297  // S: Small blocks map
// root is a view over the TLSF control structure laid out in raw memory:
// only flMap is a declared field; the second-level maps, free-list heads
// and tail pointer are reached via the offset constants below (see the
// layout diagram above).
type root struct {
	flMap uintptr // first-level bitmap: bit fl set => some slMap[fl] bit set
}
   301  
   302  func (r *root) init() {
   303  	r.flMap = 0
   304  	r.setTail(nil)
   305  	for fl := uintptr(0); fl < uintptr(_TLSFFLBits); fl++ {
   306  		r.setSL(fl, 0)
   307  		for sl := uint32(0); sl < _TLSFSLSize; sl++ {
   308  			r.setHead(fl, sl, nil)
   309  		}
   310  	}
   311  }
   312  
// Byte offsets, relative to the root address, of the pieces of the root
// structure; RootSize is its total footprint.
const (
	SLStart  = _TLSFSizeofPointer                                           // second-level bitmaps (one uint32 per first level)
	SLEnd    = SLStart + (uintptr(_TLSFFLBits) << _TLSFAlignU32)            // end of the second-level bitmaps
	HLStart  = (SLEnd + ALMask) &^ ALMask                                   // free-list heads, re-aligned to ALSize
	HLEnd    = HLStart + uintptr(_TLSFFLBits)*uintptr(_TLSFSLSize)*_TLSFSizeofPointer // end of the head array
	RootSize = HLEnd + _TLSFSizeofPointer                                   // plus the trailing tail pointer
)
   320  
// Gets the second level map of the specified first level.
// Reads the uint32 at offset SLStart + fl*4 from the root.
func (r *root) getSL(fl uintptr) uint32 {
	return *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(r)) + (fl << _TLSFAlignU32) + SLStart))
}
   325  
// Sets the second level map of the specified first level.
// Writes the uint32 at offset SLStart + fl*4 from the root.
func (r *root) setSL(fl uintptr, slMap uint32) {
	*(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(r)) + (fl << _TLSFAlignU32) + SLStart)) = slMap
}
   330  
// Gets the head of the free list for the specified combination of first and second level.
// The heads form a flat array indexed by fl*_TLSFSLSize + sl, starting at HLStart.
func (r *root) getHead(fl uintptr, sl uint32) *tlsfBlock {
	return *(**tlsfBlock)(unsafe.Pointer(uintptr(unsafe.Pointer(r)) + HLStart +
		(((fl << _TLSFSLBits) + uintptr(sl)) << _TLSFAlignSizeLog2)))
}
   336  
// Sets the head of the free list for the specified combination of first and second level.
// Mirrors getHead's indexing into the flat head array at HLStart.
func (r *root) setHead(fl uintptr, sl uint32, head *tlsfBlock) {
	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(r)) + HLStart +
		(((fl << _TLSFSLBits) + uintptr(sl)) << _TLSFAlignSizeLog2))) = uintptr(unsafe.Pointer(head))
}
   342  
// Gets the tail block (the zero-length used block terminating the most
// recently added memory region), stored at offset HLEnd.
func (r *root) getTail() *tlsfBlock {
	return *(**tlsfBlock)(unsafe.Pointer(uintptr(unsafe.Pointer(r)) + HLEnd))
}
   347  
// Sets the tail block, stored at offset HLEnd.
func (r *root) setTail(tail *tlsfBlock) {
	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(r)) + HLEnd)) = uintptr(unsafe.Pointer(tail))
}
   352  
// Inserts a previously used block back into the free list.
//
// Coalesces the block with its free right and left neighbors first, writes
// the 'back' reference at the block's end, then links the result at the
// head of its (fl, sl) free list and sets the corresponding bitmap bits.
func (a *Heap) insertBlock(block *tlsfBlock) {
	var (
		r         = a.root
		blockInfo = block.MMInfo
		right     = block.getRight()
		rightInfo = right.MMInfo
	)
	//(blockInfo & FREE)

	// merge with right block if also free
	if rightInfo&_TLSFFREE != 0 {
		a.removeBlock(right)
		blockInfo = blockInfo + BlockOverhead + (rightInfo & ^TagsMask) // keep block tags
		block.MMInfo = blockInfo
		right = block.getRight()
		rightInfo = right.MMInfo
		// 'back' is added below
	}

	// merge with left block if also free
	if blockInfo&_TLSFLEFTFREE != 0 {
		left := block.getFreeLeft()
		leftInfo := left.MMInfo
		if _TLSFDebug {
			assert(leftInfo&_TLSFFREE != 0, "must be free according to right tags")
		}
		a.removeBlock(left)
		block = left
		blockInfo = leftInfo + BlockOverhead + (blockInfo & ^TagsMask) // keep left tags
		block.MMInfo = blockInfo
		// 'back' is added below
	}

	// tell the right neighbor its left side is now free
	right.MMInfo = rightInfo | _TLSFLEFTFREE
	// reference to right is no longer used now, hence rightInfo is not synced

	// we now know the size of the block
	size := blockInfo & ^TagsMask

	// write the 'back' reference into the last word of the block, pointing
	// at its own header (later read by the right neighbor's getFreeLeft)
	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(right)) - _TLSFSizeofPointer)) = uintptr(unsafe.Pointer(block))

	// mapping_insert: compute the (fl, sl) bucket for this size
	var (
		fl uintptr
		sl uint32
	)
	if size < uintptr(_TLSFSBSize) {
		// small blocks live in first level 0, indexed linearly by size
		fl = 0
		sl = uint32(size >> ALBits)
	} else {
		const inv = _TLSFSizeofPointer*8 - 1
		boundedSize := min(size, BlockMaxSize)
		// fl = floor(log2(size)); sl = the next _TLSFSLBits bits below the
		// top bit (the xor strips that top bit)
		fl = inv - clz(boundedSize)
		sl = uint32((boundedSize >> (fl - uintptr(_TLSFSLBits))) ^ (1 << _TLSFSLBits))
		fl -= uintptr(_TLSFSBBits) - 1
	}

	// perform insertion at the head of the (fl, sl) list
	head := r.getHead(fl, sl)
	block.prev = 0
	block.next = uintptr(unsafe.Pointer(head))
	if head != nil {
		head.prev = uintptr(unsafe.Pointer(block))
	}
	r.setHead(fl, sl, block)

	// update first and second level maps
	r.flMap |= 1 << fl
	r.setSL(fl, r.getSL(fl)|(1<<sl))
}
   425  
// removeBlock unlinks a free block from its (fl, sl) free list, clearing
// the second- and first-level bitmap bits when the list becomes empty.
//goland:noinspection GoVetUnsafePointer
func (a *Heap) removeBlock(block *tlsfBlock) {
	r := a.root
	blockInfo := block.MMInfo
	if _TLSFDebug {
		assert(blockInfo&_TLSFFREE != 0, "must be free")
	}
	size := blockInfo & ^TagsMask
	if _TLSFDebug {
		assert(size >= BlockMinSize, "must be valid")
	}

	// mapping_insert: compute the (fl, sl) bucket this block lives in
	// (must match the computation used by insertBlock)
	var (
		fl uintptr
		sl uint32
	)
	if size < uintptr(_TLSFSBSize) {
		fl = 0
		sl = uint32(size >> ALBits)
	} else {
		const inv = _TLSFSizeofPointer*8 - 1
		boundedSize := min(size, BlockMaxSize)
		fl = inv - clz(boundedSize)
		sl = uint32((boundedSize >> (fl - uintptr(_TLSFSLBits))) ^ (1 << uintptr(_TLSFSLBits)))
		fl -= uintptr(_TLSFSBBits) - 1
	}
	if _TLSFDebug {
		assert(fl < uintptr(_TLSFFLBits) && sl < _TLSFSLSize, "fl/sl out of range")
	}

	// unlink the block from its prev/next free-list neighbors
	var (
		prev = block.prev
		next = block.next
	)
	if prev != 0 {
		(*tlsfBlock)(unsafe.Pointer(prev)).next = next
	}
	if next != 0 {
		(*tlsfBlock)(unsafe.Pointer(next)).prev = prev
	}

	// update head if we are removing it
	if block == r.getHead(fl, sl) {
		r.setHead(fl, sl, (*tlsfBlock)(unsafe.Pointer(next)))

		// clear second level map if head is empty now
		if next == 0 {
			slMap := r.getSL(fl)
			slMap &= ^(1 << sl)
			r.setSL(fl, slMap)

			// clear first level map if second level is empty now
			if slMap == 0 {
				r.flMap &= ^(1 << fl)
			}
		}
	}
	// note: does not alter left/back because it is likely that splitting
	// is performed afterwards, invalidating those changes. so, the caller
	// must perform those updates.
}
   489  
// Searches for a free block of at least the specified size.
// Returns nil when no suitable block exists (caller may then grow the heap).
func (a *Heap) searchBlock(size uintptr) *tlsfBlock {
	// mapping_search: like mapping_insert, except the size is first rounded
	// up to the next second-level boundary so that any block found in the
	// selected bucket is guaranteed to fit.
	var (
		fl uintptr
		sl uint32
		r  = a.root
	)
	if size < uintptr(_TLSFSBSize) {
		fl = 0
		sl = uint32(size >> ALBits)
	} else {
		const (
			halfMaxSize = BlockMaxSize >> 1 // don't round last fl
			inv         = _TLSFSizeofPointer*8 - 1
			invRound    = inv - uintptr(_TLSFSLBits)
		)

		// round up: add (2^(log2(size) - SLBits)) - 1, one sl granule
		var requestSize uintptr
		if size < halfMaxSize {
			requestSize = size + (1 << (invRound - clz(size))) - 1
		} else {
			requestSize = size
		}

		fl = inv - clz(requestSize)
		sl = uint32((requestSize >> (fl - uintptr(_TLSFSLBits))) ^ (1 << _TLSFSLBits))
		fl -= uintptr(_TLSFSBBits) - 1
	}
	if _TLSFDebug {
		assert(fl < uintptr(_TLSFFLBits) && sl < _TLSFSLSize, "fl/sl out of range")
	}

	// search second level: any bucket at sl or above within this fl fits
	var (
		slMap = r.getSL(fl) & (^uint32(0) << sl)
		head  *tlsfBlock
	)
	if slMap == 0 {
		// nothing here: search the next larger non-empty first level
		flMap := r.flMap & (^uintptr(0) << (fl + 1))
		if flMap == 0 {
			head = nil
		} else {
			fl = ctz(flMap)
			slMap = r.getSL(fl)
			if _TLSFDebug {
				assert(slMap != 0, "can't be zero if fl points here")
			}
			head = r.getHead(fl, ctz32(slMap))
		}
	} else {
		head = r.getHead(fl, ctz32(slMap))
	}

	return head
}
   547  
// prepareBlock finalizes a block that is about to be handed out: splits off
// a spare free block when the remainder is large enough, otherwise marks the
// whole block as used.
func (a *Heap) prepareBlock(block *tlsfBlock, size uintptr) {
	blockInfo := block.MMInfo
	if _TLSFDebug {
		assert(((size+BlockOverhead)&ALMask) == 0,
			"size must be aligned so the New block is")
	}
	// split if the block can hold another MINSIZE block incl. overhead
	remaining := (blockInfo & ^TagsMask) - size
	if remaining >= BlockOverhead+BlockMinSize {
		block.MMInfo = size | (blockInfo & _TLSFLEFTFREE) // also discards FREE

		spare := (*tlsfBlock)(unsafe.Pointer(uintptr(unsafe.Pointer(block)) + BlockOverhead + size))
		spare.MMInfo = (remaining - BlockOverhead) | _TLSFFREE // not LEFTFREE
		a.insertBlock(spare)                                   // also sets 'back'

		// otherwise tag block as no longer FREE and right as no longer LEFTFREE
	} else {
		block.MMInfo = blockInfo & ^_TLSFFREE
		block.getRight().MMInfo &= ^_TLSFLEFTFREE
	}
}
   569  
   570  // growMemory grows the pool by a number of 64kb pages to fit the required size
   571  func (a *Heap) growMemory(size uintptr) bool {
   572  	if a.Grow == nil {
   573  		return false
   574  	}
   575  	// Here, both rounding performed in searchBlock ...
   576  	const halfMaxSize = BlockMaxSize >> 1
   577  	if size < halfMaxSize { // don't round last fl
   578  		const invRound = (_TLSFSizeofPointer*8 - 1) - uintptr(_TLSFSLBits)
   579  		size += (1 << (invRound - clz(size))) - 1
   580  	}
   581  	// and additional BLOCK_OVERHEAD must be taken into account. If we are going
   582  	// to merge with the tail block, that's one time, otherwise it's two times.
   583  	var (
   584  		pagesBefore         = a.Pages
   585  		offset      uintptr = 0
   586  	)
   587  	if BlockOverhead != uintptr(unsafe.Pointer(a.root.getTail())) {
   588  		offset = 1
   589  	}
   590  	size += BlockOverhead << ((uintptr(pagesBefore) << 16) - offset)
   591  	pagesNeeded := ((int32(size) + 0xffff) & ^0xffff) >> 16
   592  
   593  	addedPages, start, end := a.Grow(pagesBefore, pagesNeeded, size)
   594  	if start == 0 || end == 0 {
   595  		return false
   596  	}
   597  	if addedPages == 0 {
   598  		addedPages = int32((end - start) / PageSize)
   599  		if (end-start)%PageSize > 0 {
   600  			addedPages++
   601  		}
   602  	}
   603  	a.Pages += addedPages
   604  	a.HeapEnd = end
   605  	a.addMemory(start, end)
   606  	return true
   607  }
   608  
// addMemory adds the newly allocated memory [start, end) to the Allocator:
// the region becomes one large free block followed by a zero-length used
// tail block, merging with the previous tail when the region is adjacent.
//goland:noinspection GoVetUnsafePointer
func (a *Heap) addMemory(start, end uintptr) bool {
	if _TLSFDebug {
		assert(start <= end, "start must be <= end")
	}
	// align start so that start+BlockOverhead is ALSize-aligned; truncate end
	start = ((start + BlockOverhead + ALMask) & ^ALMask) - BlockOverhead
	end &= ^ALMask

	var tail = a.root.getTail()
	var tailInfo uintptr = 0
	if tail != nil { // more memory
		if _TLSFDebug {
			assert(start >= uintptr(unsafe.Pointer(tail))+BlockOverhead, "out of bounds")
		}

		// merge with current tail if adjacent
		// NOTE(review): adjacency is detected at ALSize bytes past the old
		// tail header — confirm this matches the start alignment above.
		const offsetToTail = ALSize
		if start-offsetToTail == uintptr(unsafe.Pointer(tail)) {
			start -= offsetToTail
			tailInfo = tail.MMInfo
		} else {
			// We don't do this, but a user might `memory.Grow` manually
			// leading to non-adjacent pages managed by Allocator.
		}
	} else if _TLSFDebug { // first memory
		assert(start >= uintptr(unsafe.Pointer(a.root))+RootSize, "starts after root")
	}

	// check if size is large enough for a free block and the tail block
	var size = end - start
	if size < BlockOverhead+BlockMinSize+BlockOverhead {
		return false
	}

	// left size is total minus its own and the zero-length tail's header
	var (
		leftSize = size - 2*BlockOverhead
		left     = (*tlsfBlock)(unsafe.Pointer(start))
	)
	left.MMInfo = leftSize | _TLSFFREE | (tailInfo & _TLSFLEFTFREE)
	left.prev = 0
	left.next = 0

	// tail is a zero-length used block marking the end of the region
	tail = (*tlsfBlock)(unsafe.Pointer(start + BlockOverhead + leftSize))
	tail.MMInfo = 0 | _TLSFLEFTFREE
	a.root.setTail(tail)

	a.FreeSize += int64(leftSize)
	a.HeapSize += int64(end - start)

	// also merges with free left before tail / sets 'back'
	a.insertBlock(left)

	return true
}
   666  
   667  // Computes the size (excl. header) of a block.
   668  func computeSize(size uintptr) uintptr {
   669  	// Size must be large enough and aligned minus preceeding overhead
   670  	if size <= BlockMinSize {
   671  		return BlockMinSize
   672  	} else {
   673  		return ((size + BlockOverhead + ALMask) & ^ALMask) - BlockOverhead
   674  	}
   675  }
   676  
// Prepares and checks an allocation size: panics when the request exceeds
// BlockMaxSize (the largest payload a block header can encode), otherwise
// returns the actual payload size via computeSize.
func prepareSize(size uintptr) uintptr {
	if size > BlockMaxSize {
		panic("allocation too large")
	}
	return computeSize(size)
}
   684  
// Allocates a block of the specified size, growing the heap when no
// suitable free block exists. Returns nil on out-of-memory.
func (a *Heap) allocateBlock(size uintptr) *tlsfBlock {
	var payloadSize = prepareSize(size)
	var block = a.searchBlock(payloadSize)
	if block == nil {
		// no fit: try to grow, then search again
		if !a.growMemory(payloadSize) {
			return nil
		}
		block = a.searchBlock(payloadSize)
		if _TLSFDebug {
			assert(block != nil, "block must be found now")
		}
		if block == nil {
			return nil
		}
	}
	if _TLSFDebug {
		assert((block.MMInfo & ^TagsMask) >= payloadSize, "must fit")
	}

	a.removeBlock(block)
	a.prepareBlock(block, payloadSize)

	// update stats with the actual (possibly larger) payload size
	payloadSize = block.MMInfo & ^TagsMask
	a.AllocSize += int64(payloadSize)
	if a.AllocSize > a.PeakAllocSize {
		a.PeakAllocSize = a.AllocSize
	}
	a.FreeSize -= int64(payloadSize)
	a.Allocs++

	// return block
	return block
}
   720  
// reallocateBlock resizes an allocation, preferring cheap strategies first:
// split in place when the new size still fits, merge with a free right
// neighbor when growing, and only as a last resort move the data.
func (a *Heap) reallocateBlock(block *tlsfBlock, size uintptr) *tlsfBlock {
	var (
		payloadSize = prepareSize(size)
		blockInfo   = block.MMInfo
		blockSize   = blockInfo & ^TagsMask
	)

	// possibly split and update runtime size if it still fits
	if payloadSize <= blockSize {
		a.prepareBlock(block, payloadSize)
		//if (isDefined(ASC_RTRACE)) {
		//	if (payloadSize != blockSize) onresize(block, BLOCK_OVERHEAD + blockSize);
		//}
		return block
	}

	// merge with right free block if merger is large enough
	var (
		right     = block.getRight()
		rightInfo = right.MMInfo
	)
	if rightInfo&_TLSFFREE != 0 {
		mergeSize := blockSize + BlockOverhead + (rightInfo & ^TagsMask)
		if mergeSize >= payloadSize {
			a.removeBlock(right)
			block.MMInfo = (blockInfo & TagsMask) | mergeSize
			a.prepareBlock(block, payloadSize)
			//if (isDefined(ASC_RTRACE)) onresize(block, BLOCK_OVERHEAD + blockSize);
			return block
		}
	}

	// otherwise, move the block (allocate + copy + free)
	return a.moveBlock(block, size)
}
   756  
// moveBlock allocates a new block of newSize, copies the old payload into
// it and frees the old block. Returns nil on out-of-memory, in which case
// the old block is left untouched.
func (a *Heap) moveBlock(block *tlsfBlock, newSize uintptr) *tlsfBlock {
	newBlock := a.allocateBlock(newSize)
	if newBlock == nil {
		return nil
	}

	// NOTE(review): copies the ORIGINAL payload size; callers must only
	// move to an equal-or-larger block or this overruns the destination.
	Copy(unsafe.Pointer(uintptr(unsafe.Pointer(newBlock))+BlockOverhead),
		unsafe.Pointer(uintptr(unsafe.Pointer(block))+BlockOverhead),
		block.MMInfo & ^TagsMask)

	a.freeBlock(block)
	//maybeFreeBlock(a, block)

	return newBlock
}
   772  
   773  func (a *Heap) freeBlock(block *tlsfBlock) {
   774  	size := block.MMInfo & ^TagsMask
   775  	a.FreeSize += int64(size)
   776  	a.AllocSize -= int64(size)
   777  	a.Allocs--
   778  
   779  	block.MMInfo = block.MMInfo | _TLSFFREE
   780  	a.insertBlock(block)
   781  }
   782  
   783  func min(l, r uintptr) uintptr {
   784  	if l < r {
   785  		return l
   786  	}
   787  	return r
   788  }
   789  
   790  func clz(value uintptr) uintptr {
   791  	return uintptr(bits.LeadingZeros(uint(value)))
   792  }
   793  
   794  func ctz(value uintptr) uintptr {
   795  	return uintptr(bits.TrailingZeros(uint(value)))
   796  }
   797  
   798  func ctz32(value uint32) uint32 {
   799  	return uint32(bits.TrailingZeros32(value))
   800  }
   801  
   802  //goland:noinspection GoVetUnsafePointer
   803  func checkUsedBlock(ptr uintptr) *tlsfBlock {
   804  	block := (*tlsfBlock)(unsafe.Pointer(ptr - BlockOverhead))
   805  	if !(ptr != 0 && ((ptr & ALMask) == 0) && ((block.MMInfo & _TLSFFREE) == 0)) {
   806  		panic("used block is not valid to be freed or reallocated")
   807  	}
   808  	return block
   809  }
   810  
// PrintDebugInfo dumps the compile-time layout constants of the allocator
// via the builtin println, for debugging the TLSF configuration.
func PrintDebugInfo() {
	println("ALIGNOF_U32		", int64(_TLSFAlignU32))
	println("ALIGN_SIZE_LOG2	", int64(_TLSFAlignSizeLog2))
	println("U32_MAX			", ^uint32(0))
	println("PTR_MAX			", ^uintptr(0))
	println("AL_BITS			", int64(ALBits))
	println("AL_SIZE			", int64(ALSize))
	println("AL_MASK			", int64(ALMask))
	println("BLOCK_OVERHEAD	", int64(BlockOverhead))
	println("BLOCK_MAXSIZE	", int64(BlockMaxSize))
	println("SL_BITS			", int64(_TLSFSLBits))
	println("SL_SIZE			", int64(_TLSFSLSize))
	println("SB_BITS			", int64(_TLSFSBBits))
	println("SB_SIZE			", int64(_TLSFSBSize))
	println("FL_BITS			", int64(_TLSFFLBits))
	println("FREE			", int64(_TLSFFREE))
	println("LEFTFREE		", int64(_TLSFLEFTFREE))
	println("TAGS_MASK		", int64(TagsMask))
	println("BLOCK_MINSIZE	", int64(BlockMinSize))
	println("SL_START		", int64(SLStart))
	println("SL_END			", int64(SLEnd))
	println("HL_START		", int64(HLStart))
	println("HL_END			", int64(HLEnd))
	println("ROOT_SIZE		", int64(RootSize))
}