// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// memDebug enables the consistency checks in memCheck. It is a compile-time
// constant so the checks cost nothing when disabled.
const memDebug = false

var bloc uintptr    // current program break (end of the data segment)
var blocMax uintptr // highest break actually granted by the kernel via brk_
var memlock mutex   // guards bloc, blocMax, and memFreelist

// memHdr is a free-block header, stored in the first bytes of each free
// block itself (an intrusive free list).
type memHdr struct {
	next memHdrPtr // next free block, in ascending address order
	size uintptr   // size of this free block in bytes (multiple of _PAGESIZE)
}

var memFreelist memHdrPtr // sorted in ascending order

// memHdrPtr holds the address of a memHdr as a bare uintptr so the links
// of the free list are not treated as heap pointers.
type memHdrPtr uintptr

func (p memHdrPtr) ptr() *memHdr   { return (*memHdr)(unsafe.Pointer(p)) }
func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }

// memAlloc returns n bytes (rounded up to a page multiple) taken from the
// first free-list block large enough (first fit), or freshly obtained from
// sbrk if no block fits. Caller must hold memlock.
func memAlloc(n uintptr) unsafe.Pointer {
	n = memRound(n)
	var prevp *memHdr
	for p := memFreelist.ptr(); p != nil; p = p.next.ptr() {
		if p.size >= n {
			if p.size == n {
				// Exact fit: unlink the whole block.
				if prevp != nil {
					prevp.next = p.next
				} else {
					memFreelist = p.next
				}
			} else {
				// Split: shrink the block and hand out its tail, so the
				// free-list links do not need to be rewritten.
				p.size -= n
				p = (*memHdr)(add(unsafe.Pointer(p), p.size))
			}
			// Zero the header bytes so the returned memory is all zero.
			*p = memHdr{}
			return unsafe.Pointer(p)
		}
		prevp = p
	}
	return sbrk(n)
}

// memFree returns the n-byte block at ap to the free list, keeping the list
// sorted by address and coalescing with adjacent free blocks on either side.
// The block is zeroed first so free memory is always zero (memCheck relies
// on this). Caller must hold memlock.
func memFree(ap unsafe.Pointer, n uintptr) {
	n = memRound(n)
	memclrNoHeapPointers(ap, n)
	bp := (*memHdr)(ap)
	bp.size = n
	bpn := uintptr(ap)
	if memFreelist == 0 {
		// Empty list: bp becomes the only element.
		bp.next = 0
		memFreelist.set(bp)
		return
	}
	p := memFreelist.ptr()
	if bpn < uintptr(unsafe.Pointer(p)) {
		// bp precedes the current head: insert at the front, merging with
		// the old head if the two are contiguous.
		memFreelist.set(bp)
		if bpn+bp.size == uintptr(unsafe.Pointer(p)) {
			bp.size += p.size
			bp.next = p.next
			*p = memHdr{}
		} else {
			bp.next.set(p)
		}
		return
	}
	// Walk to the block after which bp belongs; exits with p the last
	// block if bp lies beyond the whole list (p.next == 0 then).
	for ; p.next != 0; p = p.next.ptr() {
		if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) {
			break
		}
	}
	// Merge with the successor if contiguous (the comparison is safely
	// false when p.next == 0), otherwise just link to it.
	if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) {
		bp.size += p.next.ptr().size
		bp.next = p.next.ptr().next
		*p.next.ptr() = memHdr{}
	} else {
		bp.next = p.next
	}
	// Merge with the predecessor if contiguous, otherwise insert after it.
	if uintptr(unsafe.Pointer(p))+p.size == bpn {
		p.size += bp.size
		p.next = bp.next
		*bp = memHdr{}
	} else {
		p.next.set(bp)
	}
}

// memCheck verifies free-list invariants when memDebug is on: no self-loop,
// strictly ascending addresses, no overlapping blocks, and every byte past
// each block's header still zero. Throws on any violation.
func memCheck() {
	if memDebug == false {
		return
	}
	for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() {
		if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) {
			print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n")
			throw("mem: infinite loop")
		}
		if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) {
			print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n")
			throw("mem: unordered list")
		}
		if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) {
			print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n")
			throw("mem: overlapping blocks")
		}
		for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) {
			if *(*byte)(b) != 0 {
				print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n")
				throw("mem: uninitialised memory")
			}
		}
	}
}

// memRound rounds p up to the next multiple of _PAGESIZE.
func memRound(p uintptr) uintptr {
	return (p + _PAGESIZE - 1) &^ (_PAGESIZE - 1)
}

// initBloc initializes the break to the page-rounded end of the program's
// static data.
func initBloc() {
	bloc = memRound(firstmoduledata.end)
	blocMax = bloc
}

// sbrk grows the address space by n (page-rounded) bytes and returns the
// old break, or nil if the kernel refuses. blocMax tracks how far the
// kernel break has actually been moved, so shrinking bloc in sysFree lets
// later sbrk calls reuse the space without another system call.
// Caller must hold memlock.
func sbrk(n uintptr) unsafe.Pointer {
	// Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c
	bl := bloc
	n = memRound(n)
	if bl+n > blocMax {
		if brk_(unsafe.Pointer(bl+n)) < 0 {
			return nil
		}
		blocMax = bl + n
	}
	bloc += n
	return unsafe.Pointer(bl)
}

// sysAlloc allocates n bytes of zeroed memory for the runtime and charges
// them to sysStat. Returns nil on failure.
func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
	lock(&memlock)
	p := memAlloc(n)
	memCheck()
	unlock(&memlock)
	if p != nil {
		sysStat.add(int64(n))
	}
	return p
}

// sysFree returns the n bytes at v to the allocator and un-charges sysStat.
func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
	sysStat.add(-int64(n))
	lock(&memlock)
	if uintptr(v)+n == bloc {
		// Address range being freed is at the end of memory,
		// so record a new lower value for end of memory.
		// Can't actually shrink address space because segment is shared.
		memclrNoHeapPointers(v, n)
		bloc -= n
	} else {
		memFree(v, n)
		memCheck()
	}
	unlock(&memlock)
}

// sysUnused is a no-op on Plan 9: there is no way to return pages to the
// kernel from a shared segment.
func sysUnused(v unsafe.Pointer, n uintptr) {
}

func sysUsed(v unsafe.Pointer, n uintptr) {
}

func sysHugePage(v unsafe.Pointer, n uintptr) {
}

// sysMap only updates statistics; see the comment in the body.
func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
	// sysReserve has already allocated all heap memory,
	// but has not adjusted stats.
	sysStat.add(int64(n))
}

func sysFault(v unsafe.Pointer, n uintptr) {
}

// sysReserve obtains n bytes of address space. If the hint v is exactly the
// current break it extends the segment with sbrk; otherwise (only when no
// specific address was requested) it falls back to the free list.
func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
	lock(&memlock)
	var p unsafe.Pointer
	if uintptr(v) == bloc {
		// Address hint is the current end of memory,
		// so try to extend the address space.
		p = sbrk(n)
	}
	if p == nil && v == nil {
		p = memAlloc(n)
		memCheck()
	}
	unlock(&memlock)
	return p
}