// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package memory implements a memory allocator.
//
// # Build status
//
// available at https://modern-c.appspot.com/-/builder/?importpath=modernc.org%2fmemory
//
// # Changelog
//
// 2017-10-03 Added alternative, unsafe.Pointer-based API.
//
// # Benchmarks
//
// jnml@3900x:~/src/modernc.org/memory$ date ; go version ; go test -run @ -bench . -benchmem |& tee log
// Mon Sep 25 16:02:02 CEST 2023
// go version go1.21.1 linux/amd64
// goos: linux
// goarch: amd64
// pkg: modernc.org/memory
// cpu: AMD Ryzen 9 3900X 12-Core Processor
// BenchmarkFree16-24              123506772    9.802 ns/op    0 B/op    0 allocs/op
// BenchmarkFree32-24               73853230    15.08 ns/op    0 B/op    0 allocs/op
// BenchmarkFree64-24               43070334    25.15 ns/op    0 B/op    0 allocs/op
// BenchmarkCalloc16-24             59353304    18.92 ns/op    0 B/op    0 allocs/op
// BenchmarkCalloc32-24             39415004    29.00 ns/op    0 B/op    0 allocs/op
// BenchmarkCalloc64-24             35825725    32.02 ns/op    0 B/op    0 allocs/op
// BenchmarkGoCalloc16-24           38274313    26.99 ns/op   16 B/op    1 allocs/op
// BenchmarkGoCalloc32-24           44590477    33.06 ns/op   32 B/op    1 allocs/op
// BenchmarkGoCalloc64-24           44233016    37.20 ns/op   64 B/op    1 allocs/op
// BenchmarkMalloc16-24            145736911    7.720 ns/op    0 B/op    0 allocs/op
// BenchmarkMalloc32-24            128898334    7.887 ns/op    0 B/op    0 allocs/op
// BenchmarkMalloc64-24            149569483    7.994 ns/op    0 B/op    0 allocs/op
// BenchmarkUintptrFree16-24       117043012    9.205 ns/op    0 B/op    0 allocs/op
// BenchmarkUintptrFree32-24        77399617    14.20 ns/op    0 B/op    0 allocs/op
// BenchmarkUintptrFree64-24        48770785    25.04 ns/op    0 B/op    0 allocs/op
// BenchmarkUintptrCalloc16-24      79257636    15.44 ns/op    0 B/op    0 allocs/op
// BenchmarkUintptrCalloc32-24      49644562    23.62 ns/op    0 B/op    0 allocs/op
// BenchmarkUintptrCalloc64-24      39854710    28.22 ns/op    0 B/op    0 allocs/op
// BenchmarkUintptrMalloc16-24     252987727    4.525 ns/op    0 B/op    0 allocs/op
// BenchmarkUintptrMalloc32-24     241423840    4.433 ns/op    0 B/op    0 allocs/op
// BenchmarkUintptrMalloc64-24     256450324    4.669 ns/op    0 B/op    0 allocs/op
// PASS
// ok      modernc.org/memory      93.178s
// jnml@3900x:~/src/modernc.org/memory$
package memory // import "modernc.org/memory"

import (
	"fmt"
	"math/bits"
	"os"
	"reflect"
	"unsafe"
)

const (
	headerSize     = unsafe.Sizeof(page{})
	mallocAllign   = 2 * unsafe.Sizeof(uintptr(0))
	maxSlotSize    = 1 << maxSlotSizeLog
	maxSlotSizeLog = pageSizeLog - 2
	pageAvail      = pageSize - headerSize
	pageMask       = pageSize - 1
	pageSize       = 1 << pageSizeLog
)

func init() {
	if unsafe.Sizeof(page{})%mallocAllign != 0 {
		panic("internal error")
	}
}

// roundup rounds n up to a multiple of m: if n%m != 0 { n += m - n%m }. m
// must be a power of 2.
func roundup(n, m int) int { return (n + m - 1) &^ (m - 1) }

type node struct {
	prev, next uintptr // *node
}

type page struct {
	brk  int
	log  uint
	size int
	used int
}

// Allocator allocates and frees memory. Its zero value is ready for use. The
// exported counters are updated only when build tag memory.counters is
// present.
type Allocator struct {
	Allocs int                  // # of allocs.
	Bytes  int                  // Asked from OS.
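	// The fields below are bookkeeping: cap[log], lists[log] and pages[log]
	// are indexed by the log2 of the slot size and hold, respectively, the
	// number of slots a shared page of that class contains, the head of its
	// free list, and the shared page currently being carved up, if any. regs
	// records every page mmapped from the OS so that Close can unmap it.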
	cap    [64]int
	lists  [64]uintptr          // *node
	Mmaps  int                  // Asked from OS.
	pages  [64]uintptr          // *page
	regs   map[uintptr]struct{} // map[*page]struct{}
}

func (a *Allocator) mmap(size int) (uintptr /* *page */, error) {
	p, size, err := mmap(size)
	if err != nil {
		return 0, err
	}

	if counters {
		a.Mmaps++
		a.Bytes += size
	}
	if a.regs == nil {
		a.regs = map[uintptr]struct{}{}
	}
	(*page)(unsafe.Pointer(p)).size = size
	a.regs[p] = struct{}{}
	return p, nil
}

func (a *Allocator) newPage(size int) (uintptr /* *page */, error) {
	size += int(headerSize)
	p, err := a.mmap(size)
	if err != nil {
		return 0, err
	}

	(*page)(unsafe.Pointer(p)).log = 0
	return p, nil
}

func (a *Allocator) newSharedPage(log uint) (uintptr /* *page */, error) {
	if a.cap[log] == 0 {
		a.cap[log] = int(pageAvail) / (1 << log)
	}
	size := int(headerSize) + a.cap[log]<<log
	p, err := a.mmap(size)
	if err != nil {
		return 0, err
	}

	a.pages[log] = p
	(*page)(unsafe.Pointer(p)).log = log
	return p, nil
}

func (a *Allocator) unmap(p uintptr /* *page */) error {
	delete(a.regs, p)
	if counters {
		a.Mmaps--
	}
	return unmap(p, (*page)(unsafe.Pointer(p)).size)
}

// UintptrCalloc is like Calloc except it returns an uintptr.
func (a *Allocator) UintptrCalloc(size int) (r uintptr, err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Calloc(%#x) %#x, %v\n", size, r, err)
		}()
	}
	if r, err = a.UintptrMalloc(size); r == 0 || err != nil {
		return 0, err
	}
	b := ((*rawmem)(unsafe.Pointer(r)))[:size:size]
	for i := range b {
		b[i] = 0
	}
	return r, nil
}

// UintptrFree is like Free except its argument is an uintptr, which must have
// been acquired from UintptrCalloc or UintptrMalloc or UintptrRealloc.
func (a *Allocator) UintptrFree(p uintptr) (err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Free(%#x) %v\n", p, err)
		}()
	}
	if p == 0 {
		return nil
	}

	if counters {
		a.Allocs--
	}
	pg := p &^ uintptr(pageMask)
	log := (*page)(unsafe.Pointer(pg)).log
	if log == 0 {
		if counters {
			a.Bytes -= (*page)(unsafe.Pointer(pg)).size
		}
		return a.unmap(pg)
	}

	(*node)(unsafe.Pointer(p)).prev = 0
	(*node)(unsafe.Pointer(p)).next = a.lists[log]
	if next := (*node)(unsafe.Pointer(p)).next; next != 0 {
		(*node)(unsafe.Pointer(next)).prev = p
	}
	a.lists[log] = p
	(*page)(unsafe.Pointer(pg)).used--
	if (*page)(unsafe.Pointer(pg)).used != 0 {
		return nil
	}

	for i := 0; i < (*page)(unsafe.Pointer(pg)).brk; i++ {
		n := pg + headerSize + uintptr(i)<<log
		next := (*node)(unsafe.Pointer(n)).next
		prev := (*node)(unsafe.Pointer(n)).prev
		switch {
		case prev == 0:
			a.lists[log] = next
			if next != 0 {
				(*node)(unsafe.Pointer(next)).prev = 0
			}
		case next == 0:
			(*node)(unsafe.Pointer(prev)).next = 0
		default:
			(*node)(unsafe.Pointer(prev)).next = next
			(*node)(unsafe.Pointer(next)).prev = prev
		}
	}

	if a.pages[log] == pg {
		a.pages[log] = 0
	}
	if counters {
		a.Bytes -= (*page)(unsafe.Pointer(pg)).size
	}
	return a.unmap(pg)
}

// UintptrMalloc is like Malloc except it returns an uintptr.
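//
// A minimal usage sketch, given an Allocator a (the size 16 is arbitrary and
// error handling is elided):
//
//	p, _ := a.UintptrMalloc(16) // at least 16 usable bytes at p
//	// ... use the memory at p ...
//	_ = a.UintptrFree(p) // return the block to the allocator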
func (a *Allocator) UintptrMalloc(size int) (r uintptr, err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Malloc(%#x) %#x, %v\n", size, r, err)
		}()
	}
	if size < 0 {
		panic("invalid malloc size")
	}

	if size == 0 {
		return 0, nil
	}

	if counters {
		a.Allocs++
	}
	log := uint(bits.Len(uint((size+int(mallocAllign)-1)&^int(mallocAllign-1) - 1)))
	if log > maxSlotSizeLog {
		p, err := a.newPage(size)
		if err != nil {
			return 0, err
		}

		return p + headerSize, nil
	}

	if a.lists[log] == 0 && a.pages[log] == 0 {
		if _, err := a.newSharedPage(log); err != nil {
			return 0, err
		}
	}

	if p := a.pages[log]; p != 0 {
		(*page)(unsafe.Pointer(p)).used++
		(*page)(unsafe.Pointer(p)).brk++
		if (*page)(unsafe.Pointer(p)).brk == a.cap[log] {
			a.pages[log] = 0
		}
		return p + headerSize + uintptr((*page)(unsafe.Pointer(p)).brk-1)<<log, nil
	}

	n := a.lists[log]
	p := n &^ uintptr(pageMask)
	a.lists[log] = (*node)(unsafe.Pointer(n)).next
	if next := (*node)(unsafe.Pointer(n)).next; next != 0 {
		(*node)(unsafe.Pointer(next)).prev = 0
	}
	(*page)(unsafe.Pointer(p)).used++
	return n, nil
}

// UintptrRealloc is like Realloc except its first argument is an uintptr,
// which must have been returned from UintptrCalloc, UintptrMalloc or
// UintptrRealloc.
func (a *Allocator) UintptrRealloc(p uintptr, size int) (r uintptr, err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Realloc(%#x, %#x) %#x, %v\n", p, size, r, err)
		}()
	}
	switch {
	case p == 0:
		return a.UintptrMalloc(size)
	case size == 0 && p != 0:
		return 0, a.UintptrFree(p)
	}

	us := UintptrUsableSize(p)
	if us >= size {
		return p, nil
	}

	if r, err = a.UintptrMalloc(size); err != nil {
		return 0, err
	}

	if us < size {
		size = us
	}
	copy((*rawmem)(unsafe.Pointer(r))[:size:size], (*rawmem)(unsafe.Pointer(p))[:size:size])
	return r, a.UintptrFree(p)
}

// UintptrUsableSize is like UsableSize except its argument is an uintptr,
// which must have been returned from UintptrCalloc, UintptrMalloc or
// UintptrRealloc.
func UintptrUsableSize(p uintptr) (r int) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "UsableSize(%#x) %#x\n", p, r)
		}()
	}
	if p == 0 {
		return 0
	}

	return usableSize(p)
}

func usableSize(p uintptr) (r int) {
	pg := p &^ uintptr(pageMask)
	if log := (*page)(unsafe.Pointer(pg)).log; log != 0 {
		return 1 << log
	}

	return (*page)(unsafe.Pointer(pg)).size - int(headerSize)
}

// Calloc is like Malloc except the allocated memory is zeroed.
func (a *Allocator) Calloc(size int) (r []byte, err error) {
	p, err := a.UintptrCalloc(size)
	if err != nil {
		return nil, err
	}

	var b []byte
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	sh.Cap = usableSize(p)
	sh.Data = p
	sh.Len = size
	return b, nil
}

// Close releases all OS resources used by a and sets it to its zero value.
//
// It's not necessary to Close the Allocator when exiting a process.
func (a *Allocator) Close() (err error) {
	for p := range a.regs {
		if e := a.unmap(p); e != nil && err == nil {
			err = e
		}
	}
	*a = Allocator{}
	return err
}

// Free deallocates memory (as in C.free). The argument of Free must have been
// acquired from Calloc or Malloc or Realloc.
func (a *Allocator) Free(b []byte) (err error) {
	if b = b[:cap(b)]; len(b) == 0 {
		return nil
	}

	return a.UintptrFree(uintptr(unsafe.Pointer(&b[0])))
}

// Malloc allocates size bytes and returns a byte slice of the allocated
// memory. The memory is not initialized. Malloc panics for size < 0 and
// returns (nil, nil) for zero size.
//
// It's ok to reslice the returned slice but the result of appending to it
// cannot be passed to Free or Realloc as it may refer to a different backing
// array afterwards.
func (a *Allocator) Malloc(size int) (r []byte, err error) {
	p, err := a.UintptrMalloc(size)
	if p == 0 || err != nil {
		return nil, err
	}

	sh := (*reflect.SliceHeader)(unsafe.Pointer(&r))
	sh.Cap = usableSize(p)
	sh.Data = p
	sh.Len = size
	return r, nil
}

// Realloc changes the size of the backing array of b to size bytes or returns
// an error, if any. The contents will be unchanged in the range from the
// start of the region up to the minimum of the old and new sizes. If the new
// size is larger than the old size, the added memory will not be initialized.
// If b's backing array is of zero size, then the call is equivalent to
// Malloc(size), for all values of size; if size is equal to zero, and b's
// backing array is not of zero size, then the call is equivalent to Free(b).
// Unless b's backing array is of zero size, it must have been returned by an
// earlier call to Malloc, Calloc or Realloc. If the area pointed to was
// moved, a Free(b) is done.
func (a *Allocator) Realloc(b []byte, size int) (r []byte, err error) {
	var p uintptr
	if b = b[:cap(b)]; len(b) != 0 {
		p = uintptr(unsafe.Pointer(&b[0]))
	}
	if p, err = a.UintptrRealloc(p, size); p == 0 || err != nil {
		return nil, err
	}

	sh := (*reflect.SliceHeader)(unsafe.Pointer(&r))
	sh.Cap = usableSize(p)
	sh.Data = p
	sh.Len = size
	return r, nil
}

// UsableSize reports the size of the memory block allocated at p, which must
// point to the first byte of a slice returned from Calloc, Malloc or Realloc.
// The allocated memory block size can be larger than the size originally
// requested from Calloc, Malloc or Realloc.
func UsableSize(p *byte) (r int) { return UintptrUsableSize(uintptr(unsafe.Pointer(p))) }

// UnsafeCalloc is like Calloc except it returns an unsafe.Pointer.
func (a *Allocator) UnsafeCalloc(size int) (r unsafe.Pointer, err error) {
	p, err := a.UintptrCalloc(size)
	if err != nil {
		return nil, err
	}

	return unsafe.Pointer(p), nil
}

// UnsafeFree is like Free except its argument is an unsafe.Pointer, which must
// have been acquired from UnsafeCalloc or UnsafeMalloc or UnsafeRealloc.
func (a *Allocator) UnsafeFree(p unsafe.Pointer) (err error) { return a.UintptrFree(uintptr(p)) }

// UnsafeMalloc is like Malloc except it returns an unsafe.Pointer.
func (a *Allocator) UnsafeMalloc(size int) (r unsafe.Pointer, err error) {
	p, err := a.UintptrMalloc(size)
	if err != nil {
		return nil, err
	}

	return unsafe.Pointer(p), nil
}

// UnsafeRealloc is like Realloc except its first argument is an
// unsafe.Pointer, which must have been returned from UnsafeCalloc,
// UnsafeMalloc or UnsafeRealloc.
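//
// A short sketch, given an Allocator a (the sizes are arbitrary and error
// handling is elided):
//
//	p, _ := a.UnsafeMalloc(32)
//	p, _ = a.UnsafeRealloc(p, 128) // the first 32 bytes keep their contents
//	_ = a.UnsafeFree(p)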
func (a *Allocator) UnsafeRealloc(p unsafe.Pointer, size int) (r unsafe.Pointer, err error) {
	q, err := a.UintptrRealloc(uintptr(p), size)
	if err != nil {
		return nil, err
	}

	return unsafe.Pointer(q), nil
}

// UnsafeUsableSize is like UsableSize except its argument is an
// unsafe.Pointer, which must have been returned from UnsafeCalloc,
// UnsafeMalloc or UnsafeRealloc.
func UnsafeUsableSize(p unsafe.Pointer) (r int) { return UintptrUsableSize(uintptr(p)) }
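
// exampleUsage is an illustrative sketch of the slice-based API; the function
// name and the sizes below are arbitrary. It allocates a block, grows it with
// Realloc, frees it and finally releases all OS resources held by the
// Allocator.
func exampleUsage() error {
	var a Allocator // the zero value is ready for use

	b, err := a.Malloc(100) // len(b) == 100, contents uninitialized
	if err != nil {
		return err
	}

	fmt.Fprintf(os.Stderr, "usable size: %d\n", UsableSize(&b[0])) // at least 100

	if b, err = a.Realloc(b, 200); err != nil { // the first 100 bytes are preserved
		return err
	}

	if err = a.Free(b); err != nil { // b must not be used after Free
		return err
	}

	return a.Close() // unmap any regions still held by a
}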