github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/lfstack_64bit.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm

package runtime

import "unsafe"

const (
	// addrBits is the number of bits needed to represent a virtual address.
	//
	// See heapAddrBits for a table of address space sizes on
	// various architectures. 48 bits is enough for all
	// architectures except s390x.
	//
	// On AMD64, virtual addresses are 48-bit (or 57-bit) numbers sign extended to 64.
	// We shift the address left 16 to eliminate the sign extended part and make
	// room in the bottom for the count.
	//
	// On s390x, virtual addresses are 64-bit. There's not much we
	// can do about this, so we just hope that the kernel doesn't
	// get to really high addresses and panic if it does.
	addrBits = 48

	// In addition to the 16 bits taken from the top, we can take 3 from the
	// bottom, because node must be pointer-aligned, giving a total of 19 bits
	// of count.
	cntBits = 64 - addrBits + 3

	// On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit
	// offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF (LSA)
	// are available for mmap.
	// We assume all lfnode addresses are from memory allocated with mmap.
	// We use one bit to distinguish between the two ranges.
	aixAddrBits = 57
	aixCntBits  = 64 - aixAddrBits + 3

	// riscv64 SV57 mode gives 56 bits of userspace VA.
	// lfstack code supports it, but broader support for SV57 mode is incomplete,
	// and there may be other issues (see #54104).
	riscv64AddrBits = 56
	riscv64CntBits  = 64 - riscv64AddrBits + 3
)

func lfstackPack(node *lfnode, cnt uintptr) uint64 {
	if GOARCH == "ppc64" && GOOS == "aix" {
		return uint64(uintptr(unsafe.Pointer(node)))<<(64-aixAddrBits) | uint64(cnt&(1<<aixCntBits-1))
	}
	if GOARCH == "riscv64" {
		return uint64(uintptr(unsafe.Pointer(node)))<<(64-riscv64AddrBits) | uint64(cnt&(1<<riscv64CntBits-1))
	}
	return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
}

func lfstackUnpack(val uint64) *lfnode {
	if GOARCH == "amd64" {
		// amd64 systems can place the stack above the VA hole, so we need to sign extend
		// val before unpacking.
		return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> cntBits << 3)))
	}
	if GOARCH == "ppc64" && GOOS == "aix" {
		return (*lfnode)(unsafe.Pointer(uintptr((val >> aixCntBits << 3) | 0xa<<56)))
	}
	if GOARCH == "riscv64" {
		return (*lfnode)(unsafe.Pointer(uintptr(val >> riscv64CntBits << 3)))
	}
	return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
}
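
The standalone sketch below illustrates the common 48-bit packing layout used above (addrBits = 48, cntBits = 19) outside the runtime. The names demoNode, demoPack, and demoUnpack are hypothetical stand-ins introduced here for illustration; they are not part of the runtime, and the sketch omits the amd64 sign-extension path as well as the AIX and riscv64 variants.

// demo_lfstack_pack.go: a minimal, standalone sketch of the 48-bit
// pointer+counter packing, assuming a user-space address below 2^47 and
// 8-byte pointer alignment. Not part of the runtime file above.
package main

import (
	"fmt"
	"unsafe"
)

const (
	demoAddrBits = 48                    // virtual-address bits on most 64-bit targets
	demoCntBits  = 64 - demoAddrBits + 3 // 16 bits from the top + 3 alignment bits = 19
)

// demoNode is modeled loosely on runtime.lfnode: an 8-byte-aligned struct,
// so the low 3 bits of its address are always zero.
type demoNode struct {
	next    uint64
	pushcnt uintptr
}

// demoPack shifts the node address into the upper bits of the word and ORs
// the low 19 bits of the counter underneath it, mirroring the generic
// (addrBits = 48) branch of lfstackPack.
func demoPack(node *demoNode, cnt uintptr) uint64 {
	return uint64(uintptr(unsafe.Pointer(node)))<<(64-demoAddrBits) | uint64(cnt&(1<<demoCntBits-1))
}

// demoUnpack reverses demoPack: shifting right by cntBits discards the
// counter, and the final <<3 restores the pointer's alignment bits.
// (The runtime's amd64 path additionally sign-extends via int64 to handle
// addresses above the canonical-address hole; this sketch skips that.)
func demoUnpack(val uint64) *demoNode {
	return (*demoNode)(unsafe.Pointer(uintptr(val >> demoCntBits << 3)))
}

func main() {
	n := new(demoNode)
	packed := demoPack(n, 0x12345)
	// The unpacked pointer should equal n, and the low 19 bits hold the count.
	fmt.Printf("node=%p unpacked=%p count=%#x\n", n, demoUnpack(packed), packed&(1<<demoCntBits-1))
}

The counter packed into the low bits is what lets the lock-free stack's CAS loop detect reuse of a node (the ABA problem); the sketch only demonstrates that the pointer and count round-trip losslessly through a single 64-bit word.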