github.com/primecitizens/pcz/std@v0.2.1/runtime/builtin.go

// SPDX-License-Identifier: Apache-2.0
// Copyright 2023 The Prime Citizens

//go:build pcz

package runtime

import (
	"unsafe"
	_ "unsafe" // for go:linkname

	stdcomplex "github.com/primecitizens/pcz/std/builtin/complex"
	stdgo "github.com/primecitizens/pcz/std/builtin/go"
	stdprint "github.com/primecitizens/pcz/std/builtin/print"
	stdptr "github.com/primecitizens/pcz/std/builtin/ptr"
	stdtype "github.com/primecitizens/pcz/std/builtin/type"
	"github.com/primecitizens/pcz/std/core/abi"
	"github.com/primecitizens/pcz/std/core/alloc"
	"github.com/primecitizens/pcz/std/core/asan"
	"github.com/primecitizens/pcz/std/core/assert"
	"github.com/primecitizens/pcz/std/core/cover"
	"github.com/primecitizens/pcz/std/core/emu64"
	"github.com/primecitizens/pcz/std/core/fuzz"
	"github.com/primecitizens/pcz/std/core/hash"
	"github.com/primecitizens/pcz/std/core/math"
	"github.com/primecitizens/pcz/std/core/mem"
	"github.com/primecitizens/pcz/std/core/msan"
	"github.com/primecitizens/pcz/std/core/race"
)

//
// rand
//

func fastrand() uint32 {
	return getg().G().Rand32()
}

//
// new
//

func newobject(typ *abi.Type) unsafe.Pointer {
	if typ.Size_ == 0 {
		return alloc.ZeroSized()
	}

	return getg().G().DefaultAlloc().Malloc(typ, 1, true)
}
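
// A sketch of what lands here: the compiler lowers single-object
// allocations to newobject calls (typeOf below is hypothetical
// shorthand for the *abi.Type operand the compiler passes).
//
//	p := new(T)  // compiled to newobject(typeOf(T))
//	q := &T{}    // an escaping composite literal lowers the same way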

// > Allocate an object of size bytes.
// > Small objects are allocated from the per-P cache's free lists.
// > Large objects (> 32 kB) are allocated straight from the heap.
//
// See ${GOROOT}/src/runtime/malloc.go#func:mallocgc
func mallocgc(size uintptr, typ *abi.Type, needszero bool) unsafe.Pointer {
	if typ.Size_ == 0 {
		return alloc.ZeroSized()
	}

	assert.Panic("invalid", "implicit", "allocation")
	return nil
}
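
// pcz forbids implicit heap allocation: aside from the zero-size case,
// any path reaching this general-purpose entry point panics. Heap
// memory is expected to be requested explicitly through an allocator,
// as newobject does above:
//
//	getg().G().DefaultAlloc().Malloc(typ, 1, true)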

//
// goroutine
//

func gopanic(v any) {
	assert.Panic(v)
}

// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//
//go:nosplit
func goschedguarded() { stdgo.Sched(true) }

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any { return stdgo.Recover(argp) }
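
// For orientation: a recover() call inside a deferred function is what
// the compiler rewrites into a gorecover call, with argp identifying
// the caller's argument frame. Roughly (illustrative):
//
//	defer func() {
//		if v := recover(); v != nil { // compiled to gorecover(argp)
//			println("recovered:", v)
//		}
//	}()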

//
// print
//

func printbool(b bool)              { stdprint.PrintBool(b) }
func printfloat(n float64)          { stdprint.PrintFloat(n) }
func printint(n int64)              { stdprint.PrintInt(n) }
func printhex(n uint64)             { stdprint.PrintHex(n) }
func printuint(n uint64)            { stdprint.PrintUint(n) }
func printcomplex(n complex128)     { stdprint.PrintComplex(n) }
func printstring(a string)          { stdprint.PrintString(a) }
func printpointer(p unsafe.Pointer) { stdprint.PrintPointer(p) }
func printuintptr(p uintptr)        { stdprint.PrintUintptr(p) }
func printiface(i stdtype.Iface)    { stdprint.PrintIface(i) }
func printeface(e stdtype.Eface)    { stdprint.PrintEface(e) }
func printslice(s []byte)           { stdprint.PrintSlice(s) }
func printnl()                      { stdprint.PrintNewline() }
func printsp()                      { stdprint.PrintSpace() }
func printlock()                    { stdprint.PrintLock() }
func printunlock()                  { stdprint.PrintUnlock() }
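
// These back the predeclared print and println builtins; the compiler
// lowers a println call into a locked sequence of the helpers above,
// roughly:
//
//	println("x:", 42)
//	// becomes:
//	printlock()
//	printstring("x:")
//	printsp()
//	printint(42)
//	printnl()
//	printunlock()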

//
// mem
//

func memmove(to, from unsafe.Pointer, n uintptr) {
	mem.Move(to, from, n)
}

// See ${GOROOT}/src/runtime/mbarrier.go#func:typedmemmove
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
	if dst == src {
		return
	}
	if writeBarrier.needed && typ.PtrBytes != 0 {
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
	}
	// There's a race here: if some other goroutine can write to
	// src, it may change some pointer in src after we've
	// performed the write barrier but before we perform the
	// memory copy. This is safe because the write performed by that
	// other goroutine must also be accompanied by a write
	// barrier, so at worst we've unnecessarily greyed the old
	// pointer that was in src.
	mem.Move(dst, src, typ.Size_)
}
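
// Illustrative: for a pointer-carrying type T, copying a value through
// pointers is what lowers to typedmemmove (typeOf is hypothetical
// shorthand for the *abi.Type operand the compiler passes).
//
//	var d, s *T
//	*d = *s // compiled to typedmemmove(typeOf(T), d, s)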

// See ${GOROOT}/src/runtime/stubs.go#func:memclrNoHeapPointers
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	mem.Clear(ptr, n)
}

// See ${GOROOT}/src/runtime/mbarrier.go#func:memclrHasPointers
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
	bulkBarrierPreWrite(uintptr(ptr), 0, n)
	mem.Clear(ptr, n)
}

// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
//
// See ${GOROOT}/src/runtime/mbarrier.go#func:typedmemclr
//
//go:nosplit
func typedmemclr(typ *abi.Type, ptr unsafe.Pointer) {
	if writeBarrier.needed && typ.PtrBytes != 0 {
		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes)
	}

	mem.Clear(ptr, typ.Size_)
}
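
// Illustrative: in upstream Go, typedmemclr is how the runtime zeroes
// pointer-carrying slots that are already in type-safe state, e.g.
// when deleting a map entry:
//
//	m := map[string]*T{}
//	delete(m, "k") // mapdelete clears the slot via typedmemclr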

// See ${GOROOT}/src/runtime/mbarrier.go#func:typedslicecopy
func typedslicecopy(typ *abi.Type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
	n := dstLen
	if n > srcLen {
		n = srcLen
	}
	if n == 0 {
		return 0
	}

	// The compiler emits calls to typedslicecopy before
	// instrumentation runs, so unlike the other copying and
	// assignment operations, it's not instrumented in the calling
	// code and needs its own instrumentation.
	if race.Enabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		race.WriteRangePC(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
		race.ReadRangePC(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
	}
	if msan.Enabled {
		msan.Write(dstPtr, uintptr(n)*typ.Size_)
		msan.Read(srcPtr, uintptr(n)*typ.Size_)
	}
	if asan.Enabled {
		asan.Write(dstPtr, uintptr(n)*typ.Size_)
		asan.Read(srcPtr, uintptr(n)*typ.Size_)
	}

	if dstPtr == srcPtr {
		return n
	}

	// Note: No point in checking typ.PtrBytes here:
	// the compiler only emits calls to typedslicecopy for types with pointers,
	// and growslice and reflect_typedslicecopy check for pointers
	// before calling typedslicecopy.
	size := uintptr(n) * typ.Size_
	if writeBarrier.needed {
		pwsize := size - typ.Size_ + typ.PtrBytes
		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
	}
	// See typedmemmove for a discussion of the race between the
	// barrier and memmove.
	mem.Move(dstPtr, srcPtr, size)
	return n
}
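
// Illustrative: copying between slices whose element type contains
// pointers is what the compiler routes here.
//
//	var dst, src []*T
//	n := copy(dst, src) // compiled to a typedslicecopy call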

//
// math
//

func mulUintptr(a, b uintptr) (uintptr, bool) {
	return math.MulUintptr(a, b)
}
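
// mulUintptr returns a*b and whether the multiplication overflowed;
// allocation-size checks use it in this style (elemSize, count, and
// maxAlloc are hypothetical):
//
//	size, overflow := mulUintptr(elemSize, count)
//	if overflow || size > maxAlloc {
//		// refuse the allocation
//	}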

func complex128div(num, den complex128) (quo complex128) {
	return stdcomplex.Complex128Div(num, den)
}

//
// 64bit emulation for 32bit platforms
//

func int64div(n int64, d int64) int64     { return emu64.Int64Div(n, d) }
func uint64div(n uint64, d uint64) uint64 { return emu64.Uint64Div(n, d) }
func int64mod(n int64, d int64) int64     { return emu64.Int64Mod(n, d) }
func uint64mod(n uint64, d uint64) uint64 { return emu64.Uint64Mod(n, d) }
func float64toint64(n float64) int64      { return emu64.Float64ToInt64(n) }
func float64touint64(n float64) uint64    { return emu64.Float64ToUint64(n) }
func float64touint32(n float64) uint32    { return emu64.Float64ToUint32(n) }
func int64tofloat64(n int64) float64      { return emu64.Int64ToFloat64(n) }
func int64tofloat32(n int64) float32      { return emu64.Int64ToFloat32(n) }
func uint64tofloat64(n uint64) float64    { return emu64.Uint64ToFloat64(n) }
func uint64tofloat32(n uint64) float32    { return emu64.Uint64ToFloat32(n) }
func uint32tofloat64(n uint32) float64    { return emu64.Uint32ToFloat64(n) }
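
// On 32-bit targets the compiler cannot open-code 64-bit arithmetic,
// so it lowers the operations to the calls above, roughly:
//
//	var a, b int64
//	q := a / b      // compiled to int64div(a, b)
//	r := a % b      // compiled to int64mod(a, b)
//	f := float64(a) // compiled to int64tofloat64(a)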

//
// caller
//

// implemented as a compiler intrinsic on all platforms except riscv64;
// see core/caller for the riscv64 implementation.
//
//go:linkname getcallerpc
//go:noescape
func getcallerpc() uintptr

// implemented as a compiler intrinsic on all platforms
//
//go:linkname getcallersp
//go:noescape
func getcallersp() uintptr

// getclosureptr returns the pointer to the current closure.
// getclosureptr can only be used in an assignment statement
// at the entry of a function. Moreover, the go:nosplit directive
// must be specified at the declaration of the calling function,
// so that the function prolog does not clobber the closure register.
// For example:
//
//	//go:nosplit
//	func f(arg1, arg2, arg3 int) {
//		dx := getclosureptr()
//	}
//
// The compiler rewrites calls to this function into instructions that fetch the
// pointer from a well-known register (DX on x86 architecture, etc.) directly.
//
//go:linkname getclosureptr
//go:noescape
func getclosureptr() uintptr

//
// equal
//

// implemented inside core/mem
func memequal_varlen(p, q unsafe.Pointer) bool
func memequal(p, q unsafe.Pointer, sz uintptr) bool { return mem.Equal(p, q, sz) }
func memequal0(p, q unsafe.Pointer) bool            { return true }
func memequal8(p, q unsafe.Pointer) bool            { return *(*int8)(p) == *(*int8)(q) }
func memequal16(p, q unsafe.Pointer) bool           { return *(*int16)(p) == *(*int16)(q) }
func memequal32(p, q unsafe.Pointer) bool           { return *(*int32)(p) == *(*int32)(q) }
func memequal64(p, q unsafe.Pointer) bool           { return *(*int64)(p) == *(*int64)(q) }
func memequal128(p, q unsafe.Pointer) bool          { return *(*[2]int64)(p) == *(*[2]int64)(q) }
func f32equal(p, q unsafe.Pointer) bool             { return *(*float32)(p) == *(*float32)(q) }
func f64equal(p, q unsafe.Pointer) bool             { return *(*float64)(p) == *(*float64)(q) }
func c64equal(p, q unsafe.Pointer) bool             { return *(*complex64)(p) == *(*complex64)(q) }
func c128equal(p, q unsafe.Pointer) bool            { return *(*complex128)(p) == *(*complex128)(q) }
func strequal(p, q unsafe.Pointer) bool             { return *(*string)(p) == *(*string)(q) }
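
// These serve as the equality implementations recorded in type
// descriptors; interface comparison and map-key lookup dispatch
// through them. Roughly (illustrative):
//
//	var x, y any = int64(1), int64(1)
//	_ = x == y // after the type check, compares data via memequal64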

func interequal(p, q unsafe.Pointer) bool {
	x := *(*stdtype.Iface)(p)
	y := *(*stdtype.Iface)(q)
	return x.Itab == y.Itab && ifaceeq(x.Itab, x.Data, y.Data)
}

func nilinterequal(p, q unsafe.Pointer) bool {
	x := *(*stdtype.Eface)(p)
	y := *(*stdtype.Eface)(q)
	return x.Type == y.Type && efaceeq(x.Type, x.Data, y.Data)
}

//
// hashing
//

func memhash(p unsafe.Pointer, h uintptr, sz uintptr) uintptr { return hash.MemHash(p, h, sz) }
func memhash0(p unsafe.Pointer, h uintptr) uintptr            { return h }
func memhash8(p unsafe.Pointer, h uintptr) uintptr            { return hash.MemHash(p, h, 1) }
func memhash16(p unsafe.Pointer, h uintptr) uintptr           { return hash.MemHash(p, h, 2) }
func memhash32(p unsafe.Pointer, h uintptr) uintptr           { return hash.MemHash32(p, h) }
func memhash64(p unsafe.Pointer, h uintptr) uintptr           { return hash.MemHash64(p, h) }
func memhash128(p unsafe.Pointer, h uintptr) uintptr          { return hash.MemHash(p, h, 16) }

// See ${GOROOT}/src/runtime/alg.go#func:memhash_varlen
//
//go:nosplit
func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
	ptr := getclosureptr()
	size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
	return memhash(p, h, size)
}
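
// The size load above assumes the compiler-generated closure layout:
// the function pointer comes first, followed by the size word.
//
//	struct {
//		fn   uintptr
//		size uintptr // read at ptr + unsafe.Sizeof(h)
//	}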

func f32hash(p unsafe.Pointer, h uintptr) uintptr      { return hash.Float32Hash(p, h) }
func f64hash(p unsafe.Pointer, h uintptr) uintptr      { return hash.Float64Hash(p, h) }
func c64hash(p unsafe.Pointer, h uintptr) uintptr      { return hash.Complex64Hash(p, h) }
func c128hash(p unsafe.Pointer, h uintptr) uintptr     { return hash.Complex128Hash(p, h) }
func strhash(a unsafe.Pointer, h uintptr) uintptr      { return hash.StringHash(a, h) }
func interhash(p unsafe.Pointer, h uintptr) uintptr    { return hash.InterfaceHash(p, h) }
func nilinterhash(p unsafe.Pointer, h uintptr) uintptr { return hash.NilInterfaceHash(p, h) }

//
// race detection
//

func racefuncenter(p uintptr)                        { race.FuncEnter(p) }
func racefuncexit()                                  { race.FuncExit() }
func raceread(p unsafe.Pointer)                      { race.Read(p) }
func racewrite(p unsafe.Pointer)                     { race.Write(p) }
func racereadrange(addr unsafe.Pointer, sz uintptr)  { race.ReadRange(addr, sz) }
func racewriterange(addr unsafe.Pointer, sz uintptr) { race.WriteRange(addr, sz) }

//
// memory sanitizer
//

func msanread(addr unsafe.Pointer, sz uintptr)  { msan.Read(addr, sz) }
func msanwrite(addr unsafe.Pointer, sz uintptr) { msan.Write(addr, sz) }
func msanmove(dst, src, sz uintptr)             { msan.Move(dst, src, sz) }

//
// address sanitizer
//

func asanread(addr unsafe.Pointer, size uintptr)  { asan.Read(addr, size) }
func asanwrite(addr unsafe.Pointer, size uintptr) { asan.Write(addr, size) }

//
// ptr
//

func checkptrAlignment(p unsafe.Pointer, elem *abi.Type, n uintptr) {
	stdptr.CheckAlignment(p, elem, n)
}

func checkptrArithmetic(p unsafe.Pointer, originals []unsafe.Pointer) {
	stdptr.CheckArithmetic(p, originals)
}
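
// These back the -d=checkptr instrumentation: conversions of unsafe
// pointers to typed pointers and unsafe pointer arithmetic are the
// checked operations. Roughly (illustrative):
//
//	p := (*T)(unsafe.Pointer(u))          // instrumented with checkptrAlignment
//	q := unsafe.Pointer(uintptr(r) + off) // instrumented with checkptrArithmetic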

//
// fuzz
//

func libfuzzerTraceCmp1(arg0, arg1 uint8, fakepc int)       { fuzz.TraceCmp1(arg0, arg1, fakepc) }
func libfuzzerTraceCmp2(arg0, arg1 uint16, fakepc int)      { fuzz.TraceCmp2(arg0, arg1, fakepc) }
func libfuzzerTraceCmp4(arg0, arg1 uint32, fakepc int)      { fuzz.TraceCmp4(arg0, arg1, fakepc) }
func libfuzzerTraceCmp8(arg0, arg1 uint64, fakepc int)      { fuzz.TraceCmp8(arg0, arg1, fakepc) }
func libfuzzerTraceConstCmp1(arg0, arg1 uint8, fakepc int)  { fuzz.TraceConstCmp1(arg0, arg1, fakepc) }
func libfuzzerTraceConstCmp2(arg0, arg1 uint16, fakepc int) { fuzz.TraceConstCmp2(arg0, arg1, fakepc) }
func libfuzzerTraceConstCmp4(arg0, arg1 uint32, fakepc int) { fuzz.TraceConstCmp4(arg0, arg1, fakepc) }
func libfuzzerTraceConstCmp8(arg0, arg1 uint64, fakepc int) { fuzz.TraceConstCmp8(arg0, arg1, fakepc) }
func libfuzzerHookStrCmp(arg0, arg1 string, fakepc int)     { fuzz.HookStrCmp(arg0, arg1, fakepc) }
func libfuzzerHookEqualFold(arg0, arg1 string, fakepc int)  { fuzz.HookEqualFold(arg0, arg1, fakepc) }

//
// coverage
//

func addCovMeta(p unsafe.Pointer, dlen uint32, hash [16]byte, pkpath string, pkid int, cmode uint8, cgran uint8) uint32 {
	return cover.AddCovMeta(p, dlen, hash, pkpath, pkid, cmode, cgran)
}