github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/runtime/syscall_windows.go (about) 1 // Copyright 2014 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "internal/abi" 9 "internal/goarch" 10 "unsafe" 11 ) 12 13 // cbs stores all registered Go callbacks. 14 var cbs struct { 15 lock mutex // use cbsLock / cbsUnlock for race instrumentation. 16 ctxt [cb_max]winCallback 17 index map[winCallbackKey]int 18 n int 19 } 20 21 func cbsLock() { 22 lock(&cbs.lock) 23 // compileCallback is used by goenvs prior to completion of schedinit. 24 // raceacquire involves a racecallback to get the proc, which is not 25 // safe prior to scheduler initialization. Thus avoid instrumentation 26 // until then. 27 if raceenabled && mainStarted { 28 raceacquire(unsafe.Pointer(&cbs.lock)) 29 } 30 } 31 32 func cbsUnlock() { 33 if raceenabled && mainStarted { 34 racerelease(unsafe.Pointer(&cbs.lock)) 35 } 36 unlock(&cbs.lock) 37 } 38 39 // winCallback records information about a registered Go callback. 40 type winCallback struct { 41 fn *funcval // Go function 42 retPop uintptr // For 386 cdecl, how many bytes to pop on return 43 abiMap abiDesc 44 } 45 46 // abiPartKind is the action an abiPart should take. 47 type abiPartKind int 48 49 const ( 50 abiPartBad abiPartKind = iota 51 abiPartStack // Move a value from memory to the stack. 52 abiPartReg // Move a value from memory to a register. 53 ) 54 55 // abiPart encodes a step in translating between calling ABIs. 56 type abiPart struct { 57 kind abiPartKind 58 srcStackOffset uintptr 59 dstStackOffset uintptr // used if kind == abiPartStack 60 dstRegister int // used if kind == abiPartReg 61 len uintptr 62 } 63 64 func (a *abiPart) tryMerge(b abiPart) bool { 65 if a.kind != abiPartStack || b.kind != abiPartStack { 66 return false 67 } 68 if a.srcStackOffset+a.len == b.srcStackOffset && a.dstStackOffset+a.len == b.dstStackOffset { 69 a.len += b.len 70 return true 71 } 72 return false 73 } 74 75 // abiDesc specifies how to translate from a C frame to a Go 76 // frame. This does not specify how to translate back because 77 // the result is always a uintptr. If the C ABI is fastcall, 78 // this assumes the four fastcall registers were first spilled 79 // to the shadow space. 80 type abiDesc struct { 81 parts []abiPart 82 83 srcStackSize uintptr // stdcall/fastcall stack space tracking 84 dstStackSize uintptr // Go stack space used 85 dstSpill uintptr // Extra stack space for argument spill slots 86 dstRegisters int // Go ABI int argument registers used 87 88 // retOffset is the offset of the uintptr-sized result in the Go 89 // frame. 90 retOffset uintptr 91 } 92 93 func (p *abiDesc) assignArg(t *_type) { 94 if t.Size_ > goarch.PtrSize { 95 // We don't support this right now. In 96 // stdcall/cdecl, 64-bit ints and doubles are 97 // passed as two words (little endian); and 98 // structs are pushed on the stack. In 99 // fastcall, arguments larger than the word 100 // size are passed by reference. On arm, 101 // 8-byte aligned arguments round up to the 102 // next even register and can be split across 103 // registers and the stack. 104 panic("compileCallback: argument size is larger than uintptr") 105 } 106 if k := t.Kind_ & kindMask; GOARCH != "386" && (k == kindFloat32 || k == kindFloat64) { 107 // In fastcall, floating-point arguments in 108 // the first four positions are passed in 109 // floating-point registers, which we don't 110 // currently spill. arm passes floating-point 111 // arguments in VFP registers, which we also 112 // don't support. 113 // So basically we only support 386. 114 panic("compileCallback: float arguments not supported") 115 } 116 117 if t.Size_ == 0 { 118 // The Go ABI aligns for zero-sized types. 119 p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_)) 120 return 121 } 122 123 // In the C ABI, we're already on a word boundary. 124 // Also, sub-word-sized fastcall register arguments 125 // are stored to the least-significant bytes of the 126 // argument word and all supported Windows 127 // architectures are little endian, so srcStackOffset 128 // is already pointing to the right place for smaller 129 // arguments. The same is true on arm. 130 131 oldParts := p.parts 132 if p.tryRegAssignArg(t, 0) { 133 // Account for spill space. 134 // 135 // TODO(mknyszek): Remove this when we no longer have 136 // caller reserved spill space. 137 p.dstSpill = alignUp(p.dstSpill, uintptr(t.Align_)) 138 p.dstSpill += t.Size_ 139 } else { 140 // Register assignment failed. 141 // Undo the work and stack assign. 142 p.parts = oldParts 143 144 // The Go ABI aligns arguments. 145 p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_)) 146 147 // Copy just the size of the argument. Note that this 148 // could be a small by-value struct, but C and Go 149 // struct layouts are compatible, so we can copy these 150 // directly, too. 151 part := abiPart{ 152 kind: abiPartStack, 153 srcStackOffset: p.srcStackSize, 154 dstStackOffset: p.dstStackSize, 155 len: t.Size_, 156 } 157 // Add this step to the adapter. 158 if len(p.parts) == 0 || !p.parts[len(p.parts)-1].tryMerge(part) { 159 p.parts = append(p.parts, part) 160 } 161 // The Go ABI packs arguments. 162 p.dstStackSize += t.Size_ 163 } 164 165 // cdecl, stdcall, fastcall, and arm pad arguments to word size. 166 // TODO(rsc): On arm and arm64 do we need to skip the caller's saved LR? 167 p.srcStackSize += goarch.PtrSize 168 } 169 170 // tryRegAssignArg tries to register-assign a value of type t. 171 // If this type is nested in an aggregate type, then offset is the 172 // offset of this type within its parent type. 173 // Assumes t.size <= goarch.PtrSize and t.size != 0. 174 // 175 // Returns whether the assignment succeeded. 176 func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool { 177 switch k := t.Kind_ & kindMask; k { 178 case kindBool, kindInt, kindInt8, kindInt16, kindInt32, kindUint, kindUint8, kindUint16, kindUint32, kindUintptr, kindPtr, kindUnsafePointer: 179 // Assign a register for all these types. 180 return p.assignReg(t.Size_, offset) 181 case kindInt64, kindUint64: 182 // Only register-assign if the registers are big enough. 183 if goarch.PtrSize == 8 { 184 return p.assignReg(t.Size_, offset) 185 } 186 case kindArray: 187 at := (*arraytype)(unsafe.Pointer(t)) 188 if at.Len == 1 { 189 return p.tryRegAssignArg(at.Elem, offset) // TODO fix when runtime is fully commoned up w/ abi.Type 190 } 191 case kindStruct: 192 st := (*structtype)(unsafe.Pointer(t)) 193 for i := range st.Fields { 194 f := &st.Fields[i] 195 if !p.tryRegAssignArg(f.Typ, offset+f.Offset) { 196 return false 197 } 198 } 199 return true 200 } 201 // Pointer-sized types such as maps and channels are currently 202 // not supported. 203 panic("compileCallback: type " + toRType(t).string() + " is currently not supported for use in system callbacks") 204 } 205 206 // assignReg attempts to assign a single register for an 207 // argument with the given size, at the given offset into the 208 // value in the C ABI space. 209 // 210 // Returns whether the assignment was successful. 211 func (p *abiDesc) assignReg(size, offset uintptr) bool { 212 if p.dstRegisters >= intArgRegs { 213 return false 214 } 215 p.parts = append(p.parts, abiPart{ 216 kind: abiPartReg, 217 srcStackOffset: p.srcStackSize + offset, 218 dstRegister: p.dstRegisters, 219 len: size, 220 }) 221 p.dstRegisters++ 222 return true 223 } 224 225 type winCallbackKey struct { 226 fn *funcval 227 cdecl bool 228 } 229 230 func callbackasm() 231 232 // callbackasmAddr returns address of runtime.callbackasm 233 // function adjusted by i. 234 // On x86 and amd64, runtime.callbackasm is a series of CALL instructions, 235 // and we want callback to arrive at 236 // correspondent call instruction instead of start of 237 // runtime.callbackasm. 238 // On ARM, runtime.callbackasm is a series of mov and branch instructions. 239 // R12 is loaded with the callback index. Each entry is two instructions, 240 // hence 8 bytes. 241 func callbackasmAddr(i int) uintptr { 242 var entrySize int 243 switch GOARCH { 244 default: 245 panic("unsupported architecture") 246 case "386", "amd64": 247 entrySize = 5 248 case "arm", "arm64": 249 // On ARM and ARM64, each entry is a MOV instruction 250 // followed by a branch instruction 251 entrySize = 8 252 } 253 return abi.FuncPCABI0(callbackasm) + uintptr(i*entrySize) 254 } 255 256 const callbackMaxFrame = 64 * goarch.PtrSize 257 258 // compileCallback converts a Go function fn into a C function pointer 259 // that can be passed to Windows APIs. 260 // 261 // On 386, if cdecl is true, the returned C function will use the 262 // cdecl calling convention; otherwise, it will use stdcall. On amd64, 263 // it always uses fastcall. On arm, it always uses the ARM convention. 264 // 265 //go:linkname compileCallback syscall.compileCallback 266 func compileCallback(fn eface, cdecl bool) (code uintptr) { 267 if GOARCH != "386" { 268 // cdecl is only meaningful on 386. 269 cdecl = false 270 } 271 272 if fn._type == nil || (fn._type.Kind_&kindMask) != kindFunc { 273 panic("compileCallback: expected function with one uintptr-sized result") 274 } 275 ft := (*functype)(unsafe.Pointer(fn._type)) 276 277 // Check arguments and construct ABI translation. 278 var abiMap abiDesc 279 for _, t := range ft.InSlice() { 280 abiMap.assignArg(t) 281 } 282 // The Go ABI aligns the result to the word size. src is 283 // already aligned. 284 abiMap.dstStackSize = alignUp(abiMap.dstStackSize, goarch.PtrSize) 285 abiMap.retOffset = abiMap.dstStackSize 286 287 if len(ft.OutSlice()) != 1 { 288 panic("compileCallback: expected function with one uintptr-sized result") 289 } 290 if ft.OutSlice()[0].Size_ != goarch.PtrSize { 291 panic("compileCallback: expected function with one uintptr-sized result") 292 } 293 if k := ft.OutSlice()[0].Kind_ & kindMask; k == kindFloat32 || k == kindFloat64 { 294 // In cdecl and stdcall, float results are returned in 295 // ST(0). In fastcall, they're returned in XMM0. 296 // Either way, it's not AX. 297 panic("compileCallback: float results not supported") 298 } 299 if intArgRegs == 0 { 300 // Make room for the uintptr-sized result. 301 // If there are argument registers, the return value will 302 // be passed in the first register. 303 abiMap.dstStackSize += goarch.PtrSize 304 } 305 306 // TODO(mknyszek): Remove dstSpill from this calculation when we no longer have 307 // caller reserved spill space. 308 frameSize := alignUp(abiMap.dstStackSize, goarch.PtrSize) 309 frameSize += abiMap.dstSpill 310 if frameSize > callbackMaxFrame { 311 panic("compileCallback: function argument frame too large") 312 } 313 314 // For cdecl, the callee is responsible for popping its 315 // arguments from the C stack. 316 var retPop uintptr 317 if cdecl { 318 retPop = abiMap.srcStackSize 319 } 320 321 key := winCallbackKey{(*funcval)(fn.data), cdecl} 322 323 cbsLock() 324 325 // Check if this callback is already registered. 326 if n, ok := cbs.index[key]; ok { 327 cbsUnlock() 328 return callbackasmAddr(n) 329 } 330 331 // Register the callback. 332 if cbs.index == nil { 333 cbs.index = make(map[winCallbackKey]int) 334 } 335 n := cbs.n 336 if n >= len(cbs.ctxt) { 337 cbsUnlock() 338 throw("too many callback functions") 339 } 340 c := winCallback{key.fn, retPop, abiMap} 341 cbs.ctxt[n] = c 342 cbs.index[key] = n 343 cbs.n++ 344 345 cbsUnlock() 346 return callbackasmAddr(n) 347 } 348 349 type callbackArgs struct { 350 index uintptr 351 // args points to the argument block. 352 // 353 // For cdecl and stdcall, all arguments are on the stack. 354 // 355 // For fastcall, the trampoline spills register arguments to 356 // the reserved spill slots below the stack arguments, 357 // resulting in a layout equivalent to stdcall. 358 // 359 // For arm, the trampoline stores the register arguments just 360 // below the stack arguments, so again we can treat it as one 361 // big stack arguments frame. 362 args unsafe.Pointer 363 // Below are out-args from callbackWrap 364 result uintptr 365 retPop uintptr // For 386 cdecl, how many bytes to pop on return 366 } 367 368 // callbackWrap is called by callbackasm to invoke a registered C callback. 369 func callbackWrap(a *callbackArgs) { 370 c := cbs.ctxt[a.index] 371 a.retPop = c.retPop 372 373 // Convert from C to Go ABI. 374 var regs abi.RegArgs 375 var frame [callbackMaxFrame]byte 376 goArgs := unsafe.Pointer(&frame) 377 for _, part := range c.abiMap.parts { 378 switch part.kind { 379 case abiPartStack: 380 memmove(add(goArgs, part.dstStackOffset), add(a.args, part.srcStackOffset), part.len) 381 case abiPartReg: 382 goReg := unsafe.Pointer(®s.Ints[part.dstRegister]) 383 memmove(goReg, add(a.args, part.srcStackOffset), part.len) 384 default: 385 panic("bad ABI description") 386 } 387 } 388 389 // TODO(mknyszek): Remove this when we no longer have 390 // caller reserved spill space. 391 frameSize := alignUp(c.abiMap.dstStackSize, goarch.PtrSize) 392 frameSize += c.abiMap.dstSpill 393 394 // Even though this is copying back results, we can pass a nil 395 // type because those results must not require write barriers. 396 reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.abiMap.dstStackSize), uint32(c.abiMap.retOffset), uint32(frameSize), ®s) 397 398 // Extract the result. 399 // 400 // There's always exactly one return value, one pointer in size. 401 // If it's on the stack, then we will have reserved space for it 402 // at the end of the frame, otherwise it was passed in a register. 403 if c.abiMap.dstStackSize != c.abiMap.retOffset { 404 a.result = *(*uintptr)(unsafe.Pointer(&frame[c.abiMap.retOffset])) 405 } else { 406 var zero int 407 // On architectures with no registers, Ints[0] would be a compile error, 408 // so we use a dynamic index. These architectures will never take this 409 // branch, so this won't cause a runtime panic. 410 a.result = regs.Ints[zero] 411 } 412 } 413 414 const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800 415 416 //go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary 417 func syscall_loadsystemlibrary(filename *uint16) (handle, err uintptr) { 418 handle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_LoadLibraryExW)), uintptr(unsafe.Pointer(filename)), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32) 419 KeepAlive(filename) 420 if handle != 0 { 421 err = 0 422 } 423 return 424 } 425 426 //go:linkname syscall_loadlibrary syscall.loadlibrary 427 func syscall_loadlibrary(filename *uint16) (handle, err uintptr) { 428 handle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_LoadLibraryW)), uintptr(unsafe.Pointer(filename))) 429 KeepAlive(filename) 430 if handle != 0 { 431 err = 0 432 } 433 return 434 } 435 436 //go:linkname syscall_getprocaddress syscall.getprocaddress 437 func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) { 438 outhandle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_GetProcAddress)), handle, uintptr(unsafe.Pointer(procname))) 439 KeepAlive(procname) 440 if outhandle != 0 { 441 err = 0 442 } 443 return 444 } 445 446 //go:linkname syscall_Syscall syscall.Syscall 447 //go:nosplit 448 func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) { 449 args := [...]uintptr{a1, a2, a3} 450 return syscall_SyscallN(fn, args[:nargs]...) 451 } 452 453 //go:linkname syscall_Syscall6 syscall.Syscall6 454 //go:nosplit 455 func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { 456 args := [...]uintptr{a1, a2, a3, a4, a5, a6} 457 return syscall_SyscallN(fn, args[:nargs]...) 458 } 459 460 //go:linkname syscall_Syscall9 syscall.Syscall9 461 //go:nosplit 462 func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { 463 args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9} 464 return syscall_SyscallN(fn, args[:nargs]...) 465 } 466 467 //go:linkname syscall_Syscall12 syscall.Syscall12 468 //go:nosplit 469 func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) { 470 args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12} 471 return syscall_SyscallN(fn, args[:nargs]...) 472 } 473 474 //go:linkname syscall_Syscall15 syscall.Syscall15 475 //go:nosplit 476 func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { 477 args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} 478 return syscall_SyscallN(fn, args[:nargs]...) 479 } 480 481 //go:linkname syscall_Syscall18 syscall.Syscall18 482 //go:nosplit 483 func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) { 484 args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18} 485 return syscall_SyscallN(fn, args[:nargs]...) 486 } 487 488 // maxArgs should be divisible by 2, as Windows stack 489 // must be kept 16-byte aligned on syscall entry. 490 // 491 // Although it only permits maximum 42 parameters, it 492 // is arguably large enough. 493 const maxArgs = 42 494 495 //go:linkname syscall_SyscallN syscall.SyscallN 496 //go:nosplit 497 func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { 498 if len(args) > maxArgs { 499 panic("runtime: SyscallN has too many arguments") 500 } 501 502 // The cgocall parameters are stored in m instead of in 503 // the stack because the stack can move during if fn 504 // calls back into Go. 505 lockOSThread() 506 defer unlockOSThread() 507 c := &getg().m.syscall 508 c.fn = fn 509 c.n = uintptr(len(args)) 510 if c.n != 0 { 511 c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) 512 } 513 cgocall(asmstdcallAddr, unsafe.Pointer(c)) 514 return c.r1, c.r2, c.err 515 }