github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/runtime1.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "internal/bytealg" 9 "internal/goarch" 10 "runtime/internal/atomic" 11 "unsafe" 12 ) 13 14 // Keep a cached value to make gotraceback fast, 15 // since we call it on every call to gentraceback. 16 // The cached value is a uint32 in which the low bits 17 // are the "crash" and "all" settings and the remaining 18 // bits are the traceback value (0 off, 1 on, 2 include system). 19 const ( 20 tracebackCrash = 1 << iota 21 tracebackAll 22 tracebackShift = iota 23 ) 24 25 var traceback_cache uint32 = 2 << tracebackShift 26 var traceback_env uint32 27 28 // gotraceback returns the current traceback settings. 29 // 30 // If level is 0, suppress all tracebacks. 31 // If level is 1, show tracebacks, but exclude runtime frames. 32 // If level is 2, show tracebacks including runtime frames. 33 // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine. 34 // If crash is set, crash (core dump, etc) after tracebacking. 35 // 36 //go:nosplit 37 func gotraceback() (level int32, all, crash bool) { 38 gp := getg() 39 t := atomic.Load(&traceback_cache) 40 crash = t&tracebackCrash != 0 41 all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0 42 if gp.m.traceback != 0 { 43 level = int32(gp.m.traceback) 44 } else if gp.m.throwing >= throwTypeRuntime { 45 // Always include runtime frames in runtime throws unless 46 // otherwise overridden by m.traceback. 47 level = 2 48 } else { 49 level = int32(t >> tracebackShift) 50 } 51 return 52 } 53 54 var ( 55 argc int32 56 argv **byte 57 ) 58 59 // nosplit for use in linux startup sysargs. 60 // 61 //go:nosplit 62 func argv_index(argv **byte, i int32) *byte { 63 return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize)) 64 } 65 66 func args(c int32, v **byte) { 67 argc = c 68 argv = v 69 sysargs(c, v) 70 } 71 72 func goargs() { 73 if GOOS == "windows" { 74 return 75 } 76 argslice = make([]string, argc) 77 for i := int32(0); i < argc; i++ { 78 argslice[i] = gostringnocopy(argv_index(argv, i)) 79 } 80 } 81 82 func goenvs_unix() { 83 // TODO(austin): ppc64 in dynamic linking mode doesn't 84 // guarantee env[] will immediately follow argv. Might cause 85 // problems. 86 n := int32(0) 87 for argv_index(argv, argc+1+n) != nil { 88 n++ 89 } 90 91 envs = make([]string, n) 92 for i := int32(0); i < n; i++ { 93 envs[i] = gostring(argv_index(argv, argc+1+i)) 94 } 95 } 96 97 func environ() []string { 98 return envs 99 } 100 101 // TODO: These should be locals in testAtomic64, but we don't 8-byte 102 // align stack variables on 386. 103 var test_z64, test_x64 uint64 104 105 func testAtomic64() { 106 test_z64 = 42 107 test_x64 = 0 108 if atomic.Cas64(&test_z64, test_x64, 1) { 109 throw("cas64 failed") 110 } 111 if test_x64 != 0 { 112 throw("cas64 failed") 113 } 114 test_x64 = 42 115 if !atomic.Cas64(&test_z64, test_x64, 1) { 116 throw("cas64 failed") 117 } 118 if test_x64 != 42 || test_z64 != 1 { 119 throw("cas64 failed") 120 } 121 if atomic.Load64(&test_z64) != 1 { 122 throw("load64 failed") 123 } 124 atomic.Store64(&test_z64, (1<<40)+1) 125 if atomic.Load64(&test_z64) != (1<<40)+1 { 126 throw("store64 failed") 127 } 128 if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 { 129 throw("xadd64 failed") 130 } 131 if atomic.Load64(&test_z64) != (2<<40)+2 { 132 throw("xadd64 failed") 133 } 134 if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 { 135 throw("xchg64 failed") 136 } 137 if atomic.Load64(&test_z64) != (3<<40)+3 { 138 throw("xchg64 failed") 139 } 140 } 141 142 func check() { 143 var ( 144 a int8 145 b uint8 146 c int16 147 d uint16 148 e int32 149 f uint32 150 g int64 151 h uint64 152 i, i1 float32 153 j, j1 float64 154 k unsafe.Pointer 155 l *uint16 156 m [4]byte 157 ) 158 type x1t struct { 159 x uint8 160 } 161 type y1t struct { 162 x1 x1t 163 y uint8 164 } 165 var x1 x1t 166 var y1 y1t 167 168 if unsafe.Sizeof(a) != 1 { 169 throw("bad a") 170 } 171 if unsafe.Sizeof(b) != 1 { 172 throw("bad b") 173 } 174 if unsafe.Sizeof(c) != 2 { 175 throw("bad c") 176 } 177 if unsafe.Sizeof(d) != 2 { 178 throw("bad d") 179 } 180 if unsafe.Sizeof(e) != 4 { 181 throw("bad e") 182 } 183 if unsafe.Sizeof(f) != 4 { 184 throw("bad f") 185 } 186 if unsafe.Sizeof(g) != 8 { 187 throw("bad g") 188 } 189 if unsafe.Sizeof(h) != 8 { 190 throw("bad h") 191 } 192 if unsafe.Sizeof(i) != 4 { 193 throw("bad i") 194 } 195 if unsafe.Sizeof(j) != 8 { 196 throw("bad j") 197 } 198 if unsafe.Sizeof(k) != goarch.PtrSize { 199 throw("bad k") 200 } 201 if unsafe.Sizeof(l) != goarch.PtrSize { 202 throw("bad l") 203 } 204 if unsafe.Sizeof(x1) != 1 { 205 throw("bad unsafe.Sizeof x1") 206 } 207 if unsafe.Offsetof(y1.y) != 1 { 208 throw("bad offsetof y1.y") 209 } 210 if unsafe.Sizeof(y1) != 2 { 211 throw("bad unsafe.Sizeof y1") 212 } 213 214 if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 { 215 throw("bad timediv") 216 } 217 218 var z uint32 219 z = 1 220 if !atomic.Cas(&z, 1, 2) { 221 throw("cas1") 222 } 223 if z != 2 { 224 throw("cas2") 225 } 226 227 z = 4 228 if atomic.Cas(&z, 5, 6) { 229 throw("cas3") 230 } 231 if z != 4 { 232 throw("cas4") 233 } 234 235 z = 0xffffffff 236 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) { 237 throw("cas5") 238 } 239 if z != 0xfffffffe { 240 throw("cas6") 241 } 242 243 m = [4]byte{1, 1, 1, 1} 244 atomic.Or8(&m[1], 0xf0) 245 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 { 246 throw("atomicor8") 247 } 248 249 m = [4]byte{0xff, 0xff, 0xff, 0xff} 250 atomic.And8(&m[1], 0x1) 251 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff { 252 throw("atomicand8") 253 } 254 255 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0) 256 if j == j { 257 throw("float64nan") 258 } 259 if !(j != j) { 260 throw("float64nan1") 261 } 262 263 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1) 264 if j == j1 { 265 throw("float64nan2") 266 } 267 if !(j != j1) { 268 throw("float64nan3") 269 } 270 271 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0) 272 if i == i { 273 throw("float32nan") 274 } 275 if i == i { 276 throw("float32nan1") 277 } 278 279 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1) 280 if i == i1 { 281 throw("float32nan2") 282 } 283 if i == i1 { 284 throw("float32nan3") 285 } 286 287 testAtomic64() 288 289 if fixedStack != round2(fixedStack) { 290 throw("FixedStack is not power-of-2") 291 } 292 293 if !checkASM() { 294 throw("assembly checks failed") 295 } 296 } 297 298 type dbgVar struct { 299 name string 300 value *int32 // for variables that can only be set at startup 301 atomic *atomic.Int32 // for variables that can be changed during execution 302 def int32 // default value (ideally zero) 303 } 304 305 // Holds variables parsed from GODEBUG env var, 306 // except for "memprofilerate" since there is an 307 // existing int var for that value, which may 308 // already have an initial value. 309 var debug struct { 310 cgocheck int32 311 clobberfree int32 312 dontfreezetheworld int32 313 efence int32 314 gccheckmark int32 315 gcpacertrace int32 316 gcshrinkstackoff int32 317 gcstoptheworld int32 318 gctrace int32 319 invalidptr int32 320 madvdontneed int32 // for Linux; issue 28466 321 scavtrace int32 322 scheddetail int32 323 schedtrace int32 324 tracebackancestors int32 325 asyncpreemptoff int32 326 harddecommit int32 327 adaptivestackstart int32 328 tracefpunwindoff int32 329 330 // debug.malloc is used as a combined debug check 331 // in the malloc function and should be set 332 // if any of the below debug options is != 0. 333 malloc bool 334 allocfreetrace int32 335 inittrace int32 336 sbrk int32 337 338 panicnil atomic.Int32 339 } 340 341 var dbgvars = []*dbgVar{ 342 {name: "allocfreetrace", value: &debug.allocfreetrace}, 343 {name: "clobberfree", value: &debug.clobberfree}, 344 {name: "cgocheck", value: &debug.cgocheck}, 345 {name: "dontfreezetheworld", value: &debug.dontfreezetheworld}, 346 {name: "efence", value: &debug.efence}, 347 {name: "gccheckmark", value: &debug.gccheckmark}, 348 {name: "gcpacertrace", value: &debug.gcpacertrace}, 349 {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff}, 350 {name: "gcstoptheworld", value: &debug.gcstoptheworld}, 351 {name: "gctrace", value: &debug.gctrace}, 352 {name: "invalidptr", value: &debug.invalidptr}, 353 {name: "madvdontneed", value: &debug.madvdontneed}, 354 {name: "sbrk", value: &debug.sbrk}, 355 {name: "scavtrace", value: &debug.scavtrace}, 356 {name: "scheddetail", value: &debug.scheddetail}, 357 {name: "schedtrace", value: &debug.schedtrace}, 358 {name: "tracebackancestors", value: &debug.tracebackancestors}, 359 {name: "asyncpreemptoff", value: &debug.asyncpreemptoff}, 360 {name: "inittrace", value: &debug.inittrace}, 361 {name: "harddecommit", value: &debug.harddecommit}, 362 {name: "adaptivestackstart", value: &debug.adaptivestackstart}, 363 {name: "tracefpunwindoff", value: &debug.tracefpunwindoff}, 364 {name: "panicnil", atomic: &debug.panicnil}, 365 } 366 367 func parsedebugvars() { 368 // defaults 369 debug.cgocheck = 1 370 debug.invalidptr = 1 371 debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off 372 if GOOS == "linux" { 373 // On Linux, MADV_FREE is faster than MADV_DONTNEED, 374 // but doesn't affect many of the statistics that 375 // MADV_DONTNEED does until the memory is actually 376 // reclaimed. This generally leads to poor user 377 // experience, like confusing stats in top and other 378 // monitoring tools; and bad integration with 379 // management systems that respond to memory usage. 380 // Hence, default to MADV_DONTNEED. 381 debug.madvdontneed = 1 382 } 383 384 godebug := gogetenv("GODEBUG") 385 386 p := new(string) 387 *p = godebug 388 godebugEnv.Store(p) 389 390 // apply runtime defaults, if any 391 for _, v := range dbgvars { 392 if v.def != 0 { 393 // Every var should have either v.value or v.atomic set. 394 if v.value != nil { 395 *v.value = v.def 396 } else if v.atomic != nil { 397 v.atomic.Store(v.def) 398 } 399 } 400 } 401 402 // apply compile-time GODEBUG settings 403 parsegodebug(godebugDefault, nil) 404 405 // apply environment settings 406 parsegodebug(godebug, nil) 407 408 debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0 409 410 setTraceback(gogetenv("GOTRACEBACK")) 411 traceback_env = traceback_cache 412 } 413 414 // reparsedebugvars reparses the runtime's debug variables 415 // because the environment variable has been changed to env. 416 func reparsedebugvars(env string) { 417 seen := make(map[string]bool) 418 // apply environment settings 419 parsegodebug(env, seen) 420 // apply compile-time GODEBUG settings for as-yet-unseen variables 421 parsegodebug(godebugDefault, seen) 422 // apply defaults for as-yet-unseen variables 423 for _, v := range dbgvars { 424 if v.atomic != nil && !seen[v.name] { 425 v.atomic.Store(0) 426 } 427 } 428 } 429 430 // parsegodebug parses the godebug string, updating variables listed in dbgvars. 431 // If seen == nil, this is startup time and we process the string left to right 432 // overwriting older settings with newer ones. 433 // If seen != nil, $GODEBUG has changed and we are doing an 434 // incremental update. To avoid flapping in the case where a value is 435 // set multiple times (perhaps in the default and the environment, 436 // or perhaps twice in the environment), we process the string right-to-left 437 // and only change values not already seen. After doing this for both 438 // the environment and the default settings, the caller must also call 439 // cleargodebug(seen) to reset any now-unset values back to their defaults. 440 func parsegodebug(godebug string, seen map[string]bool) { 441 for p := godebug; p != ""; { 442 var field string 443 if seen == nil { 444 // startup: process left to right, overwriting older settings with newer 445 i := bytealg.IndexByteString(p, ',') 446 if i < 0 { 447 field, p = p, "" 448 } else { 449 field, p = p[:i], p[i+1:] 450 } 451 } else { 452 // incremental update: process right to left, updating and skipping seen 453 i := len(p) - 1 454 for i >= 0 && p[i] != ',' { 455 i-- 456 } 457 if i < 0 { 458 p, field = "", p 459 } else { 460 p, field = p[:i], p[i+1:] 461 } 462 } 463 i := bytealg.IndexByteString(field, '=') 464 if i < 0 { 465 continue 466 } 467 key, value := field[:i], field[i+1:] 468 if seen[key] { 469 continue 470 } 471 if seen != nil { 472 seen[key] = true 473 } 474 475 // Update MemProfileRate directly here since it 476 // is int, not int32, and should only be updated 477 // if specified in GODEBUG. 478 if seen == nil && key == "memprofilerate" { 479 if n, ok := atoi(value); ok { 480 MemProfileRate = n 481 } 482 } else { 483 for _, v := range dbgvars { 484 if v.name == key { 485 if n, ok := atoi32(value); ok { 486 if seen == nil && v.value != nil { 487 *v.value = n 488 } else if v.atomic != nil { 489 v.atomic.Store(n) 490 } 491 } 492 } 493 } 494 } 495 } 496 497 if debug.cgocheck > 1 { 498 throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.") 499 } 500 } 501 502 //go:linkname setTraceback runtime/debug.SetTraceback 503 func setTraceback(level string) { 504 var t uint32 505 switch level { 506 case "none": 507 t = 0 508 case "single", "": 509 t = 1 << tracebackShift 510 case "all": 511 t = 1<<tracebackShift | tracebackAll 512 case "system": 513 t = 2<<tracebackShift | tracebackAll 514 case "crash": 515 t = 2<<tracebackShift | tracebackAll | tracebackCrash 516 case "wer": 517 if GOOS == "windows" { 518 t = 2<<tracebackShift | tracebackAll | tracebackCrash 519 enableWER() 520 break 521 } 522 fallthrough 523 default: 524 t = tracebackAll 525 if n, ok := atoi(level); ok && n == int(uint32(n)) { 526 t |= uint32(n) << tracebackShift 527 } 528 } 529 // when C owns the process, simply exit'ing the process on fatal errors 530 // and panics is surprising. Be louder and abort instead. 531 if islibrary || isarchive { 532 t |= tracebackCrash 533 } 534 535 t |= traceback_env 536 537 atomic.Store(&traceback_cache, t) 538 } 539 540 // Poor mans 64-bit division. 541 // This is a very special function, do not use it if you are not sure what you are doing. 542 // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions. 543 // Handles overflow in a time-specific manner. 544 // This keeps us within no-split stack limits on 32-bit processors. 545 // 546 //go:nosplit 547 func timediv(v int64, div int32, rem *int32) int32 { 548 res := int32(0) 549 for bit := 30; bit >= 0; bit-- { 550 if v >= int64(div)<<uint(bit) { 551 v = v - (int64(div) << uint(bit)) 552 // Before this for loop, res was 0, thus all these 553 // power of 2 increments are now just bitsets. 554 res |= 1 << uint(bit) 555 } 556 } 557 if v >= int64(div) { 558 if rem != nil { 559 *rem = 0 560 } 561 return 0x7fffffff 562 } 563 if rem != nil { 564 *rem = int32(v) 565 } 566 return res 567 } 568 569 // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block. 570 571 //go:nosplit 572 func acquirem() *m { 573 gp := getg() 574 gp.m.locks++ 575 return gp.m 576 } 577 578 //go:nosplit 579 func releasem(mp *m) { 580 gp := getg() 581 mp.locks-- 582 if mp.locks == 0 && gp.preempt { 583 // restore the preemption request in case we've cleared it in newstack 584 gp.stackguard0 = stackPreempt 585 } 586 } 587 588 //go:linkname reflect_typelinks reflect.typelinks 589 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) { 590 modules := activeModules() 591 sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)} 592 ret := [][]int32{modules[0].typelinks} 593 for _, md := range modules[1:] { 594 sections = append(sections, unsafe.Pointer(md.types)) 595 ret = append(ret, md.typelinks) 596 } 597 return sections, ret 598 } 599 600 // reflect_resolveNameOff resolves a name offset from a base pointer. 601 // 602 //go:linkname reflect_resolveNameOff reflect.resolveNameOff 603 func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer { 604 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes) 605 } 606 607 // reflect_resolveTypeOff resolves an *rtype offset from a base type. 608 // 609 //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff 610 func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { 611 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off))) 612 } 613 614 // reflect_resolveTextOff resolves a function pointer offset from a base type. 615 // 616 //go:linkname reflect_resolveTextOff reflect.resolveTextOff 617 func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { 618 return toRType((*_type)(rtype)).textOff(textOff(off)) 619 620 } 621 622 // reflectlite_resolveNameOff resolves a name offset from a base pointer. 623 // 624 //go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff 625 func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer { 626 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes) 627 } 628 629 // reflectlite_resolveTypeOff resolves an *rtype offset from a base type. 630 // 631 //go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff 632 func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { 633 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off))) 634 } 635 636 // reflect_addReflectOff adds a pointer to the reflection offset lookup map. 637 // 638 //go:linkname reflect_addReflectOff reflect.addReflectOff 639 func reflect_addReflectOff(ptr unsafe.Pointer) int32 { 640 reflectOffsLock() 641 if reflectOffs.m == nil { 642 reflectOffs.m = make(map[int32]unsafe.Pointer) 643 reflectOffs.minv = make(map[unsafe.Pointer]int32) 644 reflectOffs.next = -1 645 } 646 id, found := reflectOffs.minv[ptr] 647 if !found { 648 id = reflectOffs.next 649 reflectOffs.next-- // use negative offsets as IDs to aid debugging 650 reflectOffs.m[id] = ptr 651 reflectOffs.minv[ptr] = id 652 } 653 reflectOffsUnlock() 654 return id 655 }