github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/runtime/runtime1.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "internal/bytealg" 9 "internal/goarch" 10 "runtime/internal/atomic" 11 "unsafe" 12 ) 13 14 // Keep a cached value to make gotraceback fast, 15 // since we call it on every call to gentraceback. 16 // The cached value is a uint32 in which the low bits 17 // are the "crash" and "all" settings and the remaining 18 // bits are the traceback value (0 off, 1 on, 2 include system). 19 const ( 20 tracebackCrash = 1 << iota 21 tracebackAll 22 tracebackShift = iota 23 ) 24 25 var traceback_cache uint32 = 2 << tracebackShift 26 var traceback_env uint32 27 28 // gotraceback returns the current traceback settings. 29 // 30 // If level is 0, suppress all tracebacks. 31 // If level is 1, show tracebacks, but exclude runtime frames. 32 // If level is 2, show tracebacks including runtime frames. 33 // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine. 34 // If crash is set, crash (core dump, etc) after tracebacking. 35 // 36 //go:nosplit 37 func gotraceback() (level int32, all, crash bool) { 38 gp := getg() 39 t := atomic.Load(&traceback_cache) 40 crash = t&tracebackCrash != 0 41 all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0 42 if gp.m.traceback != 0 { 43 level = int32(gp.m.traceback) 44 } else if gp.m.throwing >= throwTypeRuntime { 45 // Always include runtime frames in runtime throws unless 46 // otherwise overridden by m.traceback. 47 level = 2 48 } else { 49 level = int32(t >> tracebackShift) 50 } 51 return 52 } 53 54 var ( 55 argc int32 56 argv **byte 57 ) 58 59 // nosplit for use in linux startup sysargs. 60 // 61 //go:nosplit 62 func argv_index(argv **byte, i int32) *byte { 63 return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize)) 64 } 65 66 func args(c int32, v **byte) { 67 argc = c 68 argv = v 69 sysargs(c, v) 70 } 71 72 func goargs() { 73 if GOOS == "windows" { 74 return 75 } 76 argslice = make([]string, argc) 77 for i := int32(0); i < argc; i++ { 78 argslice[i] = gostringnocopy(argv_index(argv, i)) 79 } 80 } 81 82 func goenvs_unix() { 83 // TODO(austin): ppc64 in dynamic linking mode doesn't 84 // guarantee env[] will immediately follow argv. Might cause 85 // problems. 86 n := int32(0) 87 for argv_index(argv, argc+1+n) != nil { 88 n++ 89 } 90 91 envs = make([]string, n) 92 for i := int32(0); i < n; i++ { 93 envs[i] = gostring(argv_index(argv, argc+1+i)) 94 } 95 } 96 97 func environ() []string { 98 return envs 99 } 100 101 // TODO: These should be locals in testAtomic64, but we don't 8-byte 102 // align stack variables on 386. 103 var test_z64, test_x64 uint64 104 105 func testAtomic64() { 106 test_z64 = 42 107 test_x64 = 0 108 if atomic.Cas64(&test_z64, test_x64, 1) { 109 throw("cas64 failed") 110 } 111 if test_x64 != 0 { 112 throw("cas64 failed") 113 } 114 test_x64 = 42 115 if !atomic.Cas64(&test_z64, test_x64, 1) { 116 throw("cas64 failed") 117 } 118 if test_x64 != 42 || test_z64 != 1 { 119 throw("cas64 failed") 120 } 121 if atomic.Load64(&test_z64) != 1 { 122 throw("load64 failed") 123 } 124 atomic.Store64(&test_z64, (1<<40)+1) 125 if atomic.Load64(&test_z64) != (1<<40)+1 { 126 throw("store64 failed") 127 } 128 if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 { 129 throw("xadd64 failed") 130 } 131 if atomic.Load64(&test_z64) != (2<<40)+2 { 132 throw("xadd64 failed") 133 } 134 if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 { 135 throw("xchg64 failed") 136 } 137 if atomic.Load64(&test_z64) != (3<<40)+3 { 138 throw("xchg64 failed") 139 } 140 } 141 142 func check() { 143 var ( 144 a int8 145 b uint8 146 c int16 147 d uint16 148 e int32 149 f uint32 150 g int64 151 h uint64 152 i, i1 float32 153 j, j1 float64 154 k unsafe.Pointer 155 l *uint16 156 m [4]byte 157 ) 158 type x1t struct { 159 x uint8 160 } 161 type y1t struct { 162 x1 x1t 163 y uint8 164 } 165 var x1 x1t 166 var y1 y1t 167 168 if unsafe.Sizeof(a) != 1 { 169 throw("bad a") 170 } 171 if unsafe.Sizeof(b) != 1 { 172 throw("bad b") 173 } 174 if unsafe.Sizeof(c) != 2 { 175 throw("bad c") 176 } 177 if unsafe.Sizeof(d) != 2 { 178 throw("bad d") 179 } 180 if unsafe.Sizeof(e) != 4 { 181 throw("bad e") 182 } 183 if unsafe.Sizeof(f) != 4 { 184 throw("bad f") 185 } 186 if unsafe.Sizeof(g) != 8 { 187 throw("bad g") 188 } 189 if unsafe.Sizeof(h) != 8 { 190 throw("bad h") 191 } 192 if unsafe.Sizeof(i) != 4 { 193 throw("bad i") 194 } 195 if unsafe.Sizeof(j) != 8 { 196 throw("bad j") 197 } 198 if unsafe.Sizeof(k) != goarch.PtrSize { 199 throw("bad k") 200 } 201 if unsafe.Sizeof(l) != goarch.PtrSize { 202 throw("bad l") 203 } 204 if unsafe.Sizeof(x1) != 1 { 205 throw("bad unsafe.Sizeof x1") 206 } 207 if unsafe.Offsetof(y1.y) != 1 { 208 throw("bad offsetof y1.y") 209 } 210 if unsafe.Sizeof(y1) != 2 { 211 throw("bad unsafe.Sizeof y1") 212 } 213 214 if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 { 215 throw("bad timediv") 216 } 217 218 var z uint32 219 z = 1 220 if !atomic.Cas(&z, 1, 2) { 221 throw("cas1") 222 } 223 if z != 2 { 224 throw("cas2") 225 } 226 227 z = 4 228 if atomic.Cas(&z, 5, 6) { 229 throw("cas3") 230 } 231 if z != 4 { 232 throw("cas4") 233 } 234 235 z = 0xffffffff 236 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) { 237 throw("cas5") 238 } 239 if z != 0xfffffffe { 240 throw("cas6") 241 } 242 243 m = [4]byte{1, 1, 1, 1} 244 atomic.Or8(&m[1], 0xf0) 245 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 { 246 throw("atomicor8") 247 } 248 249 m = [4]byte{0xff, 0xff, 0xff, 0xff} 250 atomic.And8(&m[1], 0x1) 251 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff { 252 throw("atomicand8") 253 } 254 255 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0) 256 if j == j { 257 throw("float64nan") 258 } 259 if !(j != j) { 260 throw("float64nan1") 261 } 262 263 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1) 264 if j == j1 { 265 throw("float64nan2") 266 } 267 if !(j != j1) { 268 throw("float64nan3") 269 } 270 271 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0) 272 if i == i { 273 throw("float32nan") 274 } 275 if i == i { 276 throw("float32nan1") 277 } 278 279 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1) 280 if i == i1 { 281 throw("float32nan2") 282 } 283 if i == i1 { 284 throw("float32nan3") 285 } 286 287 testAtomic64() 288 289 if fixedStack != round2(fixedStack) { 290 throw("FixedStack is not power-of-2") 291 } 292 293 if !checkASM() { 294 throw("assembly checks failed") 295 } 296 } 297 298 type dbgVar struct { 299 name string 300 value *int32 // for variables that can only be set at startup 301 atomic *atomic.Int32 // for variables that can be changed during execution 302 def int32 // default value (ideally zero) 303 } 304 305 // Holds variables parsed from GODEBUG env var, 306 // except for "memprofilerate" since there is an 307 // existing int var for that value, which may 308 // already have an initial value. 309 var debug struct { 310 cgocheck int32 311 clobberfree int32 312 disablethp int32 313 dontfreezetheworld int32 314 efence int32 315 gccheckmark int32 316 gcpacertrace int32 317 gcshrinkstackoff int32 318 gcstoptheworld int32 319 gctrace int32 320 invalidptr int32 321 madvdontneed int32 // for Linux; issue 28466 322 runtimeContentionStacks atomic.Int32 323 scavtrace int32 324 scheddetail int32 325 schedtrace int32 326 tracebackancestors int32 327 asyncpreemptoff int32 328 harddecommit int32 329 adaptivestackstart int32 330 tracefpunwindoff int32 331 traceadvanceperiod int32 332 333 // debug.malloc is used as a combined debug check 334 // in the malloc function and should be set 335 // if any of the below debug options is != 0. 336 malloc bool 337 allocfreetrace int32 338 inittrace int32 339 sbrk int32 340 341 panicnil atomic.Int32 342 343 // asynctimerchan controls whether timer channels 344 // behave asynchronously (as in Go 1.22 and earlier) 345 // instead of their Go 1.23+ synchronous behavior. 346 // The value can change at any time (in response to os.Setenv("GODEBUG")) 347 // and affects all extant timer channels immediately. 348 // Programs wouldn't normally change over an execution, 349 // but allowing it is convenient for testing and for programs 350 // that do an os.Setenv in main.init or main.main. 351 asynctimerchan atomic.Int32 352 } 353 354 var dbgvars = []*dbgVar{ 355 {name: "adaptivestackstart", value: &debug.adaptivestackstart}, 356 {name: "allocfreetrace", value: &debug.allocfreetrace}, 357 {name: "asyncpreemptoff", value: &debug.asyncpreemptoff}, 358 {name: "asynctimerchan", atomic: &debug.asynctimerchan}, 359 {name: "cgocheck", value: &debug.cgocheck}, 360 {name: "clobberfree", value: &debug.clobberfree}, 361 {name: "disablethp", value: &debug.disablethp}, 362 {name: "dontfreezetheworld", value: &debug.dontfreezetheworld}, 363 {name: "efence", value: &debug.efence}, 364 {name: "gccheckmark", value: &debug.gccheckmark}, 365 {name: "gcpacertrace", value: &debug.gcpacertrace}, 366 {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff}, 367 {name: "gcstoptheworld", value: &debug.gcstoptheworld}, 368 {name: "gctrace", value: &debug.gctrace}, 369 {name: "harddecommit", value: &debug.harddecommit}, 370 {name: "inittrace", value: &debug.inittrace}, 371 {name: "invalidptr", value: &debug.invalidptr}, 372 {name: "madvdontneed", value: &debug.madvdontneed}, 373 {name: "panicnil", atomic: &debug.panicnil}, 374 {name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks}, 375 {name: "sbrk", value: &debug.sbrk}, 376 {name: "scavtrace", value: &debug.scavtrace}, 377 {name: "scheddetail", value: &debug.scheddetail}, 378 {name: "schedtrace", value: &debug.schedtrace}, 379 {name: "traceadvanceperiod", value: &debug.traceadvanceperiod}, 380 {name: "tracebackancestors", value: &debug.tracebackancestors}, 381 {name: "tracefpunwindoff", value: &debug.tracefpunwindoff}, 382 } 383 384 func parsedebugvars() { 385 // defaults 386 debug.cgocheck = 1 387 debug.invalidptr = 1 388 debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off 389 if GOOS == "linux" { 390 // On Linux, MADV_FREE is faster than MADV_DONTNEED, 391 // but doesn't affect many of the statistics that 392 // MADV_DONTNEED does until the memory is actually 393 // reclaimed. This generally leads to poor user 394 // experience, like confusing stats in top and other 395 // monitoring tools; and bad integration with 396 // management systems that respond to memory usage. 397 // Hence, default to MADV_DONTNEED. 398 debug.madvdontneed = 1 399 } 400 debug.traceadvanceperiod = defaultTraceAdvancePeriod 401 402 godebug := gogetenv("GODEBUG") 403 404 p := new(string) 405 *p = godebug 406 godebugEnv.Store(p) 407 408 // apply runtime defaults, if any 409 for _, v := range dbgvars { 410 if v.def != 0 { 411 // Every var should have either v.value or v.atomic set. 412 if v.value != nil { 413 *v.value = v.def 414 } else if v.atomic != nil { 415 v.atomic.Store(v.def) 416 } 417 } 418 } 419 420 // apply compile-time GODEBUG settings 421 parsegodebug(godebugDefault, nil) 422 423 // apply environment settings 424 parsegodebug(godebug, nil) 425 426 debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0 427 428 setTraceback(gogetenv("GOTRACEBACK")) 429 traceback_env = traceback_cache 430 } 431 432 // reparsedebugvars reparses the runtime's debug variables 433 // because the environment variable has been changed to env. 434 func reparsedebugvars(env string) { 435 seen := make(map[string]bool) 436 // apply environment settings 437 parsegodebug(env, seen) 438 // apply compile-time GODEBUG settings for as-yet-unseen variables 439 parsegodebug(godebugDefault, seen) 440 // apply defaults for as-yet-unseen variables 441 for _, v := range dbgvars { 442 if v.atomic != nil && !seen[v.name] { 443 v.atomic.Store(0) 444 } 445 } 446 } 447 448 // parsegodebug parses the godebug string, updating variables listed in dbgvars. 449 // If seen == nil, this is startup time and we process the string left to right 450 // overwriting older settings with newer ones. 451 // If seen != nil, $GODEBUG has changed and we are doing an 452 // incremental update. To avoid flapping in the case where a value is 453 // set multiple times (perhaps in the default and the environment, 454 // or perhaps twice in the environment), we process the string right-to-left 455 // and only change values not already seen. After doing this for both 456 // the environment and the default settings, the caller must also call 457 // cleargodebug(seen) to reset any now-unset values back to their defaults. 458 func parsegodebug(godebug string, seen map[string]bool) { 459 for p := godebug; p != ""; { 460 var field string 461 if seen == nil { 462 // startup: process left to right, overwriting older settings with newer 463 i := bytealg.IndexByteString(p, ',') 464 if i < 0 { 465 field, p = p, "" 466 } else { 467 field, p = p[:i], p[i+1:] 468 } 469 } else { 470 // incremental update: process right to left, updating and skipping seen 471 i := len(p) - 1 472 for i >= 0 && p[i] != ',' { 473 i-- 474 } 475 if i < 0 { 476 p, field = "", p 477 } else { 478 p, field = p[:i], p[i+1:] 479 } 480 } 481 i := bytealg.IndexByteString(field, '=') 482 if i < 0 { 483 continue 484 } 485 key, value := field[:i], field[i+1:] 486 if seen[key] { 487 continue 488 } 489 if seen != nil { 490 seen[key] = true 491 } 492 493 // Update MemProfileRate directly here since it 494 // is int, not int32, and should only be updated 495 // if specified in GODEBUG. 496 if seen == nil && key == "memprofilerate" { 497 if n, ok := atoi(value); ok { 498 MemProfileRate = n 499 } 500 } else { 501 for _, v := range dbgvars { 502 if v.name == key { 503 if n, ok := atoi32(value); ok { 504 if seen == nil && v.value != nil { 505 *v.value = n 506 } else if v.atomic != nil { 507 v.atomic.Store(n) 508 } 509 } 510 } 511 } 512 } 513 } 514 515 if debug.cgocheck > 1 { 516 throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.") 517 } 518 } 519 520 //go:linkname setTraceback runtime/debug.SetTraceback 521 func setTraceback(level string) { 522 var t uint32 523 switch level { 524 case "none": 525 t = 0 526 case "single", "": 527 t = 1 << tracebackShift 528 case "all": 529 t = 1<<tracebackShift | tracebackAll 530 case "system": 531 t = 2<<tracebackShift | tracebackAll 532 case "crash": 533 t = 2<<tracebackShift | tracebackAll | tracebackCrash 534 case "wer": 535 if GOOS == "windows" { 536 t = 2<<tracebackShift | tracebackAll | tracebackCrash 537 enableWER() 538 break 539 } 540 fallthrough 541 default: 542 t = tracebackAll 543 if n, ok := atoi(level); ok && n == int(uint32(n)) { 544 t |= uint32(n) << tracebackShift 545 } 546 } 547 // when C owns the process, simply exit'ing the process on fatal errors 548 // and panics is surprising. Be louder and abort instead. 549 if islibrary || isarchive { 550 t |= tracebackCrash 551 } 552 553 t |= traceback_env 554 555 atomic.Store(&traceback_cache, t) 556 } 557 558 // Poor mans 64-bit division. 559 // This is a very special function, do not use it if you are not sure what you are doing. 560 // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions. 561 // Handles overflow in a time-specific manner. 562 // This keeps us within no-split stack limits on 32-bit processors. 563 // 564 //go:nosplit 565 func timediv(v int64, div int32, rem *int32) int32 { 566 res := int32(0) 567 for bit := 30; bit >= 0; bit-- { 568 if v >= int64(div)<<uint(bit) { 569 v = v - (int64(div) << uint(bit)) 570 // Before this for loop, res was 0, thus all these 571 // power of 2 increments are now just bitsets. 572 res |= 1 << uint(bit) 573 } 574 } 575 if v >= int64(div) { 576 if rem != nil { 577 *rem = 0 578 } 579 return 0x7fffffff 580 } 581 if rem != nil { 582 *rem = int32(v) 583 } 584 return res 585 } 586 587 // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block. 588 589 //go:nosplit 590 func acquirem() *m { 591 gp := getg() 592 gp.m.locks++ 593 return gp.m 594 } 595 596 //go:nosplit 597 func releasem(mp *m) { 598 gp := getg() 599 mp.locks-- 600 if mp.locks == 0 && gp.preempt { 601 // restore the preemption request in case we've cleared it in newstack 602 gp.stackguard0 = stackPreempt 603 } 604 } 605 606 //go:linkname reflect_typelinks reflect.typelinks 607 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) { 608 modules := activeModules() 609 sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)} 610 ret := [][]int32{modules[0].typelinks} 611 for _, md := range modules[1:] { 612 sections = append(sections, unsafe.Pointer(md.types)) 613 ret = append(ret, md.typelinks) 614 } 615 return sections, ret 616 } 617 618 // reflect_resolveNameOff resolves a name offset from a base pointer. 619 // 620 //go:linkname reflect_resolveNameOff reflect.resolveNameOff 621 func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer { 622 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes) 623 } 624 625 // reflect_resolveTypeOff resolves an *rtype offset from a base type. 626 // 627 //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff 628 func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { 629 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off))) 630 } 631 632 // reflect_resolveTextOff resolves a function pointer offset from a base type. 633 // 634 //go:linkname reflect_resolveTextOff reflect.resolveTextOff 635 func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { 636 return toRType((*_type)(rtype)).textOff(textOff(off)) 637 } 638 639 // reflectlite_resolveNameOff resolves a name offset from a base pointer. 640 // 641 //go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff 642 func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer { 643 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes) 644 } 645 646 // reflectlite_resolveTypeOff resolves an *rtype offset from a base type. 647 // 648 //go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff 649 func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { 650 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off))) 651 } 652 653 // reflect_addReflectOff adds a pointer to the reflection offset lookup map. 654 // 655 //go:linkname reflect_addReflectOff reflect.addReflectOff 656 func reflect_addReflectOff(ptr unsafe.Pointer) int32 { 657 reflectOffsLock() 658 if reflectOffs.m == nil { 659 reflectOffs.m = make(map[int32]unsafe.Pointer) 660 reflectOffs.minv = make(map[unsafe.Pointer]int32) 661 reflectOffs.next = -1 662 } 663 id, found := reflectOffs.minv[ptr] 664 if !found { 665 id = reflectOffs.next 666 reflectOffs.next-- // use negative offsets as IDs to aid debugging 667 reflectOffs.m[id] = ptr 668 reflectOffs.minv[ptr] = id 669 } 670 reflectOffsUnlock() 671 return id 672 }