github.com/goplus/igop@v0.25.0/interp.go (about) 1 // Copyright 2013 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Package igop defines an interpreter for the SSA 6 // representation of Go programs. 7 // 8 // This interpreter is provided as an adjunct for testing the SSA 9 // construction algorithm. Its purpose is to provide a minimal 10 // metacircular implementation of the dynamic semantics of each SSA 11 // instruction. It is not, and will never be, a production-quality Go 12 // interpreter. 13 // 14 // The following is a partial list of Go features that are currently 15 // unsupported or incomplete in the interpreter. 16 // 17 // * Unsafe operations, including all uses of unsafe.Pointer, are 18 // impossible to support given the "boxed" value representation we 19 // have chosen. 20 // 21 // * The reflect package is only partially implemented. 22 // 23 // * The "testing" package is no longer supported because it 24 // depends on low-level details that change too often. 25 // 26 // * "sync/atomic" operations are not atomic due to the "boxed" value 27 // representation: it is not possible to read, modify and write an 28 // interface value atomically. As a consequence, Mutexes are currently 29 // broken. 30 // 31 // * recover is only partially implemented. Also, the interpreter 32 // makes no attempt to distinguish target panics from interpreter 33 // crashes. 34 // 35 // * the sizes of the int, uint and uintptr types in the target 36 // program are assumed to be the same as those of the interpreter 37 // itself. 38 // 39 // * all values occupy space, even those of types defined by the spec 40 // to have zero size, e.g. struct{}. This can cause asymptotic 41 // performance degradation. 42 // 43 // * os.Exit is implemented using panic, causing deferred functions to 44 // run. 
package igop

import (
	"fmt"
	"go/ast"
	"go/constant"
	"go/token"
	"go/types"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"unsafe"

	"github.com/goplus/igop/load"
	"github.com/goplus/reflectx"
	"github.com/visualfc/gid"
	"github.com/visualfc/xtype"
	"golang.org/x/tools/go/ssa"
)

// maxMemLen is the maximum memory length used for allocation bounds
// checks; it is sized per platform word size in init below.
var (
	maxMemLen int
)

// intSize is the width of int in bits: 32<<0 on 32-bit, 32<<1 on 64-bit
// (the shift amount is the sign bit of ^uint(0)).
const intSize = 32 << (^uint(0) >> 63)

func init() {
	if intSize == 32 {
		maxMemLen = 1<<31 - 1
	} else {
		// 64-bit: cap far below the address-space limit.
		v := int64(1) << 59
		maxMemLen = int(v)
	}
}

// Interp holds the state of a single interpreter instance: the SSA
// program, type records, global storage and goroutine bookkeeping.
type Interp struct {
	ctx          *Context
	mainpkg      *ssa.Package                                // the SSA main package
	record       *TypesRecord                                // lookup type and ToType
	globals      map[string]value                            // addresses of global variables (immutable)
	chkinit      map[string]bool                             // init vars
	preloadTypes map[types.Type]reflect.Type                 // preload types.Type -> reflect.Type
	funcs        map[*ssa.Function]*function                 // ssa.Function -> *function
	msets        map[reflect.Type](map[string]*ssa.Function) // user defined type method sets
	chexit       chan int                                    // call os.Exit code by chan for runtime.Goexit
	cherror      chan PanicError                             // call by go func error for context
	deferMap     sync.Map                                    // defer goroutine id -> call frame
	rfuncMap     sync.Map                                    // reflect.Value(fn).Pointer -> *function
	typesMutex   sync.RWMutex                                // findType/toType mutex
	mainid       int64                                       // main goroutine id
	exitCode     int                                         // call os.Exit code
	goroutines   int32                                       // atomically updated
	deferCount   int32                                       // fast has defer check
	goexited     int32                                       // is call runtime.Goexit
	exited       int32                                       // is call os.Exit
}

// MainPkg returns the SSA main package being interpreted.
func (i *Interp) MainPkg() *ssa.Package {
	return i.mainpkg
}

// installed reports whether an extern (precompiled) package is
// registered for the given import path.
func (i *Interp) installed(path string) (pkg *Package, ok bool) {
	pkg, ok = i.ctx.Loader.Installed(path)
	return
}

// loadFunction returns the cached *function wrapper for fn, creating
// and caching one on first use. The wrapper pre-computes stack layout:
// result slots first, then params, then free variables.
func (i *Interp) loadFunction(fn *ssa.Function) *function {
	if pfn, ok := i.funcs[fn]; ok {
		return pfn
	}
	pfn := &function{
		Interp:     i,
		Fn:         fn,
		index:      make(map[ssa.Value]uint32),
		instrIndex: make(map[ssa.Instruction][]uint32),
		narg:       len(fn.Params),
		nenv:       len(fn.FreeVars),
	}
	if len(fn.Blocks) > 0 {
		pfn.Main = fn.Blocks[0]
	}
	if res := fn.Signature.Results(); res != nil {
		pfn.nres = res.Len()
		// stack is sized for results here; callers append args/env slots.
		pfn.stack = make([]value, pfn.nres)
	}
	i.funcs[fn] = pfn
	return pfn
}

// findType maps a reflect.Type back to its types.Type, guarded by
// typesMutex. local selects the local-only lookup table.
// NOTE(review): takes the write lock — presumably the record lookup may
// mutate internal caches; confirm before relaxing to RLock.
func (i *Interp) findType(rt reflect.Type, local bool) (types.Type, bool) {
	i.typesMutex.Lock()
	defer i.typesMutex.Unlock()
	if local {
		return i.record.LookupLocalTypes(rt)
	} else {
		return i.record.LookupTypes(rt)
	}
}

// tryDeferFrame returns the frame registered for the current goroutine
// while defers are running, or a fresh empty frame. The deferCount
// check is a fast path to skip the sync.Map lookup entirely.
func (i *Interp) tryDeferFrame() *frame {
	if i != nil && atomic.LoadInt32(&i.deferCount) != 0 {
		if f, ok := i.deferMap.Load(goroutineID()); ok {
			return f.(*frame)
		}
	}
	return &frame{}
}

// callFunctionByReflect invokes pfn through the reflect-based calling
// convention, using the current defer frame (if any) as the caller.
func (pfn *function) callFunctionByReflect(mtyp reflect.Type, args []reflect.Value, env []interface{}) []reflect.Value {
	return pfn.Interp.callFunctionByReflect(pfn.Interp.tryDeferFrame(), mtyp, pfn, args, env)
}

// FindMethod resolves fn (a method) to a callable usable with
// reflect.MakeFunc: first interpreted SSA methods, then registered
// extern values. Panics if the method cannot be found.
func (i *Interp) FindMethod(mtyp reflect.Type, fn *types.Func) func([]reflect.Value) []reflect.Value {
	typ := fn.Type().(*types.Signature).Recv().Type()
	if f := i.mainpkg.Prog.LookupMethod(typ, fn.Pkg(), fn.Name()); f != nil {
		pfn := i.loadFunction(f)
		return func(args []reflect.Value) []reflect.Value {
			return pfn.callFunctionByReflect(mtyp, args, nil)
		}
	}
	name := fn.FullName()
	if v, ok := externValues[name]; ok && v.Kind() == reflect.Func {
		if v.Type().IsVariadic() {
			return func(args []reflect.Value) []reflect.Value {
				return v.CallSlice(args)
			}
		}
		return func(args []reflect.Value) []reflect.Value {
			return v.Call(args)
		}
	}
	panic(fmt.Sprintf("Not found method %v", fn))
}

// makeFunction wraps pfn (with captured env) as a reflect.Value func of
// type typ so it can be passed to external Go code.
func (pfn *function) makeFunction(typ reflect.Type, env []value) reflect.Value {
	return reflect.MakeFunc(typ, func(args []reflect.Value) []reflect.Value {
		return pfn.Interp.callFunctionByReflect(pfn.Interp.tryDeferFrame(), typ, pfn, args, env)
	})
}

// _defer is one node of a frame's LIFO deferred-call list.
type _defer struct {
	fn      value       // deferred callee (ssa.Function, closure, builtin or extern)
	tail    *_defer     // next (earlier) defer in LIFO order
	args    []value     // evaluated call arguments
	ssaArgs []ssa.Value // corresponding SSA operands (for builtins)
}

// frame is one activation record of an interpreted function.
type frame struct {
	interp  *Interp
	caller  *frame
	callee  *frame
	pfn     *function
	_defer  *_defer
	_panic  *_panic
	block   *ssa.BasicBlock
	stack   []value // result args env datas
	ipc     int     // next instruction index; -1 means returned
	pred    int     // index of predecessor block (for phi resolution)
	deferid int64   // goroutine id while defers run, else 0
}

// dumpBlock prints block (and, recursively, its dominees) for
// debugging, marking the instruction pc with "=>".
func dumpBlock(block *ssa.BasicBlock, level int, pc ssa.Instruction) {
	if level == 0 {
		fmt.Printf("--- %v ---\n", block.Parent())
	}
	fmt.Printf("%v.%v %v Jump:%v Idom:%v\n", strings.Repeat(" ", level), block.Index, block.Comment, block.Succs, block.Idom())
	for _, instr := range block.Instrs {
		var head string
		if instr == pc {
			head = " " + strings.Repeat(" ", level) + "=>"
		} else {
			head = "   " + strings.Repeat(" ", level)
		}
		if value, ok := instr.(ssa.Value); ok {
			fmt.Printf("%v %-20T %-4v = %v %v\n", head, instr, value.Name(), instr, value.Type())
		} else {
			fmt.Printf("%v %-20T %v\n", head, instr, instr)
		}
	}
	for _, v := range block.Dominees() {
		dumpBlock(v, level+1, pc)
	}
}

type tasks struct {
	jumps map[*ssa.BasicBlock]bool
}

// checkJumps marks in jumps every block reachable from block, following
// the override edge in succs when present instead of the CFG edges.
func checkJumps(block *ssa.BasicBlock, jumps map[*ssa.BasicBlock]bool, succs map[*ssa.BasicBlock]*ssa.BasicBlock) {
	if s, ok := succs[block]; ok {
		if jumps[s] {
			return
		}
		jumps[s] = true
		checkJumps(s, jumps, succs)
		return
	}
	for _, s := range block.Succs {
		if jumps[s] {
			continue
		}
		jumps[s] = true
		checkJumps(s, jumps, succs)
	}
}

// checkRuns marks block and, following succs overrides or dominator
// children, every block that may still run.
func checkRuns(block *ssa.BasicBlock, jumps map[*ssa.BasicBlock]bool, succs map[*ssa.BasicBlock]*ssa.BasicBlock) {
	if jumps[block] {
		return
	}
	jumps[block] = true
	if s, ok := succs[block]; ok {
		checkRuns(s, jumps, succs)
		return
	}
	for _, s := range block.Dominees() {
		checkRuns(s, jumps, succs)
	}
}

// gc clears stack slots whose values can no longer be referenced from
// the remainder of the function, so the Go GC can reclaim them.
// Heuristic: collect candidate slots holding pointer-like kinds, then
// un-mark any slot still used by the rest of the current block or by
// any block that may still execute.
func (fr *frame) gc() {
	alloc := make(map[int]bool)
	// checkAlloc records pointer-like (GC-relevant) registers written by
	// instr; the packed word encodes kind (v>>30), reflect.Kind
	// (v>>24&0x3f) and register index (v&0xffffff).
	checkAlloc := func(instr ssa.Instruction) {
		for _, v := range fr.pfn.instrIndex[instr] {
			vk := kind(v >> 30)
			if vk.isStatic() {
				continue
			}
			rk := reflect.Kind(v >> 24 & 0x3f)
			switch rk {
			case reflect.String, reflect.Func, reflect.Ptr, reflect.Array, reflect.Slice, reflect.Map, reflect.Struct, reflect.Interface:
			default:
				continue
			}
			alloc[int(v&0xffffff)] = true
		}
	}
	// check params and freevar
	checkAlloc(nil)
	// check alloc: scan the current block up to the instruction just
	// executed (ipc was already advanced, hence ipc-1).
	cur := fr.pfn.ssaInstrs[fr.ipc-1]
	var remain int
	for i, instr := range fr.block.Instrs {
		checkAlloc(instr)
		if cur == instr {
			remain = i
			break
		}
	}

	// seen collects blocks that may still run after the current point.
	seen := make(map[*ssa.BasicBlock]bool)
	var checkChild func(block *ssa.BasicBlock)
	checkChild = func(block *ssa.BasicBlock) {
		for _, child := range block.Dominees() {
			if seen[child] {
				continue
			}
			seen[child] = true
			checkChild(child)
		}
	}
	// check block child
	checkChild(fr.block)

	block := fr.block
	// Walk up the dominator tree; inside loop headers, successors after
	// the current block may run again on the next iteration.
	for block != nil {
		idom := block.Idom()
		if idom == nil {
			break
		}
		var find bool
		switch idom.Comment {
		case "for.loop":
			find = true
		case "rangeindex.loop":
			find = true
		case "rangechan.loop": // intentionally not pre-marked
		case "rangeiter.loop": // intentionally not pre-marked
		}
		for _, v := range idom.Succs {
			if find {
				seen[v] = true
				checkChild(v)
			}
			if block == v {
				find = true
			}
		}
		for _, instr := range idom.Instrs {
			checkAlloc(instr)
		}
		block = idom
	}
	if fr.block.Comment == "for.done" {
		delete(seen, fr.block)
	}
	// Un-mark registers still used by the rest of the current block.
	var ops []*ssa.Value
	for _, instr := range fr.block.Instrs[remain+1:] {
		for _, op := range instr.Operands(ops[:0]) {
			if *op == nil {
				continue
			}
			reg := fr.pfn.regIndex(*op)
			alloc[int(reg)] = false
		}
	}
	// Un-mark registers used by any block that may still run.
	for block := range seen {
		var ops []*ssa.Value
		for _, instr := range block.Instrs {
			for _, op := range instr.Operands(ops[:0]) {
				if *op == nil {
					continue
				}
				reg := fr.pfn.regIndex(*op)
				alloc[int(reg)] = false
			}
		}
	}
	// Clear the remaining (dead) slots.
	for i, b := range alloc {
		if !b {
			continue
		}
		fr.stack[i] = nil
	}
}

// valid reports whether fr is a live, runnable frame.
func (fr *frame) valid() bool {
	return fr != nil && fr.pfn != nil && fr.block != nil
}

// pc returns a synthetic program counter (function base + instruction
// index) used for panic traces.
func (fr *frame) pc() uintptr {
	return uintptr(fr.pfn.base + fr.ipc)
}

// aborted reports whether fr stopped before a normal return (ipc is set
// to -1 on normal return).
func (fr *frame) aborted() bool {
	return fr != nil && fr.ipc != -1
}

func (fr *frame) setReg(ir register, v value) {
	fr.stack[ir] = v
}

func (fr *frame) reg(ir register) value {
	return fr.stack[ir]
}

// The typed accessors below unbox a stack slot via xtype without
// reflection; one per primitive kind.

func (fr *frame) bytes(ir register) []byte {
	return xtype.Bytes(fr.stack[ir])
}

func (fr *frame) runes(ir register) []rune {
	return xtype.Runes(fr.stack[ir])
}

func (fr *frame) bool(ir register) bool {
	return xtype.Bool(fr.stack[ir])
}

func (fr *frame) int(ir register) int {
	return xtype.Int(fr.stack[ir])
}

func (fr *frame) int8(ir register) int8 {
	return xtype.Int8(fr.stack[ir])
}

func (fr *frame) int16(ir register) int16 {
	return xtype.Int16(fr.stack[ir])
}

func (fr *frame) int32(ir register) int32 {
	return xtype.Int32(fr.stack[ir])
}

func (fr *frame) int64(ir register) int64 {
	return xtype.Int64(fr.stack[ir])
}

func (fr *frame) uint(ir register) uint {
	return xtype.Uint(fr.stack[ir])
}

func (fr *frame) uint8(ir register) uint8 {
	return xtype.Uint8(fr.stack[ir])
}

func (fr *frame) uint16(ir register) uint16 {
	return xtype.Uint16(fr.stack[ir])
}

func (fr *frame) uint32(ir register) uint32 {
	return xtype.Uint32(fr.stack[ir])
}

func (fr *frame) uint64(ir register) uint64 {
	return xtype.Uint64(fr.stack[ir])
}

func (fr *frame) uintptr(ir register) uintptr {
	return xtype.Uintptr(fr.stack[ir])
}

func (fr *frame) float32(ir register) float32 {
	return xtype.Float32(fr.stack[ir])
}

func (fr *frame) float64(ir register) float64 {
	return xtype.Float64(fr.stack[ir])
}

func (fr *frame) complex64(ir register) complex64 {
	return xtype.Complex64(fr.stack[ir])
}

func (fr *frame) complex128(ir register) complex128 {
	return xtype.Complex128(fr.stack[ir])
}

func (fr *frame) string(ir register) string {
	return xtype.String(fr.stack[ir])
}

func (fr *frame) pointer(ir register) unsafe.Pointer {
	return xtype.Pointer(fr.stack[ir])
}

func (fr *frame) copyReg(dst register, src register) {
	fr.stack[dst] = fr.stack[src]
}

// _panic models one in-flight panic; link chains earlier panics that
// were superseded while deferring.
type _panic struct {
	arg       interface{} // value passed to panic()
	link      *_panic     // previous panic, if any
	pcs       []uintptr   // synthetic pcs for the trace
	aborted   bool
	recovered bool
}

// isNil reports whether p represents no active panic (absent or
// already recovered).
func (p *_panic) isNil() bool {
	return p == nil || p.recovered
}

// runDefer runs a deferred call d.
// It always returns normally, but may set or clear fr._panic.
func (fr *frame) runDefer(d *_defer) (ok bool) {
	defer func() {
		if !ok {
			// Deferred call created a new state of panic.
			if fr._panic != nil {
				fr._panic.aborted = true
			}
			fr._panic = &_panic{arg: recover(), link: fr._panic}
			// no tail add callee.pc
			if d.tail != nil {
				callee := fr.callee
				for callee.aborted() {
					fr._panic.pcs = append([]uintptr{callee.pc()}, fr._panic.pcs...)
					callee = callee.callee
				}
			}
		}
	}()
	fr.interp.callDiscardsResult(fr, d.fn, d.args, d.ssaArgs)
	ok = true
	return
}

// runDefers executes fr's deferred function calls in LIFO order.
//
// On entry, fr._panic indicates a state of panic; if
// non-nil, fr._panic.arg contains the panic value.
//
// On completion, if a deferred call started a panic, or if no
// deferred call recovered from a previous state of panic, then
// runDefers itself panics after the last deferred call has run.
//
// If there was no initial state of panic, or it was recovered from,
// runDefers returns normally.
func (fr *frame) runDefers() {
	interp := fr.interp
	// Register this frame so extern calls made by the deferred code can
	// find it (see tryDeferFrame); deferCount is the fast-path gate.
	atomic.AddInt32(&interp.deferCount, 1)
	fr.deferid = goroutineID()
	interp.deferMap.Store(fr.deferid, fr)
	for d := fr._defer; d != nil; d = d.tail {
		fr.runDefer(d)
	}
	interp.deferMap.Delete(fr.deferid)
	atomic.AddInt32(&interp.deferCount, -1)
	fr.deferid = 0
	fr._defer = nil
	// runtime.Goexit() leaves fr._panic == nil
	if !fr._panic.isNil() {
		panic(fr._panic.arg) // new panic, or still panicking
	}
}

// lookupMethod returns the ssa.Function implementing method meth of
// type typ, which may be one of the interpreter's fake types.
551 func lookupMethod(i *Interp, typ types.Type, meth *types.Func) *ssa.Function { 552 return i.mainpkg.Prog.LookupMethod(typ, meth.Pkg(), meth.Name()) 553 } 554 555 func SetValue(v reflect.Value, x reflect.Value) { 556 switch v.Kind() { 557 case reflect.Bool: 558 v.SetBool(x.Bool()) 559 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 560 v.SetInt(x.Int()) 561 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: 562 v.SetUint(x.Uint()) 563 case reflect.Uintptr: 564 v.SetUint(x.Uint()) 565 case reflect.Float32, reflect.Float64: 566 v.SetFloat(x.Float()) 567 case reflect.Complex64, reflect.Complex128: 568 v.SetComplex(x.Complex()) 569 case reflect.String: 570 v.SetString(x.String()) 571 case reflect.UnsafePointer: 572 v.SetPointer(unsafe.Pointer(x.Pointer())) 573 default: 574 v.Set(x) 575 } 576 } 577 578 type DebugInfo struct { 579 *ssa.DebugRef 580 fset *token.FileSet 581 toValue func() (*types.Var, interface{}, bool) // var object value 582 } 583 584 func (i *DebugInfo) Position() token.Position { 585 return i.fset.Position(i.Pos()) 586 } 587 588 func (i *DebugInfo) AsVar() (*types.Var, interface{}, bool) { 589 return i.toValue() 590 } 591 592 func (i *DebugInfo) AsFunc() (*types.Func, bool) { 593 v, ok := i.Object().(*types.Func) 594 return v, ok 595 } 596 597 // prepareCall determines the function value and argument values for a 598 // function call in a Call, Go or Defer instruction, performing 599 // interface method lookup if needed. 
func (i *Interp) prepareCall(fr *frame, call *ssa.CallCommon, iv register, ia []register, ib []register) (fv value, args []value) {
	if call.Method == nil {
		// Ordinary (non-interface) call: resolve the callee statically.
		switch f := call.Value.(type) {
		case *ssa.Builtin:
			fv = f
		case *ssa.Function:
			if f.Blocks == nil {
				// No SSA body: must be an extern (precompiled) function.
				ext, ok := findExternFunc(i, f)
				if !ok {
					// skip pkg.init
					if f.Pkg != nil && f.Name() == "init" {
						fv = func() {}
					} else {
						panic(fmt.Errorf("no code for function: %v", f))
					}
				} else {
					fv = ext
				}
			} else {
				fv = f
			}
		case *ssa.MakeClosure:
			// Bind free variables from the registers in ib.
			var bindings []value
			for i := range f.Bindings {
				bindings = append(bindings, fr.reg(ib[i]))
			}
			fv = &closure{i.funcs[f.Fn.(*ssa.Function)], bindings}
		default:
			// Dynamic function value held in register iv.
			fv = fr.reg(iv)
		}
	} else {
		// Interface method call: dispatch on the dynamic type of the
		// receiver stored in register iv.
		v := fr.reg(iv)
		rtype := reflect.TypeOf(v)
		mname := call.Method.Name()
		if mset, ok := i.msets[rtype]; ok {
			// User-defined (interpreted) type: prefer its SSA method set.
			if f, ok := mset[mname]; ok {
				fv = f
			} else {
				ext, ok := findUserMethod(rtype, mname)
				if !ok {
					panic(fmt.Errorf("no code for method: %v.%v", rtype, mname))
				}
				fv = ext
			}
		} else {
			ext, ok := findExternMethod(rtype, mname)
			if !ok {
				panic(fmt.Errorf("no code for method: %v.%v", rtype, mname))
			}
			fv = ext
		}
		// The receiver becomes the first argument.
		args = append(args, v)
	}
	// Evaluate the remaining call arguments from their registers.
	for i := range call.Args {
		v := fr.reg(ia[i])
		args = append(args, v)
	}
	return
}

// call interprets a call to a function (function, builtin or closure)
// fn with arguments args, returning its result.
// ssaArgs carries the SSA operands, used by builtin calls.
func (i *Interp) call(caller *frame, fn value, args []value, ssaArgs []ssa.Value) value {
	switch fn := fn.(type) {
	case *ssa.Function:
		return i.callFunction(caller, i.funcs[fn], args, nil)
	case *closure:
		return i.callFunction(caller, fn.pfn, args, fn.env)
	case *ssa.Builtin:
		return i.callBuiltin(caller, fn, args, ssaArgs)
	case reflect.Value:
		return i.callExternal(caller, fn, args, nil)
	default:
		return i.callExternal(caller, reflect.ValueOf(fn), args, nil)
	}
}

// callDiscardsResult is like call but for call sites whose results are
// unused (e.g. deferred calls): it dispatches the same way and drops
// any return values.
func (i *Interp) callDiscardsResult(caller *frame, fn value, args []value, ssaArgs []ssa.Value) {
	switch fn := fn.(type) {
	case *ssa.Function:
		i.callFunctionDiscardsResult(caller, i.funcs[fn], args, nil)
	case *closure:
		i.callFunctionDiscardsResult(caller, fn.pfn, args, fn.env)
	case *ssa.Builtin:
		i.callBuiltinDiscardsResult(caller, fn, args, ssaArgs)
	case reflect.Value:
		i.callExternalDiscardsResult(caller, fn, args, nil)
	default:
		i.callExternalDiscardsResult(caller, reflect.ValueOf(fn), args, nil)
	}
}

// callFunction runs an interpreted function. Frame stack layout is
// [results][args][env]: slots 0..nres-1 hold results, then narg
// argument slots, then nenv free-variable slots.
func (i *Interp) callFunction(caller *frame, pfn *function, args []value, env []value) (result value) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < pfn.narg; i++ {
		fr.stack[i+pfn.nres] = args[i]
	}
	for i := 0; i < pfn.nenv; i++ {
		fr.stack[pfn.narg+i+pfn.nres] = env[i]
	}
	fr.run()
	if pfn.nres == 1 {
		result = fr.stack[0]
	} else if pfn.nres > 1 {
		// Multiple results are packed into a tuple.
		result = tuple(fr.stack[0:pfn.nres])
	}
	pfn.deleteFrame(caller, fr)
	return
}

// callFunctionByReflect runs an interpreted function with
// reflect.Value arguments/results (used when called from external Go
// code through reflect.MakeFunc).
func (i *Interp) callFunctionByReflect(caller *frame, typ reflect.Type, pfn *function, args []reflect.Value, env []value) (results []reflect.Value) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < pfn.narg; i++ {
		fr.stack[i+pfn.nres] = args[i].Interface()
	}
	for i := 0; i < pfn.nenv; i++ {
		fr.stack[pfn.narg+i+pfn.nres] = env[i]
	}
	fr.run()
	if pfn.nres > 0 {
		results = make([]reflect.Value, pfn.nres)
		for i := 0; i < pfn.nres; i++ {
			v := fr.stack[i]
			if v == nil {
				// nil result slot: materialize the zero value of the
				// declared result type.
				results[i] = reflect.New(typ.Out(i)).Elem()
			} else {
				results[i] = reflect.ValueOf(v)
			}
		}
	}
	pfn.deleteFrame(caller, fr)
	return
}

// callFunctionDiscardsResult runs an interpreted function and ignores
// its results.
func (i *Interp) callFunctionDiscardsResult(caller *frame, pfn *function, args []value, env []value) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < pfn.narg; i++ {
		fr.stack[i+pfn.nres] = args[i]
	}
	for i := 0; i < pfn.nenv; i++ {
		fr.stack[pfn.narg+i+pfn.nres] = env[i]
	}
	fr.run()
	pfn.deleteFrame(caller, fr)
}

// callFunctionByStack0: register-based call, zero results.
func (i *Interp) callFunctionByStack0(caller *frame, pfn *function, ir register, ia []register) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < len(ia); i++ {
		fr.stack[i] = caller.reg(ia[i])
	}
	fr.run()
	pfn.deleteFrame(caller, fr)
}

// callFunctionByStack1: register-based call, exactly one result stored
// into caller register ir.
func (i *Interp) callFunctionByStack1(caller *frame, pfn *function, ir register, ia []register) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < len(ia); i++ {
		fr.stack[i+1] = caller.reg(ia[i])
	}
	fr.run()
	caller.setReg(ir, fr.stack[0])
	pfn.deleteFrame(caller, fr)
}

// callFunctionByStackN: register-based call, multiple results packed
// into a tuple in caller register ir.
func (i *Interp) callFunctionByStackN(caller *frame, pfn *function, ir register, ia []register) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < len(ia); i++ {
		fr.stack[i+pfn.nres] = caller.reg(ia[i])
	}
	fr.run()
	caller.setReg(ir, tuple(fr.stack[0:pfn.nres]))
	pfn.deleteFrame(caller, fr)
}

// callFunctionByStack: register-based call, general result count.
func (i *Interp) callFunctionByStack(caller *frame, pfn *function, ir register, ia []register) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < len(ia); i++ {
		fr.stack[i+pfn.nres] = caller.reg(ia[i])
	}
	fr.run()
	if pfn.nres == 1 {
		caller.setReg(ir, fr.stack[0])
	} else if pfn.nres > 1 {
		caller.setReg(ir, tuple(fr.stack[0:pfn.nres]))
	}
	pfn.deleteFrame(caller, fr)
}

// The NoRecover variants inline fr.run's instruction loop without the
// deferred recover setup, for callees known to have no recover handler.

func (i *Interp) callFunctionByStackNoRecover0(caller *frame, pfn *function, ir register, ia []register) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < len(ia); i++ {
		fr.stack[i] = caller.reg(ia[i])
	}
	for fr.ipc != -1 {
		fn := fr.pfn.Instrs[fr.ipc]
		fr.ipc++
		fn(fr)
	}
	pfn.deleteFrame(caller, fr)
}

func (i *Interp) callFunctionByStackNoRecover1(caller *frame, pfn *function, ir register, ia []register) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < len(ia); i++ {
		fr.stack[i+1] = caller.reg(ia[i])
	}
	for fr.ipc != -1 {
		fn := fr.pfn.Instrs[fr.ipc]
		fr.ipc++
		fn(fr)
	}
	caller.setReg(ir, fr.stack[0])
	pfn.deleteFrame(caller, fr)
}

func (i *Interp) callFunctionByStackNoRecoverN(caller *frame, pfn *function, ir register, ia []register) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < len(ia); i++ {
		fr.stack[i+pfn.nres] = caller.reg(ia[i])
	}
	for fr.ipc != -1 {
		fn := fr.pfn.Instrs[fr.ipc]
		fr.ipc++
		fn(fr)
	}
	caller.setReg(ir, tuple(fr.stack[0:pfn.nres]))
	pfn.deleteFrame(caller, fr)
}

// callFunctionByStackWithEnv: register-based closure call carrying
// captured free variables in env.
func (i *Interp) callFunctionByStackWithEnv(caller *frame, pfn *function, ir register, ia []register, env []value) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < pfn.narg; i++ {
		fr.stack[i+pfn.nres] = caller.reg(ia[i])
	}
	for i := 0; i < pfn.nenv; i++ {
		fr.stack[pfn.narg+i+pfn.nres] = env[i]
	}
	fr.run()
	if pfn.nres == 1 {
		caller.setReg(ir, fr.stack[0])
	} else if pfn.nres > 1 {
		caller.setReg(ir, tuple(fr.stack[0:pfn.nres]))
	}
	pfn.deleteFrame(caller, fr)
}

// callFunctionByStackNoRecoverWithEnv: closure call with the inlined
// no-recover instruction loop.
func (i *Interp) callFunctionByStackNoRecoverWithEnv(caller *frame, pfn *function, ir register, ia []register, env []value) {
	fr := pfn.allocFrame(caller)
	for i := 0; i < pfn.narg; i++ {
		fr.stack[i+pfn.nres] = caller.reg(ia[i])
	}
	for i := 0; i < pfn.nenv; i++ {
		fr.stack[pfn.narg+i+pfn.nres] = env[i]
	}
	for fr.ipc != -1 {
		fn := fr.pfn.Instrs[fr.ipc]
		fr.ipc++
		fn(fr)
	}
	if pfn.nres == 1 {
		caller.setReg(ir, fr.stack[0])
	} else if pfn.nres > 1 {
		caller.setReg(ir, tuple(fr.stack[0:pfn.nres]))
	}
	pfn.deleteFrame(caller, fr)
}

// callExternal invokes a compiled Go function via reflect. For
// variadic callees the final argument is expected to already be the
// variadic slice (CallSlice). nil args become typed zero values.
func (i *Interp) callExternal(caller *frame, fn reflect.Value, args []value, env []value) value {
	if caller != nil && caller.deferid != 0 {
		// Re-register the defer frame so nested interpreted callbacks
		// made by the external code can find it.
		i.deferMap.Store(caller.deferid, caller)
	}
	var ins []reflect.Value
	typ := fn.Type()
	isVariadic := fn.Type().IsVariadic()
	if isVariadic {
		for i := 0; i < len(args)-1; i++ {
			if args[i] == nil {
				ins = append(ins, reflect.New(typ.In(i)).Elem())
			} else {
				ins = append(ins, reflect.ValueOf(args[i]))
			}
		}
		ins = append(ins, reflect.ValueOf(args[len(args)-1]))
	} else {
		ins = make([]reflect.Value, len(args))
		for i := 0; i < len(args); i++ {
			if args[i] == nil {
				ins[i] = reflect.New(typ.In(i)).Elem()
			} else {
				ins[i] = reflect.ValueOf(args[i])
			}
		}
	}
	var results []reflect.Value
	if isVariadic {
		results = fn.CallSlice(ins)
	} else {
		results = fn.Call(ins)
	}
	switch len(results) {
	case 0:
		return nil
	case 1:
		return results[0].Interface()
	default:
		var res []value
		for _, r := range results {
			res = append(res, r.Interface())
		}
		return tuple(res)
	}
}

// callExternalDiscardsResult is callExternal without result handling.
func (i *Interp) callExternalDiscardsResult(caller *frame, fn reflect.Value, args []value, env []value) {
	if caller != nil && caller.deferid != 0 {
		i.deferMap.Store(caller.deferid, caller)
	}
	var ins []reflect.Value
	typ := fn.Type()
	isVariadic := fn.Type().IsVariadic()
	if isVariadic {
		for i := 0; i < len(args)-1; i++ {
			if args[i] == nil {
				ins = append(ins, reflect.New(typ.In(i)).Elem())
			} else {
				ins = append(ins, reflect.ValueOf(args[i]))
			}
		}
		ins = append(ins, reflect.ValueOf(args[len(args)-1]))
		fn.CallSlice(ins)
	} else {
		ins = make([]reflect.Value, len(args))
		for i := 0; i < len(args); i++ {
			if args[i] == nil {
				ins[i] = reflect.New(typ.In(i)).Elem()
			} else {
				ins[i] = reflect.ValueOf(args[i])
			}
		}
		fn.Call(ins)
	}
}

// callExternalByStack is callExternal with arguments read from caller
// registers and the result written back to register ir.
func (i *Interp) callExternalByStack(caller *frame, fn reflect.Value, ir register, ia []register) {
	if caller.deferid != 0 {
		i.deferMap.Store(caller.deferid, caller)
	}
	var ins []reflect.Value
	typ := fn.Type()
	isVariadic := fn.Type().IsVariadic()
	if isVariadic {
		var i int
		for n := len(ia) - 1; i < n; i++ {
			arg := caller.reg(ia[i])
			if arg == nil {
				ins = append(ins, reflect.New(typ.In(i)).Elem())
			} else {
				ins = append(ins, reflect.ValueOf(arg))
			}
		}
		// i == len(ia)-1 here: the pre-built variadic slice.
		ins = append(ins, reflect.ValueOf(caller.reg(ia[i])))
	} else {
		n := len(ia)
		ins = make([]reflect.Value, n)
		for i := 0; i < n; i++ {
			arg := caller.reg(ia[i])
			if arg == nil {
				ins[i] = reflect.New(typ.In(i)).Elem()
			} else {
				ins[i] = reflect.ValueOf(arg)
			}
		}
	}
	var results []reflect.Value
	if isVariadic {
		results = fn.CallSlice(ins)
	} else {
		results = fn.Call(ins)
	}
	switch len(results) {
	case 0:
	case 1:
		caller.setReg(ir, results[0].Interface())
	default:
		var res []value
		for _, r := range results {
			res = append(res, r.Interface())
		}
		caller.setReg(ir, tuple(res))
	}
}

// callExternalWithFrameByStack is callExternalByStack for extern
// functions that take the caller *frame as an implicit first argument.
func (i *Interp) callExternalWithFrameByStack(caller *frame, fn reflect.Value, ir register, ia []register) {
	if caller.deferid != 0 {
		i.deferMap.Store(caller.deferid, caller)
	}
	var ins []reflect.Value
	typ := fn.Type()
	isVariadic := fn.Type().IsVariadic()
	if isVariadic {
		ins = append(ins, reflect.ValueOf(caller))
		var i int
		for n := len(ia) - 1; i < n; i++ {
			arg := caller.reg(ia[i])
			if arg == nil {
				ins = append(ins, reflect.New(typ.In(i)).Elem())
			} else {
				ins = append(ins, reflect.ValueOf(arg))
			}
		}
		ins = append(ins, reflect.ValueOf(caller.reg(ia[i])))
	} else {
		n := len(ia)
		ins = make([]reflect.Value, n+1)
		ins[0] = reflect.ValueOf(caller)
		for i := 0; i < n; i++ {
			arg := caller.reg(ia[i])
			if arg == nil {
				ins[i+1] = reflect.New(typ.In(i)).Elem()
			} else {
				ins[i+1] = reflect.ValueOf(arg)
			}
		}
	}
	var results []reflect.Value
	if isVariadic {
		results = fn.CallSlice(ins)
	} else {
		results = fn.Call(ins)
	}
	switch len(results) {
	case 0:
	case 1:
		caller.setReg(ir, results[0].Interface())
	default:
		var res []value
		for _, r := range results {
			res = append(res, r.Interface())
		}
		caller.setReg(ir, tuple(res))
	}
}

// run executes SSA instructions starting at fr.block and
// continuing until a return, a panic, or a recovered panic.
//
// After a panic, run panics.
//
// After a normal return, the result slots of fr.stack contain the
// result of the call and fr.ipc is -1.
//
// A recovered panic in a function without named return parameters
// (NRPs) becomes a normal return of the zero value of the function's
// result type.
//
// After a recovered panic in a function with NRPs, the Recover
// handlers determine where control resumes.
1062 func (fr *frame) run() { 1063 if fr.pfn.Recover != nil { 1064 defer func() { 1065 if fr.ipc == -1 || fr._defer == nil { 1066 return // normal return 1067 } 1068 fr._panic = &_panic{arg: recover()} 1069 callee := fr.callee 1070 for callee.aborted() { 1071 if !callee._panic.isNil() { 1072 if !callee._panic.link.isNil() { 1073 fr._panic.link = callee._panic.link 1074 // check panic link 1075 link := callee._panic.link 1076 for link.link != nil { 1077 link = link.link 1078 } 1079 link.pcs = append(link.pcs, callee.pc()) 1080 } else { 1081 fr._panic.pcs = append([]uintptr{callee.pc()}, fr._panic.pcs...) 1082 } 1083 fr._panic.pcs = append(append([]uintptr{}, callee._panic.pcs...), fr._panic.pcs...) 1084 } else { 1085 fr._panic.pcs = append([]uintptr{callee.pc()}, fr._panic.pcs...) 1086 } 1087 callee = callee.callee 1088 } 1089 fr.runDefers() 1090 for _, fn := range fr.pfn.Recover { 1091 fn(fr) 1092 } 1093 }() 1094 } 1095 1096 for fr.ipc != -1 && atomic.LoadInt32(&fr.interp.exited) == 0 { 1097 fn := fr.pfn.Instrs[fr.ipc] 1098 fr.ipc++ 1099 fn(fr) 1100 } 1101 } 1102 1103 // doRecover implements the recover() built-in. 1104 func doRecover(caller *frame) value { 1105 // recover() must be exactly one level beneath the deferred 1106 // function (two levels beneath the panicking function) to 1107 // have any effect. Thus we ignore both "defer recover()" and 1108 // "defer f() -> g() -> recover()". 1109 if caller.interp.ctx.Mode&DisableRecover == 0 && 1110 caller._panic.isNil() && 1111 caller.caller != nil && !caller.caller._panic.isNil() { 1112 p := caller.caller._panic.arg 1113 caller.caller._panic.recovered = true 1114 switch p := p.(type) { 1115 case PanicError: 1116 // The target program explicitly called panic(). 1117 return p.Value 1118 default: 1119 return p 1120 } 1121 } 1122 return nil //iface{} 1123 } 1124 1125 // Interpret interprets the Go program whose main package is mainpkg. 1126 // mode specifies various interpreter options. 
// NewInterp creates an interpreter for the Go program whose main
// package is mainpkg. ctx supplies the loader, mode flags and other
// interpreter options. The returned error is non-nil when the static
// type check of the program's packages fails or a linkname symbol is
// defined twice.
func NewInterp(ctx *Context, mainpkg *ssa.Package) (*Interp, error) {
	return newInterp(ctx, mainpkg, nil)
}

// newInterp builds the Interp: it loads types for mainpkg, allocates
// storage for every package-level global, wires //go:linkname vars and
// runs the static checker. globals, when non-nil, overrides global
// values by name (used by the REPL).
func newInterp(ctx *Context, mainpkg *ssa.Package, globals map[string]interface{}) (*Interp, error) {
	i := &Interp{
		ctx:          ctx,
		mainpkg:      mainpkg,
		globals:      make(map[string]value),
		chkinit:      make(map[string]bool),
		goroutines:   1,
		preloadTypes: make(map[types.Type]reflect.Type),
		funcs:        make(map[*ssa.Function]*function),
		msets:        make(map[reflect.Type](map[string]*ssa.Function)),
		chexit:       make(chan int),
		mainid:       goroutineID(),
	}
	// Without SupportMultipleInterp all interps share reflectx.Default,
	// so any previous icall state is reset first.
	var rctx *reflectx.Context
	if ctx.Mode&SupportMultipleInterp == 0 {
		reflectx.ResetAll()
		rctx = reflectx.Default
	} else {
		rctx = reflectx.NewContext()
	}
	i.record = NewTypesRecord(rctx, ctx.Loader, i, ctx.nestedMap)
	i.record.Load(mainpkg)

	var pkgs []*ssa.Package

	for _, pkg := range mainpkg.Prog.AllPackages() {
		// skip external pkg
		if pkg.Func("init").Blocks == nil {
			continue
		}
		pkgs = append(pkgs, pkg)
		// Initialize global storage.
		for _, m := range pkg.Members {
			switch v := m.(type) {
			case *ssa.Global:
				typ := i.preToType(deref(v.Type()))
				key := v.String()
				// Prefer an extern-registered pointer of the matching
				// element type; otherwise allocate fresh zero storage.
				if ext, ok := findExternValue(i, key); ok && ext.Kind() == reflect.Ptr && ext.Elem().Type() == typ {
					i.globals[key] = ext.Interface()
					i.chkinit[key] = true
				} else {
					i.globals[key] = reflect.New(typ).Interface()
				}
			}
		}
	}
	// check linkname var: alias local globals to their linkname targets.
	var links []*load.LinkSym
	for _, sp := range i.ctx.pkgs {
		for _, link := range sp.Links {
			if link.Kind == ast.Var {
				localName, targetName := link.PkgPath+"."+link.Name, link.Linkname.PkgPath+"."+link.Linkname.Name
				if _, ok := i.globals[localName]; ok {
					if ext, ok := findExternVar(i, link.Linkname.PkgPath, link.Linkname.Name); ok && ext.Kind() == reflect.Ptr {
						i.globals[localName] = ext.Interface()
						i.chkinit[targetName] = true
						links = append(links, link)
					} else if v, ok := i.globals[targetName]; ok {
						i.globals[localName] = v
						links = append(links, link)
					}
				}
			}
		}
	}
	// check globals for repl
	if globals != nil {
		for k := range i.globals {
			if fv, ok := globals[k]; ok {
				i.globals[k] = fv
			}
		}
	}
	// static types check
	err := checkPackages(i, pkgs)
	if err != nil {
		return i, err
	}
	// check linkname duplicated definition: a symbol may be initialized
	// from at most one side of a linkname pair.
	for _, link := range links {
		localName, targetName := link.PkgPath+"."+link.Name, link.Linkname.PkgPath+"."+link.Linkname.Name
		if i.chkinit[localName] && i.chkinit[targetName] {
			return i, fmt.Errorf("duplicated definition of symbol %v, from %v and %v", targetName, link.PkgPath, link.Linkname.PkgPath)
		}
	}
	return i, err
}

// loadType caches the reflect.Type for typ unless the type is nested
// (nested types are resolved lazily and must not be cached here).
func (i *Interp) loadType(typ types.Type) {
	if _, ok := i.preloadTypes[typ]; !ok {
		rt, nested := i.record.ToType(typ)
		if nested {
			return
		}
		i.preloadTypes[typ] = rt
	}
}

// preToType converts typ to a reflect.Type, caching non-nested results.
// Intended for the single-goroutine setup phase (no locking).
func (i *Interp) preToType(typ types.Type) reflect.Type {
	if t, ok := i.preloadTypes[typ]; ok {
		return t
	}
	rt, nested := i.record.ToType(typ)
	if !nested {
		i.preloadTypes[typ] = rt
	}
	return rt
}

// toType converts typ to a reflect.Type at run time. Cache hits are
// lock-free; misses take typesMutex because record.ToType mutates
// shared state.
func (i *Interp) toType(typ types.Type) reflect.Type {
	if t, ok := i.preloadTypes[typ]; ok {
		return t
	}
	// log.Panicf("toType %v %p\n", typ, typ)
	i.typesMutex.Lock()
	defer i.typesMutex.Unlock()
	rt, _ := i.record.ToType(typ)
	return rt
}

// RunFunc calls the named function of the main package with args and
// returns its result. Unless DisableRecover is set, target panics,
// os.Exit and runtime.Goexit are translated into err / exit state.
func (i *Interp) RunFunc(name string, args ...Value) (r Value, err error) {
	fr := &frame{interp: i}
	defer func() {
		if i.ctx.Mode&DisableRecover != 0 {
			return
		}
		switch p := recover().(type) {
		case nil:
			// nothing
		case exitPanic:
			i.exitCode = int(p)
			atomic.StoreInt32(&i.exited, 1)
		case goexitPanic:
			// check goroutines: Goexit on the only goroutine deadlocks;
			// otherwise wait for the exit code sent via chexit.
			if atomic.LoadInt32(&i.goroutines) == 1 {
				err = ErrGoexitDeadlock
			} else {
				i.exitCode = <-i.chexit
				atomic.StoreInt32(&i.exited, 1)
			}
		case runtime.Error:
			err = p
		case PanicError:
			err = p
		default:
			// Wrap unknown panics with the deepest frame's stack.
			pfr := fr
			for pfr.callee != nil {
				pfr = pfr.callee
			}
			err = PanicError{stack: debugStack(pfr), Value: p}
		}
	}()
	if fn := i.mainpkg.Func(name); fn != nil {
		r = i.call(fr, fn, args, nil)
	} else {
		err = fmt.Errorf("no function %v", name)
	}
	return
}

// ExitCode returns the exit code recorded by os.Exit / RunMain.
func (i *Interp) ExitCode() int {
	return i.exitCode
}

// RunInit resets the exit state and runs the main package's init.
func (i *Interp) RunInit() (err error) {
	i.goexited = 0
	i.exitCode = 0
	atomic.StoreInt32(&i.exited, 0)
	_, err = i.RunFunc("init")
	return
}

// ResetAllIcall resets all reflectx icall state; all interp methods become invalid.
1313 func ResetAllIcall() { 1314 reflectx.ResetAll() 1315 } 1316 1317 // IcallStat return reflectx icall allocate stat 1318 func IcallStat() (capacity int, allocate int, aviable int) { 1319 return reflectx.IcallStat() 1320 } 1321 1322 // icall allocate 1323 func (i *Interp) IcallAlloc() int { 1324 return i.record.rctx.IcallAlloc() 1325 } 1326 1327 // ResetIcall is reset reflectx icall, all methods invalid. 1328 func (i *Interp) ResetIcall() { 1329 i.record.rctx.Reset() 1330 } 1331 1332 // UnsafeRelease is unsafe release interp. interp all invalid. 1333 func (i *Interp) UnsafeRelease() { 1334 i.record.Release() 1335 for _, v := range i.funcs { 1336 v.UnsafeRelease() 1337 } 1338 i.funcs = nil 1339 i.msets = nil 1340 i.globals = nil 1341 i.preloadTypes = nil 1342 i.record = nil 1343 } 1344 1345 func (i *Interp) Abort() { 1346 atomic.StoreInt32(&i.exited, 1) 1347 } 1348 1349 func (i *Interp) RunMain() (exitCode int, err error) { 1350 if atomic.LoadInt32(&i.exited) == 1 { 1351 return i.exitCode, nil 1352 } 1353 _, err = i.RunFunc("main") 1354 if err != nil { 1355 exitCode = 2 1356 } 1357 if atomic.LoadInt32(&i.exited) == 1 { 1358 exitCode = i.exitCode 1359 } 1360 return 1361 } 1362 1363 func (i *Interp) GetFunc(key string) (interface{}, bool) { 1364 m, ok := i.mainpkg.Members[key] 1365 if !ok { 1366 return nil, false 1367 } 1368 fn, ok := m.(*ssa.Function) 1369 if !ok { 1370 return nil, false 1371 } 1372 return i.funcs[fn].makeFunction(i.toType(fn.Type()), nil).Interface(), true 1373 } 1374 1375 func (i *Interp) GetVarAddr(key string) (interface{}, bool) { 1376 m, ok := i.mainpkg.Members[key] 1377 if !ok { 1378 return nil, false 1379 } 1380 v, ok := m.(*ssa.Global) 1381 if !ok { 1382 return nil, false 1383 } 1384 p, ok := i.globals[v.String()] 1385 return p, ok 1386 } 1387 1388 func (i *Interp) GetConst(key string) (constant.Value, bool) { 1389 m, ok := i.mainpkg.Members[key] 1390 if !ok { 1391 return nil, false 1392 } 1393 v, ok := m.(*ssa.NamedConst) 1394 if !ok { 
1395 return nil, false 1396 } 1397 return v.Value.Value, true 1398 } 1399 1400 func (i *Interp) GetType(key string) (reflect.Type, bool) { 1401 m, ok := i.mainpkg.Members[key] 1402 if !ok { 1403 return nil, false 1404 } 1405 t, ok := m.(*ssa.Type) 1406 if !ok { 1407 return nil, false 1408 } 1409 return i.toType(t.Type()), true 1410 } 1411 1412 func (i *Interp) GetSymbol(key string) (m ssa.Member, v interface{}, ok bool) { 1413 defer func() { 1414 if v := recover(); v != nil { 1415 ok = false 1416 } 1417 }() 1418 ar := strings.Split(key, ".") 1419 var pkg *ssa.Package 1420 switch len(ar) { 1421 case 1: 1422 pkg = i.mainpkg 1423 case 2: 1424 pkgs := i.mainpkg.Prog.AllPackages() 1425 for _, p := range pkgs { 1426 if p.Pkg.Path() == ar[0] || p.Pkg.Name() == ar[0] { 1427 pkg = p 1428 break 1429 } 1430 } 1431 if pkg == nil { 1432 return 1433 } 1434 key = ar[1] 1435 default: 1436 return 1437 } 1438 m, ok = pkg.Members[key] 1439 if !ok { 1440 return 1441 } 1442 switch p := m.(type) { 1443 case *ssa.NamedConst: 1444 v = p.Value.Value 1445 case *ssa.Global: 1446 v, ok = globalToValue(i, p) 1447 case *ssa.Function: 1448 typ := i.toType(p.Type()) 1449 v = i.funcs[p].makeFunction(typ, nil) 1450 case *ssa.Type: 1451 v = i.toType(p.Type()) 1452 } 1453 return 1454 } 1455 1456 func (i *Interp) Exit(code int) { 1457 if i != nil && atomic.LoadInt32(&i.goexited) == 1 { 1458 i.chexit <- code 1459 } else { 1460 panic(exitPanic(code)) 1461 } 1462 } 1463 1464 // deref returns a pointer's element type; otherwise it returns typ. 1465 // TODO(adonovan): Import from ssa? 1466 func deref(typ types.Type) types.Type { 1467 if p, ok := typ.Underlying().(*types.Pointer); ok { 1468 return p.Elem() 1469 } 1470 return typ 1471 } 1472 1473 func goroutineID() int64 { 1474 return gid.Get() 1475 }