package interp

import (
	"errors"
	"fmt"
	"math"
	"os"
	"strconv"
	"strings"
	"time"

	"tinygo.org/x/go-llvm"
)

func (r *runner) run(fn *function, params []value, parentMem *memoryView, indent string) (value, memoryView, *Error) {
	mem := memoryView{r: r, parent: parentMem}
	locals := make([]value, len(fn.locals))
	r.callsExecuted++

	// Parameters are considered a kind of local value.
	for i, param := range params {
		locals[i] = param
	}

	// Track what blocks have run instructions at runtime.
	// This is used to prevent unrolling.
	var runtimeBlocks map[int]struct{}

	// Start with the first basic block and the first instruction.
	// Branch instructions may modify both bb and instIndex when branching.
	bb := fn.blocks[0]
	currentBB := 0
	lastBB := -1 // last basic block is undefined, only defined after a branch
	var operands []value
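	// Remember how many runtime instructions had been emitted when the
	// current basic block started, so the loop below can tell whether a block
	// emitted code at runtime.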
	startRTInsts := len(mem.instructions)
	for instIndex := 0; instIndex < len(bb.instructions); instIndex++ {
		if instIndex == 0 {
			// This is the start of a new basic block.
			if len(mem.instructions) != startRTInsts {
				if _, ok := runtimeBlocks[lastBB]; ok {
					// This loop has been unrolled.
					// Avoid doing this, as it can result in a large amount of extra machine code.
					// This currently uses the branch from the last block, as there is no available information to give a better location.
					lastBBInsts := fn.blocks[lastBB].instructions
					return nil, mem, r.errorAt(lastBBInsts[len(lastBBInsts)-1], errLoopUnrolled)
				}

				// Flag the last block as having run stuff at runtime.
				if runtimeBlocks == nil {
					runtimeBlocks = make(map[int]struct{})
				}
				runtimeBlocks[lastBB] = struct{}{}

				// Reset the block-start runtime instructions counter.
				startRTInsts = len(mem.instructions)
			}

			// There may be PHI nodes that need to be resolved. Resolve all PHI
			// nodes before continuing with regular instructions.
			// PHI nodes need to be treated specially because they can have a
			// mutual dependency:
			//   for.loop:
			//     %a = phi i8 [ 1, %entry ], [ %b, %for.loop ]
			//     %b = phi i8 [ 3, %entry ], [ %a, %for.loop ]
			// If these PHI nodes are processed like a regular instruction, %a
			// and %b are both 3 on the second iteration of the loop because %b
			// loads the value of %a from the second iteration, while it should
			// load the value from the previous iteration. The correct behavior
			// is that these two values swap each other's place on each
			// iteration.
			var phiValues []value
			var phiIndices []int
			for _, inst := range bb.phiNodes {
				var result value
				for i := 0; i < len(inst.operands); i += 2 {
					if int(inst.operands[i].(literalValue).value.(uint32)) == lastBB {
						incoming := inst.operands[i+1]
						if local, ok := incoming.(localValue); ok {
							result = locals[fn.locals[local.value]]
						} else {
							result = incoming
						}
						break
					}
				}
				if r.debug {
					fmt.Fprintln(os.Stderr, indent+"phi", inst.operands, "->", result)
				}
				if result == nil {
					panic("could not find PHI input")
				}
				phiValues = append(phiValues, result)
				phiIndices = append(phiIndices, inst.localIndex)
			}
			for i, value := range phiValues {
				locals[phiIndices[i]] = value
			}
		}

		inst := bb.instructions[instIndex]
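		// Gather the current values of this instruction's operands,
		// substituting function-local SSA names with the values computed so
		// far. PHI nodes are skipped: they were already resolved at the start
		// of the block above.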
		operands = operands[:0]
		isRuntimeInst := false
		if inst.opcode != llvm.PHI {
			for _, v := range inst.operands {
				if v, ok := v.(localValue); ok {
					index, ok := fn.locals[v.value]
					if !ok {
						// This is a localValue that is not local to the
						// function. An example would be an inline assembly call
						// operand.
						isRuntimeInst = true
						break
					}
					localVal := locals[index]
					if localVal == nil {
						// Trying to read a function-local value before it is
						// set.
						return nil, mem, r.errorAt(inst, errors.New("interp: local not defined"))
					} else {
						operands = append(operands, localVal)
						if _, ok := localVal.(localValue); ok {
							// The function-local value is still just a
							// localValue (which can't be interpreted at compile
							// time). Not sure whether this ever happens in
							// practice.
							isRuntimeInst = true
							break
						}
						continue
					}
				}
				operands = append(operands, v)
			}
		}
		if isRuntimeInst {
			err := r.runAtRuntime(fn, inst, locals, &mem, indent)
			if err != nil {
				return nil, mem, err
			}
			continue
		}
		switch inst.opcode {
		case llvm.Ret:
			if time.Since(r.start) > r.timeout {
				// Running for more than the allowed timeout; this shouldn't happen, but it does.
				// See github.com/tinygo-org/tinygo/issues/2124
				return nil, mem, r.errorAt(fn.blocks[0].instructions[0], fmt.Errorf("interp: running for more than %s, timing out (executed calls: %d)", r.timeout, r.callsExecuted))
			}

			if len(operands) != 0 {
				if r.debug {
					fmt.Fprintln(os.Stderr, indent+"ret", operands[0])
				}
				// Return instruction has a value to return.
				return operands[0], mem, nil
			}
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"ret")
			}
			// Return instruction doesn't return anything, it's just 'ret void'.
			return nil, mem, nil
		case llvm.Br:
			switch len(operands) {
			case 1:
				// Unconditional branch: [nextBB]
				lastBB = currentBB
				currentBB = int(operands[0].(literalValue).value.(uint32))
				bb = fn.blocks[currentBB]
				instIndex = -1 // start at 0 the next cycle
				if r.debug {
					fmt.Fprintln(os.Stderr, indent+"br", operands, "->", currentBB)
				}
			case 3:
				// Conditional branch: [cond, thenBB, elseBB]
				lastBB = currentBB
				switch operands[0].Uint() {
				case 1: // true -> thenBB
					currentBB = int(operands[1].(literalValue).value.(uint32))
				case 0: // false -> elseBB
					currentBB = int(operands[2].(literalValue).value.(uint32))
				default:
					panic("bool should be 0 or 1")
				}
				if r.debug {
					fmt.Fprintln(os.Stderr, indent+"br", operands, "->", currentBB)
				}
				bb = fn.blocks[currentBB]
				instIndex = -1 // start at 0 the next cycle
			default:
				panic("unknown operands length")
			}
		case llvm.Switch:
			// Switch statement: [value, defaultLabel, case0, label0, case1, label1, ...]
			value := operands[0].Uint()
			targetLabel := operands[1].Uint() // default label
			// Do a lazy switch by iterating over all cases.
			for i := 2; i < len(operands); i += 2 {
				if value == operands[i].Uint() {
					targetLabel = operands[i+1].Uint()
					break
				}
			}
			lastBB = currentBB
			currentBB = int(targetLabel)
			bb = fn.blocks[currentBB]
			instIndex = -1 // start at 0 the next cycle
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"switch", operands, "->", currentBB)
			}
		case llvm.Select:
			// Select is much like a ternary operator: it picks a result from
			// the second and third operand based on the boolean first operand.
			var result value
			switch operands[0].Uint() {
			case 1:
				result = operands[1]
			case 0:
				result = operands[2]
			default:
				panic("boolean must be 0 or 1")
			}
			locals[inst.localIndex] = result
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"select", operands, "->", result)
			}
		case llvm.Call:
			// A call instruction can either be a regular call or a runtime intrinsic.
			fnPtr, err := operands[0].asPointer(r)
			if err != nil {
				return nil, mem, r.errorAt(inst, err)
			}
			callFn := r.getFunction(fnPtr.llvmValue(&mem))
			switch {
			case callFn.name == "runtime.trackPointer":
				// Allocas and such are created as globals, so they don't need
				// a runtime.trackPointer call.
				// If the object is allocated at runtime instead, this call
				// never gets this far: it has already been emitted in initAll.
				continue
			case strings.HasPrefix(callFn.name, "runtime.print") || callFn.name == "runtime._panic" || callFn.name == "runtime.hashmapGet" || callFn.name == "runtime.hashmapInterfaceHash" ||
				callFn.name == "os.runtime_args" || callFn.name == "internal/task.start" || callFn.name == "internal/task.Current" ||
				callFn.name == "time.startTimer" || callFn.name == "time.stopTimer" || callFn.name == "time.resetTimer":
				// These functions should be run at runtime. Specifically:
				//   * Print and panic functions are best emitted directly without
				//     interpreting them, otherwise we get a ton of putchar (etc.)
				//     calls.
				//   * runtime.hashmapGet tries to access the map value directly.
				//     This is not possible as the map value is treated as a special
				//     kind of object in this package.
				//   * os.runtime_args reads globals that are initialized outside
				//     the view of the interp package so it always needs to be run
				//     at runtime.
				//   * internal/task.start, internal/task.Current: start and read
				//     scheduler state, which is modified elsewhere.
				//   * Timer functions access runtime internal state which may
				//     not be initialized.
				err := r.runAtRuntime(fn, inst, locals, &mem, indent)
				if err != nil {
					return nil, mem, err
				}
			case callFn.name == "internal/task.Pause":
				// Task scheduling isn't possible at compile time.
				return nil, mem, r.errorAt(inst, errUnsupportedRuntimeInst)
			case callFn.name == "runtime.nanotime" && r.pkgName == "time":
				// The time package contains a call to runtime.nanotime.
				// This appears to be to work around a limitation in Windows
				// Server 2008:
				//   > Monotonic times are reported as offsets from startNano.
				//   > We initialize startNano to runtimeNano() - 1 so that on systems where
				//   > monotonic time resolution is fairly low (e.g. Windows 2008
				//   > which appears to have a default resolution of 15ms),
				//   > we avoid ever reporting a monotonic time of 0.
				//   > (Callers may want to use 0 as "time not set".)
				// Simply let runtime.nanotime return 0 in this case, which
				// should be fine and avoids a call to runtime.nanotime. It
				// means that monotonic time in the time package is counted from
				// time.Time{}.Sub(1), which should be fine.
				locals[inst.localIndex] = literalValue{uint64(0)}
			case callFn.name == "runtime.alloc":
				// Allocate heap memory. At compile time, this is instead done
				// by creating a global variable.

				// Get the requested memory size to be allocated.
				size := operands[1].Uint()

				// Get the object layout, if it is available.
				llvmLayoutType := r.getLLVMTypeFromLayout(operands[2])

				// Create the object.
				alloc := object{
					globalName:     r.pkgName + "$alloc",
					llvmLayoutType: llvmLayoutType,
					buffer:         newRawValue(uint32(size)),
					size:           uint32(size),
				}
				index := len(r.objects)
				r.objects = append(r.objects, alloc)

				// And create a pointer to this object, for working with it (so
				// that stores to it copy it, etc).
				ptr := newPointerValue(r, index, 0)
				if r.debug {
					fmt.Fprintln(os.Stderr, indent+"runtime.alloc:", size, "->", ptr)
				}
				locals[inst.localIndex] = ptr
			case callFn.name == "runtime.sliceCopy":
				// sliceCopy implements the built-in copy function for slices.
				// It is implemented here so that it can be used even if the
				// runtime implementation is not available. Doing it this way
				// may also be faster.
				// Code:
				// func sliceCopy(dst, src unsafe.Pointer, dstLen, srcLen uintptr, elemSize uintptr) int {
				//     n := srcLen
				//     if n > dstLen {
				//         n = dstLen
				//     }
				//     memmove(dst, src, n*elemSize)
				//     return int(n)
				// }
				dstLen := operands[3].Uint()
				srcLen := operands[4].Uint()
				elemSize := operands[5].Uint()
				n := srcLen
				if n > dstLen {
					n = dstLen
				}
				if r.debug {
					fmt.Fprintln(os.Stderr, indent+"copy:", operands[1], operands[2], n)
				}
				if n != 0 {
					// Only try to copy bytes when there are any bytes to copy.
					// This is not just an optimization. If one of the slices
					// (or both) is nil, the asPointer method call will fail
					// even though copying a nil slice is allowed.
					dst, err := operands[1].asPointer(r)
					if err != nil {
						return nil, mem, r.errorAt(inst, err)
					}
					src, err := operands[2].asPointer(r)
					if err != nil {
						return nil, mem, r.errorAt(inst, err)
					}
					if mem.hasExternalStore(src) || mem.hasExternalLoadOrStore(dst) {
						// These are the same checks as there are on llvm.Load
						// and llvm.Store in the interpreter. Copying is
						// essentially loading from the source array and storing
						// to the destination array, hence why we need to do the
						// same checks here.
						// This fixes the following bug:
						// https://github.com/tinygo-org/tinygo/issues/3890
						err := r.runAtRuntime(fn, inst, locals, &mem, indent)
						if err != nil {
							return nil, mem, err
						}
						continue
					}
					nBytes := uint32(n * elemSize)
					dstObj := mem.getWritable(dst.index())
					dstBuf := dstObj.buffer.asRawValue(r)
					srcBuf := mem.get(src.index()).buffer.asRawValue(r)
					copy(dstBuf.buf[dst.offset():dst.offset()+nBytes], srcBuf.buf[src.offset():])
					dstObj.buffer = dstBuf
					mem.put(dst.index(), dstObj)
				}
				locals[inst.localIndex] = makeLiteralInt(n, inst.llvmInst.Type().IntTypeWidth())
			case strings.HasPrefix(callFn.name, "llvm.memcpy.p0") || strings.HasPrefix(callFn.name, "llvm.memmove.p0"):
				// Copy a block of memory from one pointer to another.
				dst, err := operands[1].asPointer(r)
				if err != nil {
					return nil, mem, r.errorAt(inst, err)
				}
				src, err := operands[2].asPointer(r)
				if err != nil {
					return nil, mem, r.errorAt(inst, err)
				}
				nBytes := uint32(operands[3].Uint())
				dstObj := mem.getWritable(dst.index())
				dstBuf := dstObj.buffer.asRawValue(r)
				if mem.get(src.index()).buffer == nil {
					// Looks like the source buffer is not defined.
					// This can happen with //extern or //go:embed.
					return nil, mem, r.errorAt(inst, errUnsupportedRuntimeInst)
				}
				srcBuf := mem.get(src.index()).buffer.asRawValue(r)
				copy(dstBuf.buf[dst.offset():dst.offset()+nBytes], srcBuf.buf[src.offset():])
				dstObj.buffer = dstBuf
				mem.put(dst.index(), dstObj)
			case callFn.name == "runtime.typeAssert":
				// This function must be implemented manually as it is normally
				// implemented by the interface lowering pass.
				if r.debug {
					fmt.Fprintln(os.Stderr, indent+"typeassert:", operands[1:])
				}
				assertedType, err := operands[2].toLLVMValue(inst.llvmInst.Operand(1).Type(), &mem)
				if err != nil {
					return nil, mem, r.errorAt(inst, err)
				}
				actualType, err := operands[1].toLLVMValue(inst.llvmInst.Operand(0).Type(), &mem)
				if err != nil {
					return nil, mem, r.errorAt(inst, err)
				}
				if !actualType.IsAConstantInt().IsNil() && actualType.ZExtValue() == 0 {
					locals[inst.localIndex] = literalValue{uint8(0)}
					break
				}
				// Strip pointer casts (bitcast, getelementptr).
				for !actualType.IsAConstantExpr().IsNil() {
					opcode := actualType.Opcode()
					if opcode != llvm.GetElementPtr && opcode != llvm.BitCast {
						break
					}
					actualType = actualType.Operand(0)
				}
				if strings.TrimPrefix(actualType.Name(), "reflect/types.type:") == strings.TrimPrefix(assertedType.Name(), "reflect/types.typeid:") {
					locals[inst.localIndex] = literalValue{uint8(1)}
				} else {
					locals[inst.localIndex] = literalValue{uint8(0)}
				}
			case strings.HasSuffix(callFn.name, ".$typeassert"):
				if r.debug {
					fmt.Fprintln(os.Stderr, indent+"interface assert:", operands[1:])
				}

				// Load various values for the interface implements check below.
				typecodePtr, err := operands[1].asPointer(r)
				if err != nil {
					return nil, mem, r.errorAt(inst, err)
				}
				typecodePtrOffset, err := typecodePtr.addOffset(-int64(r.pointerSize))
				if err != nil {
					return nil, mem, r.errorAt(inst, err) // unlikely
				}
				methodSetPtr, err := mem.load(typecodePtrOffset, r.pointerSize).asPointer(r)
				if err != nil {
					return nil, mem, r.errorAt(inst, err)
				}
				methodSet := mem.get(methodSetPtr.index()).llvmGlobal.Initializer()
				numMethods := int(r.builder.CreateExtractValue(methodSet, 0, "").ZExtValue())
				llvmFn := inst.llvmInst.CalledValue()
				methodSetAttr := llvmFn.GetStringAttributeAtIndex(-1, "tinygo-methods")
				methodSetString := methodSetAttr.GetStringValue()

				// Make a set of all the methods on the concrete type, for
				// easier checking in the next step.
				concreteTypeMethods := map[string]struct{}{}
				for i := 0; i < numMethods; i++ {
					methodInfo := r.builder.CreateExtractValue(methodSet, 1, "")
					name := r.builder.CreateExtractValue(methodInfo, i, "").Name()
					concreteTypeMethods[name] = struct{}{}
				}

				// Check whether all interface methods are also in the list
				// of defined methods calculated above. This is the interface
				// assert itself.
				assertOk := uint8(1) // i1 true
				for _, name := range strings.Split(methodSetString, "; ") {
					if _, ok := concreteTypeMethods[name]; !ok {
						// There is a method on the interface that is not
						// implemented by the type. The assertion will fail.
						assertOk = 0 // i1 false
						break
					}
				}
				// If assertOk is still 1, the assertion succeeded.
				locals[inst.localIndex] = literalValue{assertOk}
			case strings.HasSuffix(callFn.name, "$invoke"):
				// This thunk is the interface method dispatcher: it is called
				// with all regular parameters and a type code. It will then
				// call the concrete method for it.
				if r.debug {
					fmt.Fprintln(os.Stderr, indent+"invoke method:", operands[1:])
				}

				// Load the type code and method set of the interface value.
				typecodePtr, err := operands[len(operands)-2].asPointer(r)
				if err != nil {
					return nil, mem, r.errorAt(inst, err)
				}
				typecodePtrOffset, err := typecodePtr.addOffset(-int64(r.pointerSize))
				if err != nil {
					return nil, mem, r.errorAt(inst, err)
				}
				methodSetPtr, err := mem.load(typecodePtrOffset, r.pointerSize).asPointer(r)
				if err != nil {
					return nil, mem, r.errorAt(inst, err)
				}
				methodSet := mem.get(methodSetPtr.index()).llvmGlobal.Initializer()

				// We don't need to load the interface method set.

				// Load the signature of the to-be-called function.
				llvmFn := inst.llvmInst.CalledValue()
				invokeAttr := llvmFn.GetStringAttributeAtIndex(-1, "tinygo-invoke")
				invokeName := invokeAttr.GetStringValue()
				signature := r.mod.NamedGlobal(invokeName)

				// Iterate through all methods, looking for the one method that
				// should be returned.
				numMethods := int(r.builder.CreateExtractValue(methodSet, 0, "").ZExtValue())
				var method llvm.Value
				for i := 0; i < numMethods; i++ {
					methodSignatureAgg := r.builder.CreateExtractValue(methodSet, 1, "")
					methodSignature := r.builder.CreateExtractValue(methodSignatureAgg, i, "")
					if methodSignature == signature {
						methodAgg := r.builder.CreateExtractValue(methodSet, 2, "")
						method = r.builder.CreateExtractValue(methodAgg, i, "")
					}
				}
				if method.IsNil() {
					return nil, mem, r.errorAt(inst, errors.New("could not find method: "+invokeName))
				}

				// Change the to-be-called function to the underlying method to
				// be called and fall through to the default case.
				callFn = r.getFunction(method)
				fallthrough
			default:
				if len(callFn.blocks) == 0 {
					// Call to a function declaration without a definition
					// available.
					err := r.runAtRuntime(fn, inst, locals, &mem, indent)
					if err != nil {
						return nil, mem, err
					}
					continue
				}
				// Call a function with a definition available. Run it as usual,
				// possibly trying to recover from it if it failed to execute.
				if r.debug {
					argStrings := make([]string, len(operands)-1)
					for i, v := range operands[1:] {
						argStrings[i] = v.String()
					}
					fmt.Fprintln(os.Stderr, indent+"call:", callFn.name+"("+strings.Join(argStrings, ", ")+")")
				}
				retval, callMem, callErr := r.run(callFn, operands[1:], &mem, indent+"    ")
				if callErr != nil {
					if isRecoverableError(callErr.Err) {
						// This error can be recovered by doing the call at
						// runtime instead of at compile time. But we need to
						// revert any changes made by the call first.
						if r.debug {
							fmt.Fprintln(os.Stderr, indent+"!! revert because of error:", callErr.Err)
						}
						callMem.revert()
						err := r.runAtRuntime(fn, inst, locals, &mem, indent)
						if err != nil {
							return nil, mem, err
						}
						continue
					}
					// Add to the traceback, so that error handling code can see
					// how this function got called.
					callErr.Traceback = append(callErr.Traceback, ErrorLine{
						Pos:  getPosition(inst.llvmInst),
						Inst: inst.llvmInst.String(),
					})
					return nil, mem, callErr
				}
				locals[inst.localIndex] = retval
				mem.extend(callMem)
			}
		case llvm.Load:
			// Load instruction, loading some data from the topmost memory view.
			ptr, err := operands[0].asPointer(r)
			if err != nil {
				return nil, mem, r.errorAt(inst, err)
			}
			size := operands[1].(literalValue).value.(uint64)
			if inst.llvmInst.IsVolatile() || inst.llvmInst.Ordering() != llvm.AtomicOrderingNotAtomic || mem.hasExternalStore(ptr) {
				// If there could be an external store (for example, because a
				// pointer to the object was passed to a function that could not
				// be interpreted at compile time) then the load must be done at
				// runtime.
				err := r.runAtRuntime(fn, inst, locals, &mem, indent)
				if err != nil {
					return nil, mem, err
				}
				continue
			}
			result := mem.load(ptr, uint32(size))
			if result == nil {
				err := r.runAtRuntime(fn, inst, locals, &mem, indent)
				if err != nil {
					return nil, mem, err
				}
				continue
			}
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"load:", ptr, "->", result)
			}
			locals[inst.localIndex] = result
		case llvm.Store:
			// Store instruction. Create a new object in the memory view and
			// store to that, to make it possible to roll back this store.
			ptr, err := operands[1].asPointer(r)
			if err != nil {
				return nil, mem, r.errorAt(inst, err)
			}
			if inst.llvmInst.IsVolatile() || inst.llvmInst.Ordering() != llvm.AtomicOrderingNotAtomic || mem.hasExternalLoadOrStore(ptr) {
				err := r.runAtRuntime(fn, inst, locals, &mem, indent)
				if err != nil {
					return nil, mem, err
				}
				continue
			}
			val := operands[0]
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"store:", val, ptr)
			}
			ok := mem.store(val, ptr)
			if !ok {
				// Could not store the value, do it at runtime.
				err := r.runAtRuntime(fn, inst, locals, &mem, indent)
				if err != nil {
					return nil, mem, err
				}
			}
		case llvm.Alloca:
			// Alloca normally allocates some stack memory. In the interpreter,
			// it allocates a global instead.
			// This can likely be optimized, as all it really needs is an alloca
			// in the initAll function and creating a global is wasteful for
			// this purpose.

			// Create the new object.
			size := operands[0].(literalValue).value.(uint64)
			alloca := object{
				llvmType:   inst.llvmInst.AllocatedType(),
				globalName: r.pkgName + "$alloca",
				buffer:     newRawValue(uint32(size)),
				size:       uint32(size),
			}
			index := len(r.objects)
			r.objects = append(r.objects, alloca)

			// Create a pointer to this object (an alloca produces a pointer).
			ptr := newPointerValue(r, index, 0)
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"alloca:", operands, "->", ptr)
			}
			locals[inst.localIndex] = ptr
		case llvm.GetElementPtr:
			// GetElementPtr does pointer arithmetic, changing the offset of the
			// pointer into the underlying object.
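			// Operands come in (index, elementSize) pairs. A negative
			// elementSize marks a struct field, in which case the index is
			// already a byte offset; otherwise the pair contributes
			// index*elementSize bytes (for example, element 3 of an i32 array
			// adds 3*4 = 12 bytes).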
			var offset int64
			for i := 1; i < len(operands); i += 2 {
				index := operands[i].Int()
				elementSize := operands[i+1].Int()
				if elementSize < 0 {
					// This is a struct field.
					offset += index
				} else {
					// This is a normal GEP, probably an array index.
					offset += elementSize * index
				}
			}
			ptr, err := operands[0].asPointer(r)
			if err != nil {
				if err != errIntegerAsPointer {
					return nil, mem, r.errorAt(inst, err)
				}
				// GEP on fixed pointer value (for example, memory-mapped I/O).
				ptrValue := operands[0].Uint() + uint64(offset)
				locals[inst.localIndex] = makeLiteralInt(ptrValue, int(operands[0].len(r)*8))
				continue
			}
			ptr, err = ptr.addOffset(int64(offset))
			if err != nil {
				return nil, mem, r.errorAt(inst, err)
			}
			locals[inst.localIndex] = ptr
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"gep:", operands, "->", ptr)
			}
		case llvm.BitCast, llvm.IntToPtr, llvm.PtrToInt:
			// Various bitcast-like instructions that all keep the same bits
			// while changing the LLVM type.
			// Because interp doesn't preserve the type, these operations are
			// identity operations.
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+instructionNameMap[inst.opcode]+":", operands[0])
			}
			locals[inst.localIndex] = operands[0]
		case llvm.ExtractValue:
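			// Aggregates are stored as raw byte buffers, so extracting a field
			// is a matter of slicing out the right byte range; the byte offset
			// and size are carried as literal operands.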
			agg := operands[0].asRawValue(r)
			offset := operands[1].(literalValue).value.(uint64)
			size := operands[2].(literalValue).value.(uint64)
			elt := rawValue{
				buf: agg.buf[offset : offset+size],
			}
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"extractvalue:", operands, "->", elt)
			}
			locals[inst.localIndex] = elt
		case llvm.InsertValue:
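			// Like extractvalue, but the other way around: copy the aggregate
			// and overwrite the field's byte range with the new element.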
			agg := operands[0].asRawValue(r)
			elt := operands[1].asRawValue(r)
			offset := int(operands[2].(literalValue).value.(uint64))
			newagg := newRawValue(uint32(len(agg.buf)))
			copy(newagg.buf, agg.buf)
			copy(newagg.buf[offset:], elt.buf)
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"insertvalue:", operands, "->", newagg)
			}
			locals[inst.localIndex] = newagg
		case llvm.ICmp:
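			// Integer (or pointer) comparison. The predicate is carried as a
			// literal third operand; the result is an i1 value (0 or 1).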
			predicate := llvm.IntPredicate(operands[2].(literalValue).value.(uint8))
			lhs := operands[0]
			rhs := operands[1]
			result := r.interpretICmp(lhs, rhs, predicate)
			if result {
				locals[inst.localIndex] = literalValue{uint8(1)}
			} else {
				locals[inst.localIndex] = literalValue{uint8(0)}
			}
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"icmp:", operands[0], intPredicateString(predicate), operands[1], "->", result)
			}
		case llvm.FCmp:
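			// Floating point comparison. The operands are raw bit patterns,
			// decoded by operand size: 8 bytes as a float64, 4 bytes as a
			// float32.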
			predicate := llvm.FloatPredicate(operands[2].(literalValue).value.(uint8))
			var result bool
			var lhs, rhs float64
			switch operands[0].len(r) {
			case 8:
				lhs = math.Float64frombits(operands[0].Uint())
				rhs = math.Float64frombits(operands[1].Uint())
			case 4:
				lhs = float64(math.Float32frombits(uint32(operands[0].Uint())))
				rhs = float64(math.Float32frombits(uint32(operands[1].Uint())))
			default:
				panic("unknown float type")
			}
			switch predicate {
			case llvm.FloatOEQ:
				result = lhs == rhs
			case llvm.FloatUNE:
				result = lhs != rhs
			case llvm.FloatOGT:
				result = lhs > rhs
			case llvm.FloatOGE:
				result = lhs >= rhs
			case llvm.FloatOLT:
				result = lhs < rhs
			case llvm.FloatOLE:
				result = lhs <= rhs
			default:
				return nil, mem, r.errorAt(inst, errors.New("interp: unsupported fcmp"))
			}
			if result {
				locals[inst.localIndex] = literalValue{uint8(1)}
			} else {
				locals[inst.localIndex] = literalValue{uint8(0)}
			}
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+"fcmp:", operands[0], predicate, operands[1], "->", result)
			}
		case llvm.Add, llvm.Sub, llvm.Mul, llvm.UDiv, llvm.SDiv, llvm.URem, llvm.SRem, llvm.Shl, llvm.LShr, llvm.AShr, llvm.And, llvm.Or, llvm.Xor:
			// Integer binary operations.
			lhs := operands[0]
			rhs := operands[1]
			lhsPtr, err := lhs.asPointer(r)
			if err == nil {
				// The lhs is a pointer. This sometimes happens for particular
				// pointer tricks.
				if inst.opcode == llvm.Add {
					// This likely means this is part of an
					// unsafe.Pointer(uintptr(ptr) + offset) pattern.
					lhsPtr, err = lhsPtr.addOffset(int64(rhs.Uint()))
					if err != nil {
						return nil, mem, r.errorAt(inst, err)
					}
					locals[inst.localIndex] = lhsPtr
				} else if inst.opcode == llvm.Xor && rhs.Uint() == 0 {
					// Special workaround for strings.noescape, see
					// src/strings/builder.go in the Go source tree. This is
					// the identity operator, so we can return the input.
					locals[inst.localIndex] = lhs
				} else if inst.opcode == llvm.And && rhs.Uint() < 8 {
					// This is probably part of a pattern to get the lower bits
					// of a pointer for pointer tagging, like this:
					//     uintptr(unsafe.Pointer(t)) & 0b11
					// We can actually support this easily by ANDing with the
					// pointer offset.
					result := uint64(lhsPtr.offset()) & rhs.Uint()
					locals[inst.localIndex] = makeLiteralInt(result, int(lhs.len(r)*8))
				} else {
					// Catch-all for weird operations that should just be done
					// at runtime.
					err := r.runAtRuntime(fn, inst, locals, &mem, indent)
					if err != nil {
						return nil, mem, err
					}
				}
				continue
			}
			var result uint64
			switch inst.opcode {
			case llvm.Add:
				result = lhs.Uint() + rhs.Uint()
			case llvm.Sub:
				result = lhs.Uint() - rhs.Uint()
			case llvm.Mul:
				result = lhs.Uint() * rhs.Uint()
			case llvm.UDiv:
				result = lhs.Uint() / rhs.Uint()
			case llvm.SDiv:
				result = uint64(lhs.Int() / rhs.Int())
			case llvm.URem:
				result = lhs.Uint() % rhs.Uint()
			case llvm.SRem:
				result = uint64(lhs.Int() % rhs.Int())
			case llvm.Shl:
				result = lhs.Uint() << rhs.Uint()
			case llvm.LShr:
				result = lhs.Uint() >> rhs.Uint()
			case llvm.AShr:
				result = uint64(lhs.Int() >> rhs.Uint())
			case llvm.And:
				result = lhs.Uint() & rhs.Uint()
			case llvm.Or:
				result = lhs.Uint() | rhs.Uint()
			case llvm.Xor:
				result = lhs.Uint() ^ rhs.Uint()
			default:
				panic("unreachable")
			}
			locals[inst.localIndex] = makeLiteralInt(result, int(lhs.len(r)*8))
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+instructionNameMap[inst.opcode]+":", lhs, rhs, "->", result)
			}
		case llvm.SExt, llvm.ZExt, llvm.Trunc:
			// Change the size of an integer to a larger or smaller bit width.
			// We make use of the fact that the Uint() function already
			// zero-extends the value and that Int() already sign-extends the
			// value, so we only need to truncate it to the appropriate bit
			// width. This means we can implement sext, zext and trunc in the
			// same way, by first {zero,sign}extending all the way up to uint64
			// and then truncating it as necessary.
			var value uint64
			if inst.opcode == llvm.SExt {
				value = uint64(operands[0].Int())
			} else {
				value = operands[0].Uint()
			}
			bitwidth := operands[1].Uint()
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+instructionNameMap[inst.opcode]+":", value, bitwidth)
			}
			locals[inst.localIndex] = makeLiteralInt(value, int(bitwidth))
		case llvm.SIToFP, llvm.UIToFP:
			var value float64
			switch inst.opcode {
			case llvm.SIToFP:
				value = float64(operands[0].Int())
			case llvm.UIToFP:
				value = float64(operands[0].Uint())
			}
			bitwidth := operands[1].Uint()
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+instructionNameMap[inst.opcode]+":", value, bitwidth)
			}
			switch bitwidth {
			case 64:
				locals[inst.localIndex] = literalValue{math.Float64bits(value)}
			case 32:
				locals[inst.localIndex] = literalValue{math.Float32bits(float32(value))}
			default:
				panic("unknown integer size in sitofp/uitofp")
			}
		default:
			if r.debug {
				fmt.Fprintln(os.Stderr, indent+inst.String())
			}
			return nil, mem, r.errorAt(inst, errUnsupportedInst)
		}
	}
	return nil, mem, r.errorAt(bb.instructions[len(bb.instructions)-1], errors.New("interp: reached end of basic block without terminator"))
}

// Interpret an icmp instruction. Doesn't have side effects, only returns the
// output of the comparison.
func (r *runner) interpretICmp(lhs, rhs value, predicate llvm.IntPredicate) bool {
	switch predicate {
	case llvm.IntEQ, llvm.IntNE:
		var result bool
		lhsPointer, lhsErr := lhs.asPointer(r)
		rhsPointer, rhsErr := rhs.asPointer(r)
		if (lhsErr == nil) != (rhsErr == nil) {
			// Fast path: only one of the two is a pointer, so they can't be equal.
			result = false
		} else if lhsErr == nil {
			// Both errors are nil, so both sides are pointers.
			// Compare them directly.
			result = lhsPointer.equal(rhsPointer)
		} else {
			// Fall back to generic comparison.
			result = lhs.asRawValue(r).equal(rhs.asRawValue(r))
		}
		if predicate == llvm.IntNE {
			result = !result
		}
		return result
	case llvm.IntUGT:
		return lhs.Uint() > rhs.Uint()
	case llvm.IntUGE:
		return lhs.Uint() >= rhs.Uint()
	case llvm.IntULT:
		return lhs.Uint() < rhs.Uint()
	case llvm.IntULE:
		return lhs.Uint() <= rhs.Uint()
	case llvm.IntSGT:
		return lhs.Int() > rhs.Int()
	case llvm.IntSGE:
		return lhs.Int() >= rhs.Int()
	case llvm.IntSLT:
		return lhs.Int() < rhs.Int()
	case llvm.IntSLE:
		return lhs.Int() <= rhs.Int()
	default:
		// _should_ be unreachable, until LLVM adds new icmp predicates (unlikely)
		panic("interp: unsupported icmp")
	}
}

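// runAtRuntime emits the given instruction into the IR so that it executes at
// runtime instead of being interpreted at compile time. Operands that refer to
// interpreted locals are first converted back to LLVM values.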
func (r *runner) runAtRuntime(fn *function, inst instruction, locals []value, mem *memoryView, indent string) *Error {
	numOperands := inst.llvmInst.OperandsCount()
	operands := make([]llvm.Value, numOperands)
	for i := 0; i < numOperands; i++ {
		operand := inst.llvmInst.Operand(i)
		if !operand.IsAInstruction().IsNil() || !operand.IsAArgument().IsNil() {
			var err error
			operand, err = locals[fn.locals[operand]].toLLVMValue(operand.Type(), mem)
			if err != nil {
				return r.errorAt(inst, err)
			}
		}
		operands[i] = operand
	}
	if r.debug {
		fmt.Fprintln(os.Stderr, indent+inst.String())
	}
	var result llvm.Value
	switch inst.opcode {
	case llvm.Call:
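		// Pointer arguments may be written to by the callee once the call runs
		// at runtime, so conservatively mark them as having an external store;
		// later loads through such pointers must then also happen at runtime.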
		llvmFn := operands[len(operands)-1]
		args := operands[:len(operands)-1]
		for _, arg := range args {
			if arg.Type().TypeKind() == llvm.PointerTypeKind {
				err := mem.markExternalStore(arg)
				if err != nil {
					return r.errorAt(inst, err)
				}
			}
		}
		result = r.builder.CreateCall(inst.llvmInst.CalledFunctionType(), llvmFn, args, inst.name)
	case llvm.Load:
		err := mem.markExternalLoad(operands[0])
		if err != nil {
			return r.errorAt(inst, err)
		}
		result = r.builder.CreateLoad(inst.llvmInst.Type(), operands[0], inst.name)
		if inst.llvmInst.IsVolatile() {
			result.SetVolatile(true)
		}
		if ordering := inst.llvmInst.Ordering(); ordering != llvm.AtomicOrderingNotAtomic {
			result.SetOrdering(ordering)
		}
	case llvm.Store:
		err := mem.markExternalStore(operands[1])
		if err != nil {
			return r.errorAt(inst, err)
		}
		result = r.builder.CreateStore(operands[0], operands[1])
		if inst.llvmInst.IsVolatile() {
			result.SetVolatile(true)
		}
		if ordering := inst.llvmInst.Ordering(); ordering != llvm.AtomicOrderingNotAtomic {
			result.SetOrdering(ordering)
		}
	case llvm.BitCast:
		result = r.builder.CreateBitCast(operands[0], inst.llvmInst.Type(), inst.name)
	case llvm.ExtractValue:
		indices := inst.llvmInst.Indices()
		// Note: the Go LLVM API doesn't support multiple indices, so simulate
		// this operation with some extra extractvalue instructions. Hopefully
		// this is optimized to a single instruction.
		agg := operands[0]
		for i := 0; i < len(indices)-1; i++ {
			agg = r.builder.CreateExtractValue(agg, int(indices[i]), inst.name+".agg")
			mem.instructions = append(mem.instructions, agg)
		}
		result = r.builder.CreateExtractValue(agg, int(indices[len(indices)-1]), inst.name)
	case llvm.InsertValue:
		indices := inst.llvmInst.Indices()
		// Similar to extractvalue, we're working around a limitation in the Go
		// LLVM API here by splitting the insertvalue into multiple instructions
		// if there is more than one operand.
		agg := operands[0]
		aggregates := []llvm.Value{agg}
		for i := 0; i < len(indices)-1; i++ {
			agg = r.builder.CreateExtractValue(agg, int(indices[i]), inst.name+".agg"+strconv.Itoa(i))
			aggregates = append(aggregates, agg)
			mem.instructions = append(mem.instructions, agg)
		}
		result = operands[1]
		for i := len(indices) - 1; i >= 0; i-- {
			agg := aggregates[i]
			result = r.builder.CreateInsertValue(agg, result, int(indices[i]), inst.name+".insertvalue"+strconv.Itoa(i))
			if i != 0 { // don't add last result to mem.instructions as it will be done at the end already
				mem.instructions = append(mem.instructions, result)
			}
		}

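	// The remaining integer arithmetic and cast instructions are re-emitted
	// one-to-one.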
	case llvm.Add:
		result = r.builder.CreateAdd(operands[0], operands[1], inst.name)
	case llvm.Sub:
		result = r.builder.CreateSub(operands[0], operands[1], inst.name)
	case llvm.Mul:
		result = r.builder.CreateMul(operands[0], operands[1], inst.name)
	case llvm.UDiv:
		result = r.builder.CreateUDiv(operands[0], operands[1], inst.name)
	case llvm.SDiv:
		result = r.builder.CreateSDiv(operands[0], operands[1], inst.name)
	case llvm.URem:
		result = r.builder.CreateURem(operands[0], operands[1], inst.name)
	case llvm.SRem:
		result = r.builder.CreateSRem(operands[0], operands[1], inst.name)
	case llvm.ZExt:
		result = r.builder.CreateZExt(operands[0], inst.llvmInst.Type(), inst.name)
	default:
		return r.errorAt(inst, errUnsupportedRuntimeInst)
	}
	locals[inst.localIndex] = localValue{result}
	mem.instructions = append(mem.instructions, result)
	return nil
}

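// intPredicateString returns a short mnemonic (eq, ne, ugt, ...) for the given
// icmp predicate, for use in debug output.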
func intPredicateString(predicate llvm.IntPredicate) string {
	switch predicate {
	case llvm.IntEQ:
		return "eq"
	case llvm.IntNE:
		return "ne"
	case llvm.IntUGT:
		return "ugt"
	case llvm.IntUGE:
		return "uge"
	case llvm.IntULT:
		return "ult"
	case llvm.IntULE:
		return "ule"
	case llvm.IntSGT:
		return "sgt"
	case llvm.IntSGE:
		return "sge"
	case llvm.IntSLT:
		return "slt"
	case llvm.IntSLE:
		return "sle"
	default:
		return "cmp?"
	}
}