github.com/wasilibs/wazerox@v0.0.0-20240124024944-4923be63ab5f/internal/engine/compiler/engine.go (about) 1 package compiler 2 3 import ( 4 "context" 5 "errors" 6 "fmt" 7 "reflect" 8 "runtime" 9 "sort" 10 "sync" 11 "sync/atomic" 12 "unsafe" 13 14 "github.com/wasilibs/wazerox/api" 15 "github.com/wasilibs/wazerox/experimental" 16 "github.com/wasilibs/wazerox/internal/asm" 17 "github.com/wasilibs/wazerox/internal/bitpack" 18 "github.com/wasilibs/wazerox/internal/filecache" 19 "github.com/wasilibs/wazerox/internal/internalapi" 20 "github.com/wasilibs/wazerox/internal/platform" 21 "github.com/wasilibs/wazerox/internal/version" 22 "github.com/wasilibs/wazerox/internal/wasm" 23 "github.com/wasilibs/wazerox/internal/wasmdebug" 24 "github.com/wasilibs/wazerox/internal/wasmruntime" 25 "github.com/wasilibs/wazerox/internal/wazeroir" 26 ) 27 28 // NOTE: The offset of many of the struct fields defined here are referenced from 29 // assembly using the constants below such as moduleEngineFunctionsOffset. 30 // If changing a struct, update the constant and associated tests as needed. 31 type ( 32 // engine is a Compiler implementation of wasm.Engine 33 engine struct { 34 enabledFeatures api.CoreFeatures 35 codes map[wasm.ModuleID]*compiledModule // guarded by mutex. 36 fileCache filecache.Cache 37 mux sync.RWMutex 38 // setFinalizer defaults to runtime.SetFinalizer, but overridable for tests. 39 setFinalizer func(obj interface{}, finalizer interface{}) 40 wazeroVersion string 41 } 42 43 // moduleEngine implements wasm.ModuleEngine 44 moduleEngine struct { 45 // See note at top of file before modifying this struct. 46 47 // functions are the functions in a module instances. 48 // The index is module instance-scoped. We intentionally avoid using map 49 // as the underlying memory region is accessed by assembly directly by using 50 // codesElement0Address. 51 functions []function 52 53 // Keep a reference to the compiled module to prevent the GC from reclaiming 54 // it while the code may still be needed. 55 module *compiledModule 56 } 57 58 // callEngine holds context per moduleEngine.Call, and shared across all the 59 // function calls originating from the same moduleEngine.Call execution. 60 // 61 // This implements api.Function. 62 callEngine struct { 63 internalapi.WazeroOnlyType 64 65 // See note at top of file before modifying this struct. 66 67 // These contexts are read and written by compiled code. 68 // Note: structs are embedded to reduce the costs to access fields inside them. Also, this eases field offset 69 // calculation. 70 moduleContext 71 stackContext 72 exitContext 73 archContext 74 75 // The following fields are not accessed by compiled code directly. 76 77 // stack is the go-allocated stack for holding values and call frames. 78 // Note: We never edit len or cap in compiled code, so we won't get screwed when GC comes in. 79 // 80 // At any point of execution, say currently executing function F2 which was called by F1, then 81 // the stack should look like like: 82 // 83 // [..., arg0, arg1, ..., argN, _, _, _, v1, v2, v3, .... 84 // ^ { } 85 // | F1's callFrame 86 // | 87 // stackBasePointer 88 // 89 // where 90 // - callFrame is the F1's callFrame which called F2. It contains F1's return address, F1's base pointer, and F1's *function. 91 // - stackBasePointer is the stack base pointer stored at (callEngine stackContext.stackBasePointerInBytes) 92 // - arg0, ..., argN are the function parameters, and v1, v2, v3,... are the local variables 93 // including the non-function param locals as well as the temporary variable produced by instructions (e.g i32.const). 94 // 95 // If the F2 makes a function call to F3 which takes two arguments, then the stack will become: 96 // 97 // [..., arg0, arg1, ..., argN, _, _, _, v1, v2, v3, _, _, _ 98 // { } ^ { } 99 // F1's callFrame | F2's callFrame 100 // | 101 // stackBasePointer 102 // where 103 // - F2's callFrame is pushed above the v2 and v3 (arguments for F3). 104 // - The previous stackBasePointer (pointed at arg0) was saved inside the F2's callFrame. 105 // 106 // Then, if F3 returns one result, say w1, then the result will look like: 107 // 108 // [..., arg0, arg1, ..., argN, _, _, _, v1, w1, ... 109 // ^ { } 110 // | F1's callFrame 111 // | 112 // stackBasePointer 113 // 114 // where 115 // - stackBasePointer was reverted to the position at arg0 116 // - The result from F3 was pushed above v1 117 // 118 // If the number of parameters is smaller than that of return values, then the empty slots are reserved 119 // below the callFrame to store the results on teh return. 120 // For example, if F3 takes no parameter but returns N(>0) results, then the stack 121 // after making a call against F3 will look like: 122 // 123 // [..., arg0, arg1, ..., argN, _, _, _, v1, v2, v3, res_1, _, res_N, _, _, _ 124 // { } ^ { } 125 // F1's callFrame | F2's callFrame 126 // | 127 // stackBasePointer 128 // where res_1, ..., res_N are the reserved slots below the call frame. In general, 129 // the number of reserved slots equals max(0, len(results)-len(params). 130 // 131 // This reserved slots are necessary to save the result values onto the stack while not destroying 132 // the callFrame value on function returns. 133 stack []uint64 134 135 // initialFn is the initial function for this call engine. 136 initialFn *function 137 138 // Keep a reference to the compiled module to prevent the GC from reclaiming 139 // it while the code may still be needed. 140 module *compiledModule 141 142 // stackIterator provides a way to iterate over the stack for Listeners. 143 // It is setup and valid only during a call to a Listener hook. 144 stackIterator stackIterator 145 } 146 147 // moduleContext holds the per-function call specific module information. 148 // This is subject to be manipulated from compiled native code whenever we make function calls. 149 moduleContext struct { 150 // See note at top of file before modifying this struct. 151 152 // fn holds the currently executed *function. 153 fn *function 154 155 // moduleInstance is the address of module instance from which we initialize 156 // the following fields. This is set whenever we enter a function or return from function calls. 157 // 158 // On the entry to the native code, this must be initialized to zero to let native code preamble know 159 // that this is the initial function call (which leads to moduleContext initialization pass). 160 moduleInstance *wasm.ModuleInstance //lint:ignore U1000 This is only used by Compiler code. 161 162 // globalElement0Address is the address of the first element in the global slice, 163 // i.e. &ModuleInstance.Globals[0] as uintptr. 164 globalElement0Address uintptr 165 // memoryElement0Address is the address of the first element in the global slice, 166 // i.e. &ModuleInstance.Memory.Buffer[0] as uintptr. 167 memoryElement0Address uintptr 168 // memorySliceLen is the length of the memory buffer, i.e. len(ModuleInstance.Memory.Buffer). 169 memorySliceLen uint64 170 // memoryInstance holds the memory instance for this module instance. 171 memoryInstance *wasm.MemoryInstance 172 // tableElement0Address is the address of the first item in the tables slice, 173 // i.e. &ModuleInstance.Tables[0] as uintptr. 174 tablesElement0Address uintptr 175 176 // functionsElement0Address is &moduleContext.functions[0] as uintptr. 177 functionsElement0Address uintptr 178 179 // typeIDsElement0Address holds the &ModuleInstance.TypeIDs[0] as uintptr. 180 typeIDsElement0Address uintptr 181 182 // dataInstancesElement0Address holds the &ModuleInstance.DataInstances[0] as uintptr. 183 dataInstancesElement0Address uintptr 184 185 // elementInstancesElement0Address holds the &ModuleInstance.ElementInstances[0] as uintptr. 186 elementInstancesElement0Address uintptr 187 } 188 189 // stackContext stores the data to access engine.stack. 190 stackContext struct { 191 // See note at top of file before modifying this struct. 192 193 // stackPointer on .stack field which is accessed by stack[stackBasePointer+stackBasePointerInBytes*8]. 194 // 195 // Note: stackPointer is not used in assembly since the native code knows exact position of 196 // each variable in the value stack from the info from compilation. 197 // Therefore, only updated when native code exit from the Compiler world and go back to the Go function. 198 stackPointer uint64 199 200 // stackBasePointerInBytes is updated whenever we make function calls. 201 // Background: Functions might be compiled as if they use the stack from the bottom. 202 // However, in reality, they have to use it from the middle of the stack depending on 203 // when these function calls are made. So instead of accessing stack via stackPointer alone, 204 // functions are compiled, so they access the stack via [stackBasePointer](fixed for entire function) + [stackPointer]. 205 // More precisely, stackBasePointer is set to [callee's stack pointer] + [callee's stack base pointer] - [caller's params]. 206 // This way, compiled functions can be independent of the timing of functions calls made against them. 207 stackBasePointerInBytes uint64 208 209 // stackElement0Address is &engine.stack[0] as uintptr. 210 // Note: this is updated when growing the stack in builtinFunctionGrowStack. 211 stackElement0Address uintptr 212 213 // stackLenInBytes is len(engine.stack[0]) * 8 (bytes). 214 // Note: this is updated when growing the stack in builtinFunctionGrowStack. 215 stackLenInBytes uint64 216 } 217 218 // exitContext will be manipulated whenever compiled native code returns into the Go function. 219 exitContext struct { 220 // See note at top of file before modifying this struct. 221 222 // Where we store the status code of Compiler execution. 223 statusCode nativeCallStatusCode 224 225 // Set when statusCode == compilerStatusCallBuiltInFunction 226 // Indicating the function call index. 227 builtinFunctionCallIndex wasm.Index 228 229 // returnAddress is the return address which the engine jumps into 230 // after executing a builtin function or host function. 231 returnAddress uintptr 232 233 // callerModuleInstance holds the caller's wasm.ModuleInstance, and is only valid if currently executing a host function. 234 callerModuleInstance *wasm.ModuleInstance 235 } 236 237 // callFrame holds the information to which the caller function can return. 238 // This is mixed in callEngine.stack with other Wasm values just like any other 239 // native program (where the stack is the system stack though), and we retrieve the struct 240 // with unsafe pointer casts. 241 callFrame struct { 242 // See note at top of file before modifying this struct. 243 244 // returnAddress is the return address to which the engine jumps when the callee function returns. 245 returnAddress uintptr 246 // returnStackBasePointerInBytes is the stack base pointer to set on stackContext.stackBasePointerInBytes 247 // when the callee function returns. 248 returnStackBasePointerInBytes uint64 249 // function is the caller *function, and is used to retrieve the stack trace. 250 // Note: should be possible to revive *function from returnAddress, but might be costly. 251 function *function 252 } 253 254 // Function corresponds to function instance in Wasm, and is created from `code`. 255 function struct { 256 // See note at top of file before modifying this struct. 257 258 // codeInitialAddress is the pre-calculated pointer pointing to the initial byte of .codeSegment slice. 259 // That mean codeInitialAddress always equals uintptr(unsafe.Pointer(&.codeSegment[0])) 260 // and we cache the value (uintptr(unsafe.Pointer(&.codeSegment[0]))) to this field, 261 // so we don't need to repeat the calculation on each function call. 262 codeInitialAddress uintptr 263 // moduleInstance holds the address of source.ModuleInstance. 264 moduleInstance *wasm.ModuleInstance 265 // typeID is the corresponding wasm.FunctionTypeID for funcType. 266 typeID wasm.FunctionTypeID 267 // funcType is the function type for this function. Created during compilation. 268 funcType *wasm.FunctionType 269 // parent holds code from which this is created. 270 parent *compiledFunction 271 } 272 273 compiledModule struct { 274 // The data that need to be accessed by compiledFunction.parent are 275 // separated in an embedded field because we use finalizers to manage 276 // the lifecycle of compiledModule instances and having cyclic pointers 277 // prevents the Go runtime from calling them, which results in memory 278 // leaks since the memory mapped code segments cannot be released. 279 // 280 // The indirection guarantees that the finalizer set on compiledModule 281 // instances can run when all references are gone, and the Go GC can 282 // manage to reclaim the compiledCode when all compiledFunction objects 283 // referencing it have been freed. 284 *compiledCode 285 functions []compiledFunction 286 287 ensureTermination bool 288 } 289 290 compiledCode struct { 291 source *wasm.Module 292 executable asm.CodeSegment 293 } 294 295 // compiledFunction corresponds to a function in a module (not instantiated one). This holds the machine code 296 // compiled by wazero compiler. 297 compiledFunction struct { 298 // codeSegment is holding the compiled native code as a byte slice. 299 executableOffset uintptr 300 // See the doc for codeStaticData type. 301 // stackPointerCeil is the max of the stack pointer this function can reach. Lazily applied via maybeGrowStack. 302 stackPointerCeil uint64 303 304 index wasm.Index 305 goFunc interface{} 306 listener experimental.FunctionListener 307 parent *compiledCode 308 sourceOffsetMap sourceOffsetMap 309 } 310 311 // sourceOffsetMap holds the information to retrieve the original offset in 312 // the Wasm binary from the offset in the native binary. 313 // 314 // The fields are implemented as bit-packed arrays of 64 bits integers to 315 // reduce the memory footprint. Indexing into such arrays is not as fast as 316 // indexing into a simple slice, but the source offset map is intended to be 317 // used for debugging, lookups into the arrays should not appear on code 318 // paths that are critical to the application performance. 319 // 320 // The bitpack.OffsetArray fields may be nil, use bitpack.OffsetArrayLen to 321 // determine whether they are empty prior to indexing into the arrays to 322 // avoid panics caused by accessing nil pointers. 323 sourceOffsetMap struct { 324 // See note at top of file before modifying this struct. 325 326 // irOperationOffsetsInNativeBinary is index-correlated with irOperationSourceOffsetsInWasmBinary, 327 // and maps each index (corresponding to each IR Operation) to the offset in the compiled native code. 328 irOperationOffsetsInNativeBinary bitpack.OffsetArray 329 // irOperationSourceOffsetsInWasmBinary is index-correlated with irOperationOffsetsInNativeBinary. 330 // See wazeroir.CompilationResult irOperationOffsetsInNativeBinary. 331 irOperationSourceOffsetsInWasmBinary bitpack.OffsetArray 332 } 333 334 // functionListenerInvocation captures arguments needed to perform function 335 // listener invocations when unwinding the call stack. 336 functionListenerInvocation struct { 337 experimental.FunctionListener 338 def api.FunctionDefinition 339 } 340 ) 341 342 // Native code reads/writes Go's structs with the following constants. 343 // See TestVerifyOffsetValue for how to derive these values. 344 const ( 345 // Offsets for moduleEngine.functions 346 moduleEngineFunctionsOffset = 0 347 348 // Offsets for callEngine moduleContext. 349 callEngineModuleContextFnOffset = 0 350 callEngineModuleContextModuleInstanceOffset = 8 351 callEngineModuleContextGlobalElement0AddressOffset = 16 352 callEngineModuleContextMemoryElement0AddressOffset = 24 353 callEngineModuleContextMemoryInstanceOffset = 40 354 callEngineModuleContextTablesElement0AddressOffset = 48 355 callEngineModuleContextFunctionsElement0AddressOffset = 56 356 callEngineModuleContextTypeIDsElement0AddressOffset = 64 357 callEngineModuleContextDataInstancesElement0AddressOffset = 72 358 callEngineModuleContextElementInstancesElement0AddressOffset = 80 359 360 // Offsets for callEngine stackContext. 361 callEngineStackContextStackPointerOffset = 88 362 callEngineStackContextStackBasePointerInBytesOffset = 96 363 callEngineStackContextStackElement0AddressOffset = 104 364 callEngineStackContextStackLenInBytesOffset = 112 365 366 // Offsets for callEngine exitContext. 367 callEngineExitContextNativeCallStatusCodeOffset = 120 368 callEngineExitContextBuiltinFunctionCallIndexOffset = 124 369 callEngineExitContextReturnAddressOffset = 128 370 callEngineExitContextCallerModuleInstanceOffset = 136 371 372 // Offsets for function. 373 functionCodeInitialAddressOffset = 0 374 functionModuleInstanceOffset = 8 375 functionTypeIDOffset = 16 376 functionSize = 40 377 378 // Offsets for wasm.ModuleInstance. 379 moduleInstanceGlobalsOffset = 24 380 moduleInstanceMemoryOffset = 48 381 moduleInstanceTablesOffset = 56 382 moduleInstanceEngineOffset = 80 383 moduleInstanceTypeIDsOffset = 96 384 moduleInstanceDataInstancesOffset = 120 385 moduleInstanceElementInstancesOffset = 144 386 387 // Offsets for wasm.TableInstance. 388 tableInstanceTableOffset = 0 389 tableInstanceTableLenOffset = 8 390 391 // Offsets for wasm.MemoryInstance. 392 memoryInstanceBufferOffset = 0 393 memoryInstanceBufferLenOffset = 8 394 395 // Offsets for wasm.GlobalInstance. 396 globalInstanceValueOffset = 8 397 398 // Offsets for Go's interface. 399 // https://research.swtch.com/interfaces 400 // https://github.com/golang/go/blob/release-branch.go1.20/src/runtime/runtime2.go#L207-L210 401 interfaceDataOffset = 8 402 403 // Consts for wasm.DataInstance. 404 dataInstanceStructSize = 24 405 406 // Consts for wasm.ElementInstance. 407 elementInstanceStructSize = 24 408 409 // pointerSizeLog2 satisfies: 1 << pointerSizeLog2 = sizeOf(uintptr) 410 pointerSizeLog2 = 3 411 412 // callFrameDataSizeInUint64 is the size of callFrame struct per 8 bytes (= size of uint64). 413 callFrameDataSizeInUint64 = 24 / 8 414 ) 415 416 // nativeCallStatusCode represents the result of `nativecall`. 417 // This is set by the native code. 418 type nativeCallStatusCode uint32 419 420 const ( 421 // nativeCallStatusCodeReturned means the nativecall reaches the end of function, and returns successfully. 422 nativeCallStatusCodeReturned nativeCallStatusCode = iota 423 // nativeCallStatusCodeCallGoHostFunction means the nativecall returns to make a host function call. 424 nativeCallStatusCodeCallGoHostFunction 425 // nativeCallStatusCodeCallBuiltInFunction means the nativecall returns to make a builtin function call. 426 nativeCallStatusCodeCallBuiltInFunction 427 // nativeCallStatusCodeUnreachable means the function invocation reaches "unreachable" instruction. 428 nativeCallStatusCodeUnreachable 429 // nativeCallStatusCodeInvalidFloatToIntConversion means an invalid conversion of integer to floats happened. 430 nativeCallStatusCodeInvalidFloatToIntConversion 431 // nativeCallStatusCodeMemoryOutOfBounds means an out-of-bounds memory access happened. 432 nativeCallStatusCodeMemoryOutOfBounds 433 // nativeCallStatusCodeInvalidTableAccess means either offset to the table was out of bounds of table, or 434 // the target element in the table was uninitialized during call_indirect instruction. 435 nativeCallStatusCodeInvalidTableAccess 436 // nativeCallStatusCodeTypeMismatchOnIndirectCall means the type check failed during call_indirect. 437 nativeCallStatusCodeTypeMismatchOnIndirectCall 438 nativeCallStatusIntegerOverflow 439 nativeCallStatusIntegerDivisionByZero 440 nativeCallStatusUnalignedAtomic 441 nativeCallStatusModuleClosed 442 ) 443 444 // causePanic causes a panic with the corresponding error to the nativeCallStatusCode. 445 func (s nativeCallStatusCode) causePanic() { 446 var err error 447 switch s { 448 case nativeCallStatusIntegerOverflow: 449 err = wasmruntime.ErrRuntimeIntegerOverflow 450 case nativeCallStatusIntegerDivisionByZero: 451 err = wasmruntime.ErrRuntimeIntegerDivideByZero 452 case nativeCallStatusCodeInvalidFloatToIntConversion: 453 err = wasmruntime.ErrRuntimeInvalidConversionToInteger 454 case nativeCallStatusCodeUnreachable: 455 err = wasmruntime.ErrRuntimeUnreachable 456 case nativeCallStatusCodeMemoryOutOfBounds: 457 err = wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess 458 case nativeCallStatusCodeInvalidTableAccess: 459 err = wasmruntime.ErrRuntimeInvalidTableAccess 460 case nativeCallStatusCodeTypeMismatchOnIndirectCall: 461 err = wasmruntime.ErrRuntimeIndirectCallTypeMismatch 462 case nativeCallStatusUnalignedAtomic: 463 err = wasmruntime.ErrRuntimeUnalignedAtomic 464 } 465 panic(err) 466 } 467 468 func (s nativeCallStatusCode) String() (ret string) { 469 switch s { 470 case nativeCallStatusCodeReturned: 471 ret = "returned" 472 case nativeCallStatusCodeCallGoHostFunction: 473 ret = "call_host_function" 474 case nativeCallStatusCodeCallBuiltInFunction: 475 ret = "call_builtin_function" 476 case nativeCallStatusCodeUnreachable: 477 ret = "unreachable" 478 case nativeCallStatusCodeInvalidFloatToIntConversion: 479 ret = "invalid float to int conversion" 480 case nativeCallStatusCodeMemoryOutOfBounds: 481 ret = "memory out of bounds" 482 case nativeCallStatusCodeInvalidTableAccess: 483 ret = "invalid table access" 484 case nativeCallStatusCodeTypeMismatchOnIndirectCall: 485 ret = "type mismatch on indirect call" 486 case nativeCallStatusIntegerOverflow: 487 ret = "integer overflow" 488 case nativeCallStatusIntegerDivisionByZero: 489 ret = "integer division by zero" 490 case nativeCallStatusModuleClosed: 491 ret = "module closed" 492 case nativeCallStatusUnalignedAtomic: 493 ret = "unaligned atomic" 494 default: 495 panic("BUG") 496 } 497 return 498 } 499 500 // releaseCompiledModule is a runtime.SetFinalizer function that munmaps the compiledModule.executable. 501 func releaseCompiledModule(cm *compiledModule) { 502 if err := cm.executable.Unmap(); err != nil { 503 // munmap failure cannot recover, and happen asynchronously on the 504 // finalizer thread. While finalizer functions can return errors, 505 // they are ignored. 506 panic(fmt.Errorf("compiler: failed to munmap code segment: %w", err)) 507 } 508 } 509 510 // CompiledModuleCount implements the same method as documented on wasm.Engine. 511 func (e *engine) CompiledModuleCount() uint32 { 512 return uint32(len(e.codes)) 513 } 514 515 // DeleteCompiledModule implements the same method as documented on wasm.Engine. 516 func (e *engine) DeleteCompiledModule(module *wasm.Module) { 517 e.deleteCompiledModule(module) 518 } 519 520 // Close implements the same method as documented on wasm.Engine. 521 func (e *engine) Close() (err error) { 522 e.mux.Lock() 523 defer e.mux.Unlock() 524 // Releasing the references to compiled codes including the memory-mapped machine codes. 525 e.codes = nil 526 return 527 } 528 529 // CompileModule implements the same method as documented on wasm.Engine. 530 func (e *engine) CompileModule(_ context.Context, module *wasm.Module, listeners []experimental.FunctionListener, ensureTermination bool) error { 531 if _, ok, err := e.getCompiledModule(module, listeners); ok { // cache hit! 532 return nil 533 } else if err != nil { 534 return err 535 } 536 537 irCompiler, err := wazeroir.NewCompiler(e.enabledFeatures, callFrameDataSizeInUint64, module, ensureTermination) 538 if err != nil { 539 return err 540 } 541 542 var withGoFunc bool 543 localFuncs, importedFuncs := len(module.FunctionSection), module.ImportFunctionCount 544 cm := &compiledModule{ 545 compiledCode: &compiledCode{ 546 source: module, 547 }, 548 functions: make([]compiledFunction, localFuncs), 549 ensureTermination: ensureTermination, 550 } 551 552 if localFuncs == 0 { 553 return e.addCompiledModule(module, cm, withGoFunc) 554 } 555 556 // As this uses mmap, we need to munmap on the compiled machine code when it's GCed. 557 e.setFinalizer(cm, releaseCompiledModule) 558 ln := len(listeners) 559 cmp := newCompiler() 560 asmNodes := new(asmNodes) 561 offsets := new(offsets) 562 563 // The executable code is allocated in memory mappings held by the 564 // CodeSegment, which gros on demand when it exhausts its capacity. 565 var executable asm.CodeSegment 566 defer func() { 567 // At the end of the function, the executable is set on the compiled 568 // module and the local variable cleared; until then, the function owns 569 // the memory mapping and is reponsible for clearing it if it returns 570 // due to an error. Note that an error at this stage is not recoverable 571 // so we panic if we fail to unmap the memory segment. 572 if err := executable.Unmap(); err != nil { 573 panic(fmt.Errorf("compiler: failed to munmap code segment: %w", err)) 574 } 575 }() 576 577 for i := range module.CodeSection { 578 typ := &module.TypeSection[module.FunctionSection[i]] 579 buf := executable.NextCodeSection() 580 funcIndex := wasm.Index(i) 581 compiledFn := &cm.functions[i] 582 compiledFn.executableOffset = executable.Size() 583 compiledFn.parent = cm.compiledCode 584 compiledFn.index = importedFuncs + funcIndex 585 if i < ln { 586 compiledFn.listener = listeners[i] 587 } 588 589 if codeSeg := &module.CodeSection[i]; codeSeg.GoFunc != nil { 590 cmp.Init(typ, nil, compiledFn.listener != nil) 591 withGoFunc = true 592 if err = compileGoDefinedHostFunction(buf, cmp); err != nil { 593 def := module.FunctionDefinition(compiledFn.index) 594 return fmt.Errorf("error compiling host go func[%s]: %w", def.DebugName(), err) 595 } 596 compiledFn.goFunc = codeSeg.GoFunc 597 } else { 598 ir, err := irCompiler.Next() 599 if err != nil { 600 return fmt.Errorf("failed to lower func[%d]: %v", i, err) 601 } 602 cmp.Init(typ, ir, compiledFn.listener != nil) 603 604 compiledFn.stackPointerCeil, compiledFn.sourceOffsetMap, err = compileWasmFunction(buf, cmp, ir, asmNodes, offsets) 605 if err != nil { 606 def := module.FunctionDefinition(compiledFn.index) 607 return fmt.Errorf("error compiling wasm func[%s]: %w", def.DebugName(), err) 608 } 609 } 610 } 611 612 if runtime.GOARCH == "arm64" { 613 // On arm64, we cannot give all of rwx at the same time, so we change it to exec. 614 if err := platform.MprotectRX(executable.Bytes()); err != nil { 615 return err 616 } 617 } 618 cm.executable, executable = executable, asm.CodeSegment{} 619 return e.addCompiledModule(module, cm, withGoFunc) 620 } 621 622 // NewModuleEngine implements the same method as documented on wasm.Engine. 623 func (e *engine) NewModuleEngine(module *wasm.Module, instance *wasm.ModuleInstance) (wasm.ModuleEngine, error) { 624 me := &moduleEngine{ 625 functions: make([]function, len(module.FunctionSection)+int(module.ImportFunctionCount)), 626 } 627 628 // Note: imported functions are resolved in moduleEngine.ResolveImportedFunction. 629 630 cm, ok, err := e.getCompiledModule(module, 631 // listeners arg is not needed here since NewModuleEngine is called after CompileModule which 632 // ensures the association of listener with *code. 633 nil) 634 if !ok { 635 return nil, errors.New("source module must be compiled before instantiation") 636 } else if err != nil { 637 return nil, err 638 } 639 640 for i := range cm.functions { 641 c := &cm.functions[i] 642 offset := int(module.ImportFunctionCount) + i 643 typeIndex := module.FunctionSection[i] 644 me.functions[offset] = function{ 645 codeInitialAddress: cm.executable.Addr() + c.executableOffset, 646 moduleInstance: instance, 647 typeID: instance.TypeIDs[typeIndex], 648 funcType: &module.TypeSection[typeIndex], 649 parent: c, 650 } 651 } 652 653 me.module = cm 654 return me, nil 655 } 656 657 // ResolveImportedFunction implements wasm.ModuleEngine. 658 func (e *moduleEngine) ResolveImportedFunction(index, indexInImportedModule wasm.Index, importedModuleEngine wasm.ModuleEngine) { 659 imported := importedModuleEngine.(*moduleEngine) 660 // Copies the content from the import target moduleEngine. 661 e.functions[index] = imported.functions[indexInImportedModule] 662 } 663 664 // ResolveImportedMemory implements wasm.ModuleEngine. 665 func (e *moduleEngine) ResolveImportedMemory(wasm.ModuleEngine) {} 666 667 // FunctionInstanceReference implements the same method as documented on wasm.ModuleEngine. 668 func (e *moduleEngine) FunctionInstanceReference(funcIndex wasm.Index) wasm.Reference { 669 return uintptr(unsafe.Pointer(&e.functions[funcIndex])) 670 } 671 672 // DoneInstantiation implements wasm.ModuleEngine. 673 func (e *moduleEngine) DoneInstantiation() {} 674 675 // NewFunction implements wasm.ModuleEngine. 676 func (e *moduleEngine) NewFunction(index wasm.Index) api.Function { 677 return e.newFunction(&e.functions[index]) 678 } 679 680 func (e *moduleEngine) newFunction(f *function) api.Function { 681 initStackSize := initialStackSize 682 if initialStackSize < f.parent.stackPointerCeil { 683 initStackSize = f.parent.stackPointerCeil * 2 684 } 685 return e.newCallEngine(initStackSize, f) 686 } 687 688 // LookupFunction implements the same method as documented on wasm.ModuleEngine. 689 func (e *moduleEngine) LookupFunction(t *wasm.TableInstance, typeId wasm.FunctionTypeID, tableOffset wasm.Index) (*wasm.ModuleInstance, wasm.Index) { 690 if tableOffset >= uint32(len(t.References)) || t.Type != wasm.RefTypeFuncref { 691 panic(wasmruntime.ErrRuntimeInvalidTableAccess) 692 } 693 rawPtr := t.References[tableOffset] 694 if rawPtr == 0 { 695 panic(wasmruntime.ErrRuntimeInvalidTableAccess) 696 } 697 698 tf := functionFromUintptr(rawPtr) 699 if tf.typeID != typeId { 700 panic(wasmruntime.ErrRuntimeIndirectCallTypeMismatch) 701 } 702 return tf.moduleInstance, tf.parent.index 703 } 704 705 // functionFromUintptr resurrects the original *function from the given uintptr 706 // which comes from either funcref table or OpcodeRefFunc instruction. 707 func functionFromUintptr(ptr uintptr) *function { 708 // Wraps ptrs as the double pointer in order to avoid the unsafe access as detected by race detector. 709 // 710 // For example, if we have (*function)(unsafe.Pointer(ptr)) instead, then the race detector's "checkptr" 711 // subroutine wanrs as "checkptr: pointer arithmetic result points to invalid allocation" 712 // https://github.com/golang/go/blob/1ce7fcf139417d618c2730010ede2afb41664211/src/runtime/checkptr.go#L69 713 var wrapped *uintptr = &ptr 714 return *(**function)(unsafe.Pointer(wrapped)) 715 } 716 717 // Definition implements the same method as documented on wasm.ModuleEngine. 718 func (ce *callEngine) Definition() api.FunctionDefinition { 719 return ce.initialFn.definition() 720 } 721 722 func (f *function) definition() api.FunctionDefinition { 723 compiled := f.parent 724 return compiled.parent.source.FunctionDefinition(compiled.index) 725 } 726 727 // Call implements the same method as documented on wasm.ModuleEngine. 728 func (ce *callEngine) Call(ctx context.Context, params ...uint64) (results []uint64, err error) { 729 ft := ce.initialFn.funcType 730 if n := ft.ParamNumInUint64; n != len(params) { 731 return nil, fmt.Errorf("expected %d params, but passed %d", n, len(params)) 732 } 733 return ce.call(ctx, params, nil) 734 } 735 736 // CallWithStack implements the same method as documented on wasm.ModuleEngine. 737 func (ce *callEngine) CallWithStack(ctx context.Context, stack []uint64) error { 738 params, results, err := wasm.SplitCallStack(ce.initialFn.funcType, stack) 739 if err != nil { 740 return err 741 } 742 _, err = ce.call(ctx, params, results) 743 return err 744 } 745 746 func (ce *callEngine) call(ctx context.Context, params, results []uint64) (_ []uint64, err error) { 747 m := ce.initialFn.moduleInstance 748 if ce.module.ensureTermination { 749 select { 750 case <-ctx.Done(): 751 // If the provided context is already done, close the call context 752 // and return the error. 753 m.CloseWithCtxErr(ctx) 754 return nil, m.FailIfClosed() 755 default: 756 } 757 } 758 759 // We ensure that this Call method never panics as 760 // this Call method is indirectly invoked by embedders via store.CallFunction, 761 // and we have to make sure that all the runtime errors, including the one happening inside 762 // host functions, will be captured as errors, not panics. 763 defer func() { 764 err = ce.deferredOnCall(ctx, m, recover()) 765 if err == nil { 766 // If the module closed during the call, and the call didn't err for another reason, set an ExitError. 767 err = m.FailIfClosed() 768 } 769 // Ensure that the compiled module will never be GC'd before this method returns. 770 runtime.KeepAlive(ce.module) 771 }() 772 773 ft := ce.initialFn.funcType 774 ce.initializeStack(ft, params) 775 776 if ce.module.ensureTermination { 777 done := m.CloseModuleOnCanceledOrTimeout(ctx) 778 defer done() 779 } 780 781 if ctx.Value(experimental.EnableSnapshotterKey{}) != nil { 782 ctx = context.WithValue(ctx, experimental.SnapshotterKey{}, ce) 783 } 784 785 ce.execWasmFunction(ctx, m) 786 787 // This returns a safe copy of the results, instead of a slice view. If we 788 // returned a re-slice, the caller could accidentally or purposefully 789 // corrupt the stack of subsequent calls. 790 if results == nil && ft.ResultNumInUint64 > 0 { 791 results = make([]uint64, ft.ResultNumInUint64) 792 } 793 copy(results, ce.stack) 794 return results, nil 795 } 796 797 // initializeStack initializes callEngine.stack before entering native code. 798 // 799 // The stack must look like, if len(params) < len(results): 800 // 801 // [arg0, arg1, ..., argN, 0, 0, 0, ... 802 // { } ^ 803 // callFrame | 804 // | 805 // stackPointer 806 // 807 // else: 808 // 809 // [arg0, arg1, ..., argN, _, _, _, 0, 0, 0, ... 810 // | | { } ^ 811 // |reserved| callFrame | 812 // | | | 813 // |--------> stackPointer 814 // len(results)-len(params) 815 // 816 // where we reserve the slots below the callframe with the length len(results)-len(params). 817 // 818 // Note: callFrame { } is zeroed to indicate that the initial "caller" is this callEngine, not the Wasm function. 819 // 820 // See callEngine.stack as well. 821 func (ce *callEngine) initializeStack(tp *wasm.FunctionType, args []uint64) { 822 for _, v := range args { 823 ce.pushValue(v) 824 } 825 826 ce.stackPointer = uint64(callFrameOffset(tp)) 827 828 for i := 0; i < callFrameDataSizeInUint64; i++ { 829 ce.stack[ce.stackPointer] = 0 830 ce.stackPointer++ 831 } 832 } 833 834 // callFrameOffset returns the offset of the call frame from the stack base pointer. 835 // 836 // See the diagram in callEngine.stack. 837 func callFrameOffset(funcType *wasm.FunctionType) (ret int) { 838 ret = funcType.ResultNumInUint64 839 if ret < funcType.ParamNumInUint64 { 840 ret = funcType.ParamNumInUint64 841 } 842 return 843 } 844 845 // deferredOnCall takes the recovered value `recovered`, and wraps it 846 // with the call frame stack traces when not nil. This also resets 847 // the state of callEngine so that it can be used for the subsequent calls. 848 // 849 // This is defined for testability. 850 func (ce *callEngine) deferredOnCall(ctx context.Context, m *wasm.ModuleInstance, recovered interface{}) (err error) { 851 if s, ok := recovered.(*snapshot); ok { 852 // A snapshot that wasn't handled was created by a different call engine possibly from a nested wasm invocation, 853 // let it propagate up to be handled by the caller. 854 panic(s) 855 } 856 if recovered != nil { 857 builder := wasmdebug.NewErrorBuilder() 858 859 // Unwinds call frames from the values stack, starting from the 860 // current function `ce.fn`, and the current stack base pointer `ce.stackBasePointerInBytes`. 861 fn := ce.fn 862 pc := uint64(ce.returnAddress) 863 stackBasePointer := int(ce.stackBasePointerInBytes >> 3) 864 functionListeners := make([]functionListenerInvocation, 0, 16) 865 866 for { 867 def := fn.definition() 868 869 // sourceInfo holds the source code information corresponding to the frame. 870 // It is not empty only when the DWARF is enabled. 871 var sources []string 872 if p := fn.parent; p.parent.executable.Bytes() != nil { 873 if fn.parent.sourceOffsetMap.irOperationSourceOffsetsInWasmBinary != nil { 874 offset := fn.getSourceOffsetInWasmBinary(pc) 875 sources = p.parent.source.DWARFLines.Line(offset) 876 } 877 } 878 builder.AddFrame(def.DebugName(), def.ParamTypes(), def.ResultTypes(), sources) 879 880 if fn.parent.listener != nil { 881 functionListeners = append(functionListeners, functionListenerInvocation{ 882 FunctionListener: fn.parent.listener, 883 def: fn.definition(), 884 }) 885 } 886 887 callFrameOffset := callFrameOffset(fn.funcType) 888 if stackBasePointer != 0 { 889 frame := *(*callFrame)(unsafe.Pointer(&ce.stack[stackBasePointer+callFrameOffset])) 890 fn = frame.function 891 pc = uint64(frame.returnAddress) 892 stackBasePointer = int(frame.returnStackBasePointerInBytes >> 3) 893 } else { // base == 0 means that this was the last call frame stacked. 894 break 895 } 896 } 897 898 err = builder.FromRecovered(recovered) 899 for i := range functionListeners { 900 functionListeners[i].Abort(ctx, m, functionListeners[i].def, err) 901 } 902 } 903 904 // Allows the reuse of CallEngine. 905 ce.stackBasePointerInBytes, ce.stackPointer, ce.moduleInstance = 0, 0, nil 906 ce.moduleContext.fn = ce.initialFn 907 return 908 } 909 910 // getSourceOffsetInWasmBinary returns the corresponding offset in the original Wasm binary's code section 911 // for the given pc (which is an absolute address in the memory). 912 // If needPreviousInstr equals true, this returns the previous instruction's offset for the given pc. 913 func (f *function) getSourceOffsetInWasmBinary(pc uint64) uint64 { 914 srcMap := &f.parent.sourceOffsetMap 915 n := bitpack.OffsetArrayLen(srcMap.irOperationOffsetsInNativeBinary) + 1 916 917 // Calculate the offset in the compiled native binary. 918 pcOffsetInNativeBinary := pc - uint64(f.codeInitialAddress) 919 920 // Then, do the binary search on the list of offsets in the native binary 921 // for all the IR operations. This returns the index of the *next* IR 922 // operation of the one corresponding to the origin of this pc. 923 // See sort.Search. 924 // 925 // TODO: the underlying implementation of irOperationOffsetsInNativeBinary 926 // uses uses delta encoding an calls to the Index method might require a 927 // O(N) scan of the underlying array, turning binary search into a 928 // O(N*log(N)) operation. If this code path ends up being a bottleneck, 929 // we could add a Search method on the bitpack.OffsetArray types to delegate 930 // the lookup to the underlying data structure, allowing for the selection 931 // of a more optimized version of the algorithm. If you do so, please add a 932 // benchmark to verify the impact on compute time. 933 index := sort.Search(n, func(i int) bool { 934 if i == n-1 { 935 return true 936 } 937 return srcMap.irOperationOffsetsInNativeBinary.Index(i) >= pcOffsetInNativeBinary 938 }) 939 if index == 0 && bitpack.OffsetArrayLen(srcMap.irOperationSourceOffsetsInWasmBinary) > 0 { 940 // When pc is the beginning of the function, the next IR 941 // operation (returned by sort.Search) is the first of the 942 // offset map. 943 return srcMap.irOperationSourceOffsetsInWasmBinary.Index(0) 944 } 945 946 if index == n || index == 0 { // This case, somehow pc is not found in the source offset map. 947 return 0 948 } else { 949 return srcMap.irOperationSourceOffsetsInWasmBinary.Index(index - 1) 950 } 951 } 952 953 func NewEngine(_ context.Context, enabledFeatures api.CoreFeatures, fileCache filecache.Cache) wasm.Engine { 954 return newEngine(enabledFeatures, fileCache) 955 } 956 957 func newEngine(enabledFeatures api.CoreFeatures, fileCache filecache.Cache) *engine { 958 return &engine{ 959 enabledFeatures: enabledFeatures, 960 codes: map[wasm.ModuleID]*compiledModule{}, 961 setFinalizer: runtime.SetFinalizer, 962 fileCache: fileCache, 963 wazeroVersion: version.GetWazeroVersion(), 964 } 965 } 966 967 // Do not make this variable as constant, otherwise there would be 968 // dangerous memory access from native code. 969 // 970 // Background: Go has a mechanism called "goroutine stack-shrink" where Go 971 // runtime shrinks Goroutine's stack when it is GCing. Shrinking means that 972 // all the contents on the goroutine stack will be relocated by runtime, 973 // Therefore, the memory address of these contents change undeterministically. 974 // Not only shrinks, but also Go runtime grows the goroutine stack at any point 975 // of function call entries, which also might end up relocating contents. 976 // 977 // On the other hand, we hold pointers to the data region of value stack and 978 // call-frame stack slices and use these raw pointers from native code. 979 // Therefore, it is dangerous if these two stacks are allocated on stack 980 // as these stack's address might be changed by Goroutine which we cannot 981 // detect. 982 // 983 // By declaring these values as `var`, slices created via `make([]..., var)` 984 // will never be allocated on stack [1]. This means accessing these slices via 985 // raw pointers is safe: As of version 1.21, Go's garbage collector never relocates 986 // heap-allocated objects (aka no compaction of memory [2]). 987 // 988 // On Go upgrades, re-validate heap-allocation via `go build -gcflags='-m' ./internal/engine/compiler/...`. 989 // 990 // [1] https://github.com/golang/go/blob/c19c4c566c63818dfd059b352e52c4710eecf14d/src/cmd/compile/internal/escape/utils.go#L213-L215 991 // [2] https://github.com/golang/go/blob/c19c4c566c63818dfd059b352e52c4710eecf14d/src/runtime/mgc.go#L9 992 // [3] https://mayurwadekar2.medium.com/escape-analysis-in-golang-ee40a1c064c1 993 // [4] https://medium.com/@yulang.chu/go-stack-or-heap-2-slices-which-keep-in-stack-have-limitation-of-size-b3f3adfd6190 994 var initialStackSize uint64 = 512 995 996 func (e *moduleEngine) newCallEngine(stackSize uint64, fn *function) *callEngine { 997 ce := &callEngine{ 998 stack: make([]uint64, stackSize), 999 archContext: newArchContext(), 1000 initialFn: fn, 1001 moduleContext: moduleContext{fn: fn}, 1002 module: e.module, 1003 } 1004 1005 stackHeader := (*reflect.SliceHeader)(unsafe.Pointer(&ce.stack)) 1006 ce.stackContext = stackContext{ 1007 stackElement0Address: stackHeader.Data, 1008 stackLenInBytes: uint64(stackHeader.Len) << 3, 1009 } 1010 return ce 1011 } 1012 1013 func (ce *callEngine) popValue() (ret uint64) { 1014 ce.stackContext.stackPointer-- 1015 ret = ce.stack[ce.stackTopIndex()] 1016 return 1017 } 1018 1019 func (ce *callEngine) pushValue(v uint64) { 1020 ce.stack[ce.stackTopIndex()] = v 1021 ce.stackContext.stackPointer++ 1022 } 1023 1024 func (ce *callEngine) stackTopIndex() uint64 { 1025 return ce.stackContext.stackPointer + (ce.stackContext.stackBasePointerInBytes >> 3) 1026 } 1027 1028 const ( 1029 builtinFunctionIndexMemoryGrow wasm.Index = iota 1030 builtinFunctionIndexGrowStack 1031 builtinFunctionIndexTableGrow 1032 builtinFunctionIndexFunctionListenerBefore 1033 builtinFunctionIndexFunctionListenerAfter 1034 builtinFunctionIndexCheckExitCode 1035 // builtinFunctionIndexBreakPoint is internal (only for wazero developers). Disabled by default. 1036 builtinFunctionIndexBreakPoint 1037 builtinFunctionMemoryWait32 1038 builtinFunctionMemoryWait64 1039 builtinFunctionMemoryNotify 1040 ) 1041 1042 func (ce *callEngine) execWasmFunction(ctx context.Context, m *wasm.ModuleInstance) { 1043 codeAddr := ce.initialFn.codeInitialAddress 1044 modAddr := ce.initialFn.moduleInstance 1045 1046 entry: 1047 { 1048 // Call into the native code. 1049 nativecall(codeAddr, ce, modAddr) 1050 1051 // Check the status code from Compiler code. 1052 switch status := ce.exitContext.statusCode; status { 1053 case nativeCallStatusCodeReturned: 1054 case nativeCallStatusCodeCallGoHostFunction: 1055 calleeHostFunction := ce.moduleContext.fn 1056 base := int(ce.stackBasePointerInBytes >> 3) 1057 1058 // In the compiler engine, ce.stack has enough capacity for the 1059 // max of param or result length, so we don't need to grow when 1060 // there are more results than parameters. 1061 stackLen := calleeHostFunction.funcType.ParamNumInUint64 1062 if resultLen := calleeHostFunction.funcType.ResultNumInUint64; resultLen > stackLen { 1063 stackLen = resultLen 1064 } 1065 stack := ce.stack[base : base+stackLen] 1066 1067 fn := calleeHostFunction.parent.goFunc 1068 func() { 1069 defer func() { 1070 if r := recover(); r != nil { 1071 if s, ok := r.(*snapshot); ok { 1072 if s.ce == ce { 1073 s.doRestore() 1074 } else { 1075 panic(r) 1076 } 1077 } else { 1078 panic(r) 1079 } 1080 } 1081 }() 1082 switch fn := fn.(type) { 1083 case api.GoModuleFunction: 1084 fn.Call(ctx, ce.callerModuleInstance, stack) 1085 case api.GoFunction: 1086 fn.Call(ctx, stack) 1087 } 1088 }() 1089 1090 codeAddr, modAddr = ce.returnAddress, ce.moduleInstance 1091 goto entry 1092 case nativeCallStatusCodeCallBuiltInFunction: 1093 caller := ce.moduleContext.fn 1094 switch ce.exitContext.builtinFunctionCallIndex { 1095 case builtinFunctionIndexMemoryGrow: 1096 ce.builtinFunctionMemoryGrow(caller.moduleInstance.MemoryInstance) 1097 case builtinFunctionIndexGrowStack: 1098 ce.builtinFunctionGrowStack(caller.parent.stackPointerCeil) 1099 case builtinFunctionIndexTableGrow: 1100 ce.builtinFunctionTableGrow(caller.moduleInstance.Tables) 1101 case builtinFunctionMemoryWait32: 1102 ce.builtinFunctionMemoryWait32(caller.moduleInstance.MemoryInstance) 1103 case builtinFunctionMemoryWait64: 1104 ce.builtinFunctionMemoryWait64(caller.moduleInstance.MemoryInstance) 1105 case builtinFunctionMemoryNotify: 1106 ce.builtinFunctionMemoryNotify(caller.moduleInstance.MemoryInstance) 1107 case builtinFunctionIndexFunctionListenerBefore: 1108 ce.builtinFunctionFunctionListenerBefore(ctx, m, caller) 1109 case builtinFunctionIndexFunctionListenerAfter: 1110 ce.builtinFunctionFunctionListenerAfter(ctx, m, caller) 1111 case builtinFunctionIndexCheckExitCode: 1112 // Note: this operation must be done in Go, not native code. The reason is that 1113 // native code cannot be preempted and that means it can block forever if there are not 1114 // enough OS threads (which we don't have control over). 1115 if err := m.FailIfClosed(); err != nil { 1116 panic(err) 1117 } 1118 } 1119 if false { 1120 if ce.exitContext.builtinFunctionCallIndex == builtinFunctionIndexBreakPoint { 1121 runtime.Breakpoint() 1122 } 1123 } 1124 1125 codeAddr, modAddr = ce.returnAddress, ce.moduleInstance 1126 goto entry 1127 default: 1128 status.causePanic() 1129 } 1130 } 1131 } 1132 1133 // callStackCeiling is the maximum WebAssembly call frame stack height. This allows wazero to raise 1134 // wasm.ErrCallStackOverflow instead of overflowing the Go runtime. 1135 // 1136 // The default value should suffice for most use cases. Those wishing to change this can via `go build -ldflags`. 1137 // 1138 // TODO: allows to configure this via context? 1139 var callStackCeiling = uint64(5000000) // in uint64 (8 bytes) == 40000000 bytes in total == 40mb. 1140 1141 func (ce *callEngine) builtinFunctionGrowStack(stackPointerCeil uint64) { 1142 oldLen := uint64(len(ce.stack)) 1143 if callStackCeiling < oldLen { 1144 panic(wasmruntime.ErrRuntimeStackOverflow) 1145 } 1146 1147 // Extends the stack's length to oldLen*2+stackPointerCeil. 1148 newLen := oldLen<<1 + (stackPointerCeil) 1149 newStack := make([]uint64, newLen) 1150 top := ce.stackTopIndex() 1151 copy(newStack[:top], ce.stack[:top]) 1152 ce.stack = newStack 1153 stackHeader := (*reflect.SliceHeader)(unsafe.Pointer(&ce.stack)) 1154 ce.stackContext.stackElement0Address = stackHeader.Data 1155 ce.stackContext.stackLenInBytes = newLen << 3 1156 } 1157 1158 func (ce *callEngine) builtinFunctionMemoryGrow(mem *wasm.MemoryInstance) { 1159 newPages := ce.popValue() 1160 1161 if res, ok := mem.Grow(uint32(newPages)); !ok { 1162 ce.pushValue(uint64(0xffffffff)) // = -1 in signed 32-bit integer. 1163 } else { 1164 ce.pushValue(uint64(res)) 1165 } 1166 1167 // Update the moduleContext fields as they become stale after the update ^^. 1168 bufSliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&mem.Buffer)) 1169 atomic.StoreUint64(&ce.moduleContext.memorySliceLen, uint64(bufSliceHeader.Len)) 1170 atomic.StoreUintptr(&ce.moduleContext.memoryElement0Address, bufSliceHeader.Data) 1171 } 1172 1173 func (ce *callEngine) builtinFunctionTableGrow(tables []*wasm.TableInstance) { 1174 tableIndex := uint32(ce.popValue()) 1175 table := tables[tableIndex] // verified not to be out of range by the func validation at compilation phase. 1176 num := ce.popValue() 1177 ref := ce.popValue() 1178 res := table.Grow(uint32(num), uintptr(ref)) 1179 ce.pushValue(uint64(res)) 1180 } 1181 1182 func (ce *callEngine) builtinFunctionMemoryWait32(mem *wasm.MemoryInstance) { 1183 if !mem.Shared { 1184 panic(wasmruntime.ErrRuntimeExpectedSharedMemory) 1185 } 1186 1187 timeout := int64(ce.popValue()) 1188 exp := uint32(ce.popValue()) 1189 addr := uintptr(ce.popValue()) 1190 base := uintptr(unsafe.Pointer(&mem.Buffer[0])) 1191 1192 offset := uint32(addr - base) 1193 1194 ce.pushValue(mem.Wait32(offset, exp, timeout)) 1195 } 1196 1197 func (ce *callEngine) builtinFunctionMemoryWait64(mem *wasm.MemoryInstance) { 1198 if !mem.Shared { 1199 panic(wasmruntime.ErrRuntimeExpectedSharedMemory) 1200 } 1201 1202 timeout := int64(ce.popValue()) 1203 exp := ce.popValue() 1204 addr := uintptr(ce.popValue()) 1205 base := uintptr(unsafe.Pointer(&mem.Buffer[0])) 1206 1207 offset := uint32(addr - base) 1208 1209 ce.pushValue(mem.Wait64(offset, exp, timeout)) 1210 } 1211 1212 func (ce *callEngine) builtinFunctionMemoryNotify(mem *wasm.MemoryInstance) { 1213 count := ce.popValue() 1214 addr := ce.popValue() 1215 1216 offset := uint32(uintptr(addr) - uintptr(unsafe.Pointer(&mem.Buffer[0]))) 1217 1218 ce.pushValue(uint64(mem.Notify(offset, uint32(count)))) 1219 } 1220 1221 // snapshot implements experimental.Snapshot 1222 type snapshot struct { 1223 stackPointer uint64 1224 stackBasePointerInBytes uint64 1225 returnAddress uint64 1226 hostBase int 1227 stack []uint64 1228 1229 ret []uint64 1230 1231 ce *callEngine 1232 } 1233 1234 // Snapshot implements the same method as documented on experimental.Snapshotter. 1235 func (ce *callEngine) Snapshot() experimental.Snapshot { 1236 hostBase := int(ce.stackBasePointerInBytes >> 3) 1237 1238 stackTop := int(ce.stackTopIndex()) 1239 stack := make([]uint64, stackTop) 1240 copy(stack, ce.stack[:stackTop]) 1241 1242 return &snapshot{ 1243 stackPointer: ce.stackContext.stackPointer, 1244 stackBasePointerInBytes: ce.stackBasePointerInBytes, 1245 returnAddress: uint64(ce.returnAddress), 1246 hostBase: hostBase, 1247 stack: stack, 1248 ce: ce, 1249 } 1250 } 1251 1252 // Restore implements the same method as documented on experimental.Snapshot. 1253 func (s *snapshot) Restore(ret []uint64) { 1254 s.ret = ret 1255 panic(s) 1256 } 1257 1258 // Restore implements the same method as documented on experimental.Snapshot. 1259 func (s *snapshot) doRestore() { 1260 ce := s.ce 1261 ce.stackContext.stackPointer = s.stackPointer 1262 ce.stackContext.stackBasePointerInBytes = s.stackBasePointerInBytes 1263 copy(ce.stack, s.stack) 1264 ce.returnAddress = uintptr(s.returnAddress) 1265 copy(ce.stack[s.hostBase:], s.ret) 1266 } 1267 1268 // Error implements the same method on error. 1269 func (s *snapshot) Error() string { 1270 return "unhandled snapshot restore, this generally indicates restore was called from a different " + 1271 "exported function invocation than snapshot" 1272 } 1273 1274 // stackIterator implements experimental.StackIterator. 1275 type stackIterator struct { 1276 stack []uint64 1277 fn *function 1278 base int 1279 pc uint64 1280 started bool 1281 } 1282 1283 func (si *stackIterator) reset(stack []uint64, fn *function, base int, pc uint64) { 1284 si.stack = stack 1285 si.fn = fn 1286 si.base = base 1287 si.pc = pc 1288 si.started = false 1289 } 1290 1291 func (si *stackIterator) clear() { 1292 si.stack = nil 1293 si.fn = nil 1294 si.base = 0 1295 si.started = false 1296 } 1297 1298 // Next implements the same method as documented on experimental.StackIterator. 1299 func (si *stackIterator) Next() bool { 1300 if !si.started { 1301 si.started = true 1302 return true 1303 } 1304 1305 if si.fn == nil || si.base == 0 { 1306 return false 1307 } 1308 1309 frame := si.base + callFrameOffset(si.fn.funcType) 1310 si.pc = si.stack[frame+0] 1311 si.base = int(si.stack[frame+1] >> 3) 1312 // *function lives in the third field of callFrame struct. This must be 1313 // aligned with the definition of callFrame struct. 1314 si.fn = *(**function)(unsafe.Pointer(&si.stack[frame+2])) 1315 return si.fn != nil 1316 } 1317 1318 // ProgramCounter implements the same method as documented on experimental.StackIterator. 1319 func (si *stackIterator) ProgramCounter() experimental.ProgramCounter { 1320 return experimental.ProgramCounter(si.pc) 1321 } 1322 1323 // Function implements the same method as documented on experimental.StackIterator. 1324 func (si *stackIterator) Function() experimental.InternalFunction { 1325 return internalFunction{si.fn} 1326 } 1327 1328 // internalFunction implements experimental.InternalFunction. 1329 type internalFunction struct{ *function } 1330 1331 // Definition implements the same method as documented on experimental.InternalFunction. 1332 func (f internalFunction) Definition() api.FunctionDefinition { 1333 return f.definition() 1334 } 1335 1336 // SourceOffsetForPC implements the same method as documented on experimental.InternalFunction. 1337 func (f internalFunction) SourceOffsetForPC(pc experimental.ProgramCounter) uint64 { 1338 p := f.parent 1339 if bitpack.OffsetArrayLen(p.sourceOffsetMap.irOperationSourceOffsetsInWasmBinary) == 0 { 1340 return 0 // source not available 1341 } 1342 return f.getSourceOffsetInWasmBinary(uint64(pc)) 1343 } 1344 1345 func (ce *callEngine) builtinFunctionFunctionListenerBefore(ctx context.Context, mod api.Module, fn *function) { 1346 base := int(ce.stackBasePointerInBytes >> 3) 1347 pc := uint64(ce.returnAddress) 1348 ce.stackIterator.reset(ce.stack, fn, base, pc) 1349 1350 params := ce.stack[base : base+fn.funcType.ParamNumInUint64] 1351 fn.parent.listener.Before(ctx, mod, fn.definition(), params, &ce.stackIterator) 1352 1353 ce.stackIterator.clear() 1354 } 1355 1356 func (ce *callEngine) builtinFunctionFunctionListenerAfter(ctx context.Context, mod api.Module, fn *function) { 1357 base := int(ce.stackBasePointerInBytes >> 3) 1358 fn.parent.listener.After(ctx, mod, fn.definition(), ce.stack[base:base+fn.funcType.ResultNumInUint64]) 1359 } 1360 1361 func compileGoDefinedHostFunction(buf asm.Buffer, cmp compiler) error { 1362 if err := cmp.compileGoDefinedHostFunction(); err != nil { 1363 return err 1364 } 1365 _, err := cmp.compile(buf) 1366 return err 1367 } 1368 1369 type asmNodes struct { 1370 nodes []asm.Node 1371 } 1372 1373 type offsets struct { 1374 values []uint64 1375 } 1376 1377 func compileWasmFunction(buf asm.Buffer, cmp compiler, ir *wazeroir.CompilationResult, asmNodes *asmNodes, offsets *offsets) (spCeil uint64, sm sourceOffsetMap, err error) { 1378 if err = cmp.compilePreamble(); err != nil { 1379 err = fmt.Errorf("failed to emit preamble: %w", err) 1380 return 1381 } 1382 1383 needSourceOffsets := len(ir.IROperationSourceOffsetsInWasmBinary) > 0 1384 var irOpBegins []asm.Node 1385 if needSourceOffsets { 1386 irOpBegins = append(asmNodes.nodes[:0], make([]asm.Node, len(ir.Operations))...) 1387 defer func() { asmNodes.nodes = irOpBegins }() 1388 } 1389 1390 var skip bool 1391 for i := range ir.Operations { 1392 op := &ir.Operations[i] 1393 if needSourceOffsets { 1394 // If this compilation requires source offsets for DWARF based back trace, 1395 // we emit a NOP node at the beginning of each IR operation to get the 1396 // binary offset of the beginning of the corresponding compiled native code. 1397 irOpBegins[i] = cmp.compileNOP() 1398 } 1399 1400 // Compiler determines whether skip the entire label. 1401 // For example, if the label doesn't have any caller, 1402 // we don't need to generate native code at all as we never reach the region. 1403 if op.Kind == wazeroir.OperationKindLabel { 1404 skip = cmp.compileLabel(op) 1405 } 1406 if skip { 1407 continue 1408 } 1409 1410 if false { 1411 fmt.Printf("compiling op=%s: %s\n", op.Kind, cmp) 1412 } 1413 switch op.Kind { 1414 case wazeroir.OperationKindUnreachable: 1415 err = cmp.compileUnreachable() 1416 case wazeroir.OperationKindLabel: 1417 // label op is already handled ^^. 1418 case wazeroir.OperationKindBr: 1419 err = cmp.compileBr(op) 1420 case wazeroir.OperationKindBrIf: 1421 err = cmp.compileBrIf(op) 1422 case wazeroir.OperationKindBrTable: 1423 err = cmp.compileBrTable(op) 1424 case wazeroir.OperationKindCall: 1425 err = cmp.compileCall(op) 1426 case wazeroir.OperationKindCallIndirect: 1427 err = cmp.compileCallIndirect(op) 1428 case wazeroir.OperationKindDrop: 1429 err = cmp.compileDrop(op) 1430 case wazeroir.OperationKindSelect: 1431 err = cmp.compileSelect(op) 1432 case wazeroir.OperationKindPick: 1433 err = cmp.compilePick(op) 1434 case wazeroir.OperationKindSet: 1435 err = cmp.compileSet(op) 1436 case wazeroir.OperationKindGlobalGet: 1437 err = cmp.compileGlobalGet(op) 1438 case wazeroir.OperationKindGlobalSet: 1439 err = cmp.compileGlobalSet(op) 1440 case wazeroir.OperationKindLoad: 1441 err = cmp.compileLoad(op) 1442 case wazeroir.OperationKindLoad8: 1443 err = cmp.compileLoad8(op) 1444 case wazeroir.OperationKindLoad16: 1445 err = cmp.compileLoad16(op) 1446 case wazeroir.OperationKindLoad32: 1447 err = cmp.compileLoad32(op) 1448 case wazeroir.OperationKindStore: 1449 err = cmp.compileStore(op) 1450 case wazeroir.OperationKindStore8: 1451 err = cmp.compileStore8(op) 1452 case wazeroir.OperationKindStore16: 1453 err = cmp.compileStore16(op) 1454 case wazeroir.OperationKindStore32: 1455 err = cmp.compileStore32(op) 1456 case wazeroir.OperationKindMemorySize: 1457 err = cmp.compileMemorySize() 1458 case wazeroir.OperationKindMemoryGrow: 1459 err = cmp.compileMemoryGrow() 1460 case wazeroir.OperationKindConstI32: 1461 err = cmp.compileConstI32(op) 1462 case wazeroir.OperationKindConstI64: 1463 err = cmp.compileConstI64(op) 1464 case wazeroir.OperationKindConstF32: 1465 err = cmp.compileConstF32(op) 1466 case wazeroir.OperationKindConstF64: 1467 err = cmp.compileConstF64(op) 1468 case wazeroir.OperationKindEq: 1469 err = cmp.compileEq(op) 1470 case wazeroir.OperationKindNe: 1471 err = cmp.compileNe(op) 1472 case wazeroir.OperationKindEqz: 1473 err = cmp.compileEqz(op) 1474 case wazeroir.OperationKindLt: 1475 err = cmp.compileLt(op) 1476 case wazeroir.OperationKindGt: 1477 err = cmp.compileGt(op) 1478 case wazeroir.OperationKindLe: 1479 err = cmp.compileLe(op) 1480 case wazeroir.OperationKindGe: 1481 err = cmp.compileGe(op) 1482 case wazeroir.OperationKindAdd: 1483 err = cmp.compileAdd(op) 1484 case wazeroir.OperationKindSub: 1485 err = cmp.compileSub(op) 1486 case wazeroir.OperationKindMul: 1487 err = cmp.compileMul(op) 1488 case wazeroir.OperationKindClz: 1489 err = cmp.compileClz(op) 1490 case wazeroir.OperationKindCtz: 1491 err = cmp.compileCtz(op) 1492 case wazeroir.OperationKindPopcnt: 1493 err = cmp.compilePopcnt(op) 1494 case wazeroir.OperationKindDiv: 1495 err = cmp.compileDiv(op) 1496 case wazeroir.OperationKindRem: 1497 err = cmp.compileRem(op) 1498 case wazeroir.OperationKindAnd: 1499 err = cmp.compileAnd(op) 1500 case wazeroir.OperationKindOr: 1501 err = cmp.compileOr(op) 1502 case wazeroir.OperationKindXor: 1503 err = cmp.compileXor(op) 1504 case wazeroir.OperationKindShl: 1505 err = cmp.compileShl(op) 1506 case wazeroir.OperationKindShr: 1507 err = cmp.compileShr(op) 1508 case wazeroir.OperationKindRotl: 1509 err = cmp.compileRotl(op) 1510 case wazeroir.OperationKindRotr: 1511 err = cmp.compileRotr(op) 1512 case wazeroir.OperationKindAbs: 1513 err = cmp.compileAbs(op) 1514 case wazeroir.OperationKindNeg: 1515 err = cmp.compileNeg(op) 1516 case wazeroir.OperationKindCeil: 1517 err = cmp.compileCeil(op) 1518 case wazeroir.OperationKindFloor: 1519 err = cmp.compileFloor(op) 1520 case wazeroir.OperationKindTrunc: 1521 err = cmp.compileTrunc(op) 1522 case wazeroir.OperationKindNearest: 1523 err = cmp.compileNearest(op) 1524 case wazeroir.OperationKindSqrt: 1525 err = cmp.compileSqrt(op) 1526 case wazeroir.OperationKindMin: 1527 err = cmp.compileMin(op) 1528 case wazeroir.OperationKindMax: 1529 err = cmp.compileMax(op) 1530 case wazeroir.OperationKindCopysign: 1531 err = cmp.compileCopysign(op) 1532 case wazeroir.OperationKindI32WrapFromI64: 1533 err = cmp.compileI32WrapFromI64() 1534 case wazeroir.OperationKindITruncFromF: 1535 err = cmp.compileITruncFromF(op) 1536 case wazeroir.OperationKindFConvertFromI: 1537 err = cmp.compileFConvertFromI(op) 1538 case wazeroir.OperationKindF32DemoteFromF64: 1539 err = cmp.compileF32DemoteFromF64() 1540 case wazeroir.OperationKindF64PromoteFromF32: 1541 err = cmp.compileF64PromoteFromF32() 1542 case wazeroir.OperationKindI32ReinterpretFromF32: 1543 err = cmp.compileI32ReinterpretFromF32() 1544 case wazeroir.OperationKindI64ReinterpretFromF64: 1545 err = cmp.compileI64ReinterpretFromF64() 1546 case wazeroir.OperationKindF32ReinterpretFromI32: 1547 err = cmp.compileF32ReinterpretFromI32() 1548 case wazeroir.OperationKindF64ReinterpretFromI64: 1549 err = cmp.compileF64ReinterpretFromI64() 1550 case wazeroir.OperationKindExtend: 1551 err = cmp.compileExtend(op) 1552 case wazeroir.OperationKindSignExtend32From8: 1553 err = cmp.compileSignExtend32From8() 1554 case wazeroir.OperationKindSignExtend32From16: 1555 err = cmp.compileSignExtend32From16() 1556 case wazeroir.OperationKindSignExtend64From8: 1557 err = cmp.compileSignExtend64From8() 1558 case wazeroir.OperationKindSignExtend64From16: 1559 err = cmp.compileSignExtend64From16() 1560 case wazeroir.OperationKindSignExtend64From32: 1561 err = cmp.compileSignExtend64From32() 1562 case wazeroir.OperationKindMemoryInit: 1563 err = cmp.compileMemoryInit(op) 1564 case wazeroir.OperationKindDataDrop: 1565 err = cmp.compileDataDrop(op) 1566 case wazeroir.OperationKindMemoryCopy: 1567 err = cmp.compileMemoryCopy() 1568 case wazeroir.OperationKindMemoryFill: 1569 err = cmp.compileMemoryFill() 1570 case wazeroir.OperationKindTableInit: 1571 err = cmp.compileTableInit(op) 1572 case wazeroir.OperationKindElemDrop: 1573 err = cmp.compileElemDrop(op) 1574 case wazeroir.OperationKindTableCopy: 1575 err = cmp.compileTableCopy(op) 1576 case wazeroir.OperationKindRefFunc: 1577 err = cmp.compileRefFunc(op) 1578 case wazeroir.OperationKindTableGet: 1579 err = cmp.compileTableGet(op) 1580 case wazeroir.OperationKindTableSet: 1581 err = cmp.compileTableSet(op) 1582 case wazeroir.OperationKindTableGrow: 1583 err = cmp.compileTableGrow(op) 1584 case wazeroir.OperationKindTableSize: 1585 err = cmp.compileTableSize(op) 1586 case wazeroir.OperationKindTableFill: 1587 err = cmp.compileTableFill(op) 1588 case wazeroir.OperationKindV128Const: 1589 err = cmp.compileV128Const(op) 1590 case wazeroir.OperationKindV128Add: 1591 err = cmp.compileV128Add(op) 1592 case wazeroir.OperationKindV128Sub: 1593 err = cmp.compileV128Sub(op) 1594 case wazeroir.OperationKindV128Load: 1595 err = cmp.compileV128Load(op) 1596 case wazeroir.OperationKindV128LoadLane: 1597 err = cmp.compileV128LoadLane(op) 1598 case wazeroir.OperationKindV128Store: 1599 err = cmp.compileV128Store(op) 1600 case wazeroir.OperationKindV128StoreLane: 1601 err = cmp.compileV128StoreLane(op) 1602 case wazeroir.OperationKindV128ExtractLane: 1603 err = cmp.compileV128ExtractLane(op) 1604 case wazeroir.OperationKindV128ReplaceLane: 1605 err = cmp.compileV128ReplaceLane(op) 1606 case wazeroir.OperationKindV128Splat: 1607 err = cmp.compileV128Splat(op) 1608 case wazeroir.OperationKindV128Shuffle: 1609 err = cmp.compileV128Shuffle(op) 1610 case wazeroir.OperationKindV128Swizzle: 1611 err = cmp.compileV128Swizzle(op) 1612 case wazeroir.OperationKindV128AnyTrue: 1613 err = cmp.compileV128AnyTrue(op) 1614 case wazeroir.OperationKindV128AllTrue: 1615 err = cmp.compileV128AllTrue(op) 1616 case wazeroir.OperationKindV128BitMask: 1617 err = cmp.compileV128BitMask(op) 1618 case wazeroir.OperationKindV128And: 1619 err = cmp.compileV128And(op) 1620 case wazeroir.OperationKindV128Not: 1621 err = cmp.compileV128Not(op) 1622 case wazeroir.OperationKindV128Or: 1623 err = cmp.compileV128Or(op) 1624 case wazeroir.OperationKindV128Xor: 1625 err = cmp.compileV128Xor(op) 1626 case wazeroir.OperationKindV128Bitselect: 1627 err = cmp.compileV128Bitselect(op) 1628 case wazeroir.OperationKindV128AndNot: 1629 err = cmp.compileV128AndNot(op) 1630 case wazeroir.OperationKindV128Shl: 1631 err = cmp.compileV128Shl(op) 1632 case wazeroir.OperationKindV128Shr: 1633 err = cmp.compileV128Shr(op) 1634 case wazeroir.OperationKindV128Cmp: 1635 err = cmp.compileV128Cmp(op) 1636 case wazeroir.OperationKindV128AddSat: 1637 err = cmp.compileV128AddSat(op) 1638 case wazeroir.OperationKindV128SubSat: 1639 err = cmp.compileV128SubSat(op) 1640 case wazeroir.OperationKindV128Mul: 1641 err = cmp.compileV128Mul(op) 1642 case wazeroir.OperationKindV128Div: 1643 err = cmp.compileV128Div(op) 1644 case wazeroir.OperationKindV128Neg: 1645 err = cmp.compileV128Neg(op) 1646 case wazeroir.OperationKindV128Sqrt: 1647 err = cmp.compileV128Sqrt(op) 1648 case wazeroir.OperationKindV128Abs: 1649 err = cmp.compileV128Abs(op) 1650 case wazeroir.OperationKindV128Popcnt: 1651 err = cmp.compileV128Popcnt(op) 1652 case wazeroir.OperationKindV128Min: 1653 err = cmp.compileV128Min(op) 1654 case wazeroir.OperationKindV128Max: 1655 err = cmp.compileV128Max(op) 1656 case wazeroir.OperationKindV128AvgrU: 1657 err = cmp.compileV128AvgrU(op) 1658 case wazeroir.OperationKindV128Pmin: 1659 err = cmp.compileV128Pmin(op) 1660 case wazeroir.OperationKindV128Pmax: 1661 err = cmp.compileV128Pmax(op) 1662 case wazeroir.OperationKindV128Ceil: 1663 err = cmp.compileV128Ceil(op) 1664 case wazeroir.OperationKindV128Floor: 1665 err = cmp.compileV128Floor(op) 1666 case wazeroir.OperationKindV128Trunc: 1667 err = cmp.compileV128Trunc(op) 1668 case wazeroir.OperationKindV128Nearest: 1669 err = cmp.compileV128Nearest(op) 1670 case wazeroir.OperationKindV128Extend: 1671 err = cmp.compileV128Extend(op) 1672 case wazeroir.OperationKindV128ExtMul: 1673 err = cmp.compileV128ExtMul(op) 1674 case wazeroir.OperationKindV128Q15mulrSatS: 1675 err = cmp.compileV128Q15mulrSatS(op) 1676 case wazeroir.OperationKindV128ExtAddPairwise: 1677 err = cmp.compileV128ExtAddPairwise(op) 1678 case wazeroir.OperationKindV128FloatPromote: 1679 err = cmp.compileV128FloatPromote(op) 1680 case wazeroir.OperationKindV128FloatDemote: 1681 err = cmp.compileV128FloatDemote(op) 1682 case wazeroir.OperationKindV128FConvertFromI: 1683 err = cmp.compileV128FConvertFromI(op) 1684 case wazeroir.OperationKindV128Dot: 1685 err = cmp.compileV128Dot(op) 1686 case wazeroir.OperationKindV128Narrow: 1687 err = cmp.compileV128Narrow(op) 1688 case wazeroir.OperationKindV128ITruncSatFromF: 1689 err = cmp.compileV128ITruncSatFromF(op) 1690 case wazeroir.OperationKindBuiltinFunctionCheckExitCode: 1691 err = cmp.compileBuiltinFunctionCheckExitCode() 1692 case wazeroir.OperationKindAtomicLoad: 1693 err = cmp.compileAtomicLoad(op) 1694 case wazeroir.OperationKindAtomicLoad8: 1695 err = cmp.compileAtomicLoad8(op) 1696 case wazeroir.OperationKindAtomicLoad16: 1697 err = cmp.compileAtomicLoad16(op) 1698 case wazeroir.OperationKindAtomicStore: 1699 err = cmp.compileAtomicStore(op) 1700 case wazeroir.OperationKindAtomicStore8: 1701 err = cmp.compileAtomicStore8(op) 1702 case wazeroir.OperationKindAtomicStore16: 1703 err = cmp.compileAtomicStore16(op) 1704 case wazeroir.OperationKindAtomicRMW: 1705 err = cmp.compileAtomicRMW(op) 1706 case wazeroir.OperationKindAtomicRMW8: 1707 err = cmp.compileAtomicRMW8(op) 1708 case wazeroir.OperationKindAtomicRMW16: 1709 err = cmp.compileAtomicRMW16(op) 1710 case wazeroir.OperationKindAtomicRMWCmpxchg: 1711 err = cmp.compileAtomicRMWCmpxchg(op) 1712 case wazeroir.OperationKindAtomicRMW8Cmpxchg: 1713 err = cmp.compileAtomicRMW8Cmpxchg(op) 1714 case wazeroir.OperationKindAtomicRMW16Cmpxchg: 1715 err = cmp.compileAtomicRMW16Cmpxchg(op) 1716 case wazeroir.OperationKindAtomicMemoryWait: 1717 err = cmp.compileAtomicMemoryWait(op) 1718 case wazeroir.OperationKindAtomicMemoryNotify: 1719 err = cmp.compileAtomicMemoryNotify(op) 1720 case wazeroir.OperationKindAtomicFence: 1721 err = cmp.compileAtomicFence(op) 1722 default: 1723 err = errors.New("unsupported") 1724 } 1725 if err != nil { 1726 err = fmt.Errorf("operation %s: %w", op.Kind.String(), err) 1727 return 1728 } 1729 } 1730 1731 spCeil, err = cmp.compile(buf) 1732 if err != nil { 1733 err = fmt.Errorf("failed to compile: %w", err) 1734 return 1735 } 1736 1737 if needSourceOffsets { 1738 offsetInNativeBin := append(offsets.values[:0], make([]uint64, len(irOpBegins))...) 1739 offsets.values = offsetInNativeBin 1740 for i, nop := range irOpBegins { 1741 offsetInNativeBin[i] = nop.OffsetInBinary() 1742 } 1743 sm.irOperationOffsetsInNativeBinary = bitpack.NewOffsetArray(offsetInNativeBin) 1744 sm.irOperationSourceOffsetsInWasmBinary = bitpack.NewOffsetArray(ir.IROperationSourceOffsetsInWasmBinary) 1745 } 1746 return 1747 }