github.com/hikaru7719/go@v0.0.0-20181025140707-c8b2ac68906a/src/cmd/compile/internal/gc/ssa.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

var ssaDump string     // early copy of $GOSSAFUNC; the func name to dump output for
var ssaDumpStdout bool // whether to dump to stdout
const ssaDumpFile = "ssa.html"

// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*Node

func initssaconfig() {
	types_ := ssa.NewTypes()

	if thearch.SoftFloat {
		softfloatInit()
	}

	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
	_ = types.NewPtr(types.NewPtr(types.Idealstring))                 // **string
	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewSlice(types.Idealstring))               // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[TINT16])                             // *int16
	_ = types.NewPtr(types.Types[TINT64])                             // *int64
	_ = types.NewPtr(types.Errortype)                                 // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug['N'] == 0)
	if thearch.LinkArch.Name == "386" {
		ssaConfig.Set387(thearch.Use387)
	}
	ssaConfig.SoftFloat = thearch.SoftFloat
	ssaConfig.Race = flag_race
	ssaCaches = make([]ssa.Cache, nBackendWorkers)

	// Set up some runtime functions we'll need to call.
	assertE2I = sysfunc("assertE2I")
	assertE2I2 = sysfunc("assertE2I2")
	assertI2I = sysfunc("assertI2I")
	assertI2I2 = sysfunc("assertI2I2")
	deferproc = sysfunc("deferproc")
	Deferreturn = sysfunc("deferreturn")
	Duffcopy = sysfunc("duffcopy")
	Duffzero = sysfunc("duffzero")
	gcWriteBarrier = sysfunc("gcWriteBarrier")
	goschedguarded = sysfunc("goschedguarded")
	growslice = sysfunc("growslice")
	msanread = sysfunc("msanread")
	msanwrite = sysfunc("msanwrite")
	newproc = sysfunc("newproc")
	panicdivide = sysfunc("panicdivide")
	panicdottypeE = sysfunc("panicdottypeE")
	panicdottypeI = sysfunc("panicdottypeI")
	panicindex = sysfunc("panicindex")
	panicnildottype = sysfunc("panicnildottype")
	panicslice = sysfunc("panicslice")
	raceread = sysfunc("raceread")
	racereadrange = sysfunc("racereadrange")
	racewrite = sysfunc("racewrite")
	racewriterange = sysfunc("racewriterange")
	supportPopcnt = sysfunc("support_popcnt")
	supportSSE41 = sysfunc("support_sse41")
	arm64SupportAtomics = sysfunc("arm64_support_atomics")
	typedmemclr = sysfunc("typedmemclr")
	typedmemmove = sysfunc("typedmemmove")
	Udiv = sysfunc("udiv")
	writeBarrier = sysfunc("writeBarrier")

	// GO386=387 runtime functions
	ControlWord64trunc = sysfunc("controlWord64trunc")
	ControlWord32 = sysfunc("controlWord32")

	// Wasm
	WasmMove = sysfunc("wasmMove")
	WasmZero = sysfunc("wasmZero")
	WasmDiv = sysfunc("wasmDiv")
	WasmTruncS = sysfunc("wasmTruncS")
	WasmTruncU = sysfunc("wasmTruncU")
	SigPanic = sysfunc("sigpanic")
}

// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
	name := fn.funcname()
	printssa := name == ssaDump
	var astBuf *bytes.Buffer
	if printssa {
		astBuf = &bytes.Buffer{}
		fdumplist(astBuf, "buildssa-enter", fn.Func.Enter)
		fdumplist(astBuf, "buildssa-body", fn.Nbody)
		fdumplist(astBuf, "buildssa-exit", fn.Func.Exit)
		if ssaDumpStdout {
			fmt.Println("generating SSA for", name)
			fmt.Print(astBuf.String())
		}
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	s.hasdefer = fn.Func.HasDefer()
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa && ssaDumpStdout,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Type = fn.Type
	s.f.Config = ssaConfig
	s.f.Cache = &ssaCaches[worker]
	s.f.Cache.Reset()
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
	s.f.Name = name
	s.f.PrintOrHtmlSSA = printssa
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.panics = map[funcLine]*ssa.Block{}
	s.softFloat = s.config.SoftFloat

	if printssa {
		s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDumpFile, s.f.Frontend(), name)
		// TODO: generate and print a mapping from nodes to values and blocks
		dumpSourcesColumn(s.f.HTMLWriter, fn)
		s.f.HTMLWriter.WriteAST("AST", astBuf)
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class() {
		case PPARAM, PPARAMOUT:
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
			if n.Class() == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %v unimplemented", n.Class())
		}
	}

	// Populate SSAable arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class() == PPARAM && s.canSSA(n) {
			v := s.newValue0A(ssa.OpArg, n.Type, n)
			s.vars[n] = v
			s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	for _, b := range s.f.Blocks {
		if b.Pos != src.NoXPos {
			s.updateUnsetPredPos(b)
		}
	}

	s.insertPhis()

	// Main call to ssa package to compile function
	ssa.Compile(s.f)
	return s.f
}
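
// As a usage sketch (assuming a typical invocation): compiling with
//
//	GOSSAFUNC=Foo go build pkg
//
// selects the function Foo for dumping; the SSA phases are written to
// ssa.html (ssaDumpFile above), or to stdout when ssaDumpStdout is set.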

func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
	// Read sources of target function fn.
	fname := Ctxt.PosTable.Pos(fn.Pos).Filename()
	targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line())
	if err != nil {
		writer.Logger.Logf("cannot read sources for function %v: %v", fn, err)
	}

	// Read sources of inlined functions.
	var inlFns []*ssa.FuncLines
	for _, fi := range ssaDumpInlined {
		var elno src.XPos
		if fi.Name.Defn == nil {
			// Endlineno is filled from exported data.
			elno = fi.Func.Endlineno
		} else {
			elno = fi.Name.Defn.Func.Endlineno
		}
		fname := Ctxt.PosTable.Pos(fi.Pos).Filename()
		fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line())
		if err != nil {
			writer.Logger.Logf("cannot read sources for function %v: %v", fi, err)
			continue
		}
		inlFns = append(inlFns, fnLines)
	}

	sort.Sort(ssa.ByTopo(inlFns))
	if targetFn != nil {
		inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
	}

	writer.WriteSources("sources", inlFns)
}

func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
	f, err := os.Open(os.ExpandEnv(file))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var lines []string
	ln := uint(1)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() && ln <= end {
		if ln >= start {
			lines = append(lines, scanner.Text())
		}
		ln++
	}
	return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
}
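
// For example, readFuncLines(f, 10, 12) returns the three lines 10 through 12
// of f; both endpoints are inclusive.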

// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurs on that
// predecessor if its position is updated. B should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
	if b.Pos == src.NoXPos {
		s.Fatalf("Block %s should have a position", b)
	}
	bestPos := src.NoXPos
	for _, e := range b.Preds {
		p := e.Block()
		if !p.LackingPos() {
			continue
		}
		if bestPos == src.NoXPos {
			bestPos = b.Pos
			for _, v := range b.Values {
				if v.LackingPos() {
					continue
				}
				if v.Pos != src.NoXPos {
					// Assume values are still in roughly textual order;
					// TODO: could also seek minimum position?
					bestPos = v.Pos
					break
				}
			}
		}
		p.Pos = bestPos
		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
	}
}

type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *Node

	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []src.XPos
	// the last line number processed; it may have been popped
	lastPos src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	cgoUnsafeArgs bool
	hasdefer      bool // whether the function contains a defer statement
	softFloat     bool
}

type funcLine struct {
	f    *obj.LSym
	base *src.PosBase
	line uint
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}
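
// Note that lab.target may already be non-nil by the time an OLABEL statement
// is reached: a forward goto (see the OGOTO case below) also calls label()
// and allocates the target block first, and the OLABEL case then reuses it.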

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
	okVar     = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	if b.LackingPos() {
		// Empty plain blocks get the line of their successor (handled after all blocks created),
		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
		// and for blocks ending in GOTO/BREAK/CONTINUE.
		b.Pos = src.NoXPos
	} else {
		b.Pos = s.lastPos
	}
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit nodes with a missing line number;
		// use the parent line number in this case.
		line = s.peekPos()
		if Debug['K'] != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	} else {
		s.lastPos = line
	}

	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1Apos adds a new value with one argument and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, yes means maybe).
func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
	}
	return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, yes means maybe).
func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
	}
	return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3Apos adds a new value with three arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, yes means maybe).
func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
	}
	return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.f.Entry.NewValue0(src.NoXPos, op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
}

// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
}
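
// Note that the entryNewValue* helpers pass src.NoXPos rather than
// s.peekPos(): entry-block values (SP, SB, arguments, hoisted constants) are
// not tied to any particular statement in the source.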

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
	return s.f.ConstSlice(t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
	return s.f.ConstInterface(t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
	return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(types.Types[TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(t, c, s.sp)
}

// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg); ok {
			return c
		}
	}
	return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg0, arg1); ok {
			return c
		}
	}
	return s.newValue2(op, t, arg0, arg1)
}

func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
	if !s.curfn.Func.InstrumentBody() {
		return
	}

	w := t.Size()
	if w == 0 {
		return // can't race on zero-sized things
	}

	if ssa.IsSanitizerSafeAddr(addr) {
		return
	}

	var fn *obj.LSym
	needWidth := false

	if flag_msan {
		fn = msanread
		if wr {
			fn = msanwrite
		}
		needWidth = true
	} else if flag_race && t.NumComponents(types.CountBlankFields) > 1 {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		// composites with only one element don't have subobjects, though.
		fn = racereadrange
		if wr {
			fn = racewriterange
		}
		needWidth = true
	} else if flag_race {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		fn = raceread
		if wr {
			fn = racewrite
		}
	} else {
		panic("unreachable")
	}

	args := []*ssa.Value{addr}
	if needWidth {
		args = append(args, s.constInt(types.Types[TUINTPTR], w))
	}
	s.rtcall(fn, true, nil, args...)
}
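
// For example, under -race a write to a two-field struct is instrumented as
// racewriterange(addr, size), while a write to a plain int becomes
// racewrite(addr); under -msan both cases pass the width, e.g.
// msanwrite(addr, size).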

func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
	s.instrument(t, src, false)
	return s.rawLoad(t, src)
}

func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
	return s.newValue2(ssa.OpLoad, t, src, s.mem())
}

func (s *state) store(t *types.Type, dst, val *ssa.Value) {
	s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}

func (s *state) zero(t *types.Type, dst *ssa.Value) {
	s.instrument(t, dst, true)
	store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
	store.Aux = t
	s.vars[&memVar] = store
}

func (s *state) move(t *types.Type, dst, src *ssa.Value) {
	s.instrument(t, src, false)
	s.instrument(t, dst, true)
	store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
	store.Aux = t
	s.vars[&memVar] = store
}

// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
		// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op != OLABEL {
		return
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		deref := false
		if !canSSAType(n.Rlist.First().Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, deref, 0)
		s.assign(n.List.Second(), resok, false, 0)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		s.assign(n.List.First(), v1, false, 0)
		s.assign(n.List.Second(), v2, false, 0)
		return

	case ODCL:
		if n.Left.Class() == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym
		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.labeledControl(); ctl != nil {
			s.labeledNodes[ctl] = lab
		}

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
		b.AddEdgeTo(lab.target)

	case OAS:
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//   VARDEF x
			//   COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !isZero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// Check whether we're writing the result of an append back to the same slice.
				// If so, we handle it specially to avoid write barriers on the fast
				// (non-growth) path.
				if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 {
					break
				}
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if s.canSSA(n.Left) {
					if Debug_append > 0 { // replicating old diagnostic message
						Warnl(n.Pos, "append: len-only update (in local slice)")
					}
					break
				}
				if Debug_append > 0 {
					Warnl(n.Pos, "append: len-only update")
				}
				s.append(rhs, true)
				return
			}
		}

		if n.Left.isBlank() {
			// _ = rhs
			// Just evaluate rhs for side-effects.
			if rhs != nil {
				s.expr(rhs)
			}
			return
		}

		var t *types.Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		var r *ssa.Value
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//    tmp = len(*p)
			//    (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, deref, skip)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		var likely int8
		if n.Likely() {
			likely = 1
		}
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		b := s.exit()
		b.Pos = s.lastPos.WithIsStmt()

	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Sym.Linksym()

	case OCONTINUE, OBREAK:
		var to *ssa.Block
		if n.Left == nil {
			// plain break/continue
			switch n.Op {
			case OCONTINUE:
				to = s.continueTo
			case OBREAK:
				to = s.breakTo
			}
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
		}

		b := s.endBlock()
		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
		b.AddEdgeTo(to)

	case OFOR, OFORUNTIL:
		// OFOR: for Ninit; Left; Right { Nbody }
		// cond (Left); body (Nbody); incr (Right)
		//
		// OFORUNTIL: for Ninit; Left; Right; List { Nbody }
		// => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test (OFOR) or body (OFORUNTIL)
		b := s.endBlock()
		if n.Op == OFOR {
			b.AddEdgeTo(bCond)
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}

		} else {
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr (and, for OFORUNTIL, condition)
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if n.Op == OFOR {
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bCond)
				// It can happen that bIncr ends in a block containing only VARKILL,
				// and that muddles the debugging experience.
				if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
					b.Pos = bCond.Pos
				}
			}
		} else {
			// bCond is unused in OFORUNTIL, so repurpose it.
			bLateIncr := bCond
			// test condition
			s.condBranch(n.Left, bLateIncr, bEnd, 1)
			// generate late increment
			s.startBlock(bLateIncr)
			s.stmtList(n.List)
			s.endBlock().AddEdgeTo(bBody)
		}

		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
		// If we still have a current block here, then mark it unreachable.
		if s.curBlock != nil {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
		}
		s.startBlock(bEnd)

	case OVARDEF:
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
		}
	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken() {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		switch n.Left.Class() {
		case PAUTO, PPARAM, PPARAMOUT:
		default:
			s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if s.hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.curfn.Func.Exit)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
		s.store(n.Type, addr, val)
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}
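
// opToSSA below maps a front-end operator plus a concrete operand type to the
// corresponding typed SSA op. For example, x + y with x, y of type int32
// looks up opAndType{OADD, TINT32} and yields ssa.OpAdd32; signedness matters
// only where distinct ops exist, e.g. ODIV maps to ssa.OpDiv32 for TINT32 but
// ssa.OpDiv32u for TUINT32.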

type opAndType struct {
	op    Op
	etype types.EType
}

var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}:    ssa.OpMul8,
	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
	opAndType{OMUL, TINT16}:   ssa.OpMul16,
	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
	opAndType{OMUL, TINT32}:   ssa.OpMul32,
	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
	opAndType{OMUL, TINT64}:   ssa.OpMul64,
	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}:   ssa.OpMod8,
	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
	opAndType{OMOD, TINT16}:  ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}:  ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}:  ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}:   ssa.OpAnd8,
	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
	opAndType{OAND, TINT16}:  ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}:  ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}:  ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}:   ssa.OpOr8,
	opAndType{OOR, TUINT8}:  ssa.OpOr8,
	opAndType{OOR, TINT16}:  ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}:  ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}:  ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}:   ssa.OpXor8,
	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
	opAndType{OXOR, TINT16}:  ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}:  ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}:  ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
	opAndType{OEQ, TINT8}:      ssa.OpEq8,
	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
	opAndType{OEQ, TINT16}:     ssa.OpEq16,
	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
	opAndType{OEQ, TINT32}:     ssa.OpEq32,
	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
	opAndType{OEQ, TINT64}:     ssa.OpEq64,
	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
	opAndType{OEQ, TPTR}:       ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,

	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
	opAndType{ONE, TINT8}:      ssa.OpNeq8,
	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
	opAndType{ONE, TINT16}:     ssa.OpNeq16,
	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
	opAndType{ONE, TINT32}:     ssa.OpNeq32,
	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
	opAndType{ONE, TINT64}:     ssa.OpNeq64,
	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
	opAndType{ONE, TPTR}:       ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,

	opAndType{OLT, TINT8}:    ssa.OpLess8,
	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
	opAndType{OLT, TINT16}:   ssa.OpLess16,
	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
	opAndType{OLT, TINT32}:   ssa.OpLess32,
	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
	opAndType{OLT, TINT64}:   ssa.OpLess64,
	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}:    ssa.OpGreater8,
	opAndType{OGT, TUINT8}:   ssa.OpGreater8U,
	opAndType{OGT, TINT16}:   ssa.OpGreater16,
	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
	opAndType{OGT, TINT32}:   ssa.OpGreater32,
	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
	opAndType{OGT, TINT64}:   ssa.OpGreater64,
	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}:    ssa.OpLeq8,
	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
	opAndType{OLE, TINT16}:   ssa.OpLeq16,
	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
	opAndType{OLE, TINT32}:   ssa.OpLeq32,
	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
	opAndType{OLE, TINT64}:   ssa.OpLeq64,
	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}:    ssa.OpGeq8,
	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
	opAndType{OGE, TINT16}:   ssa.OpGeq16,
	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
	opAndType{OGE, TINT32}:   ssa.OpGeq32,
	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
	opAndType{OGE, TINT64}:   ssa.OpGeq64,
	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}

func (s *state) concreteEtype(t *types.Type) types.EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.PtrSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}

func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}

func floatForComplex(t *types.Type) *types.Type {
	if t.Size() == 8 {
		return types.Types[TFLOAT32]
	} else {
		return types.Types[TFLOAT64]
	}
}

type opAndTwoTypes struct {
	op     Op
	etype1 types.EType
	etype2 types.EType
}

type twoTypes struct {
	etype1 types.EType
	etype2 types.EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType types.EType
}
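
// fpConvOpToSSA below expresses each int<->float conversion as a pair of ops
// (op1, op2) through an intermediate type. For example, converting uint32 to
// float32 on a 64-bit arch first zero-extends to 64 bits (OpZeroExt32to64)
// and then applies the signed 64-bit conversion (OpCvt64to32F), sidestepping
// the lack of an unsigned convert; entries containing ssa.OpInvalid (the
// uint64 cases) are instead expanded into branchy code elsewhere.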
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{

	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead

	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}

// This map is used only on 32-bit archs and only includes the differences:
// on 32-bit archs, don't use int64<->float conversion for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
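
// shiftOpToSSA below is keyed on both the operand type and the shift-count
// type. For example, x << s with x of type int16 and s of type uint8 maps to
// ssa.OpLsh16x8; right shifts additionally distinguish the signedness of the
// shifted operand, e.g. ssa.OpRsh16x8 (arithmetic) versus ssa.OpRsh16Ux8
// (logical).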
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}
func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}

// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OARRAYBYTESTRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
		len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTRARRAYBYTETMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := n.Left.Sym.Linksym()
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class() == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym).Linksym()
			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr := s.addr(n, false)
		return s.load(n.Type, addr)
	case OCLOSUREVAR:
		addr := s.addr(n, false)
		return s.load(n.Type, addr)
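	// Note: constants are materialized at the width of n.Type; a literal 1
	// at type int8 becomes an OpConst8, while the same literal at type
	// int64 becomes an OpConst64. The size switches below pick the op.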
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := types.Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := types.Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad complex size %d", n.Type.Size())
				return nil
			}

		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal error.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtrShaped() || from.Etype == TUNSAFEPTR && to.IsPtrShaped() {
			return v
		}

		// map <--> *hmap
		if to.Etype == TMAP && from.IsPtr() &&
			to.MapType().Hmap == from.Elem() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsBoolean() && tt.IsKind(TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
			return s.newValue1(ssa.OpCopy, n.Type, x)
		}
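		// The integer-conversion switches below key on 10*ft.Size()+tt.Size(),
		// packing the source and destination byte sizes into one small int:
		// e.g. 84 means an 8-byte source and a 4-byte destination, i.e.
		// Trunc64to32.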
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}

		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || s.softFloat {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}

			if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
					// tt is float32 or float64, and ft is also unsigned
					if tt.Size() == 4 {
						return s.uint32Tofloat32(n, x, ft, tt)
					}
					if tt.Size() == 8 {
						return s.uint32Tofloat64(n, x, ft, tt)
					}
				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
					// ft is float32 or float64, and tt is unsigned integer
					if ft.Size() == 4 {
						return s.float32ToUint32(n, x, ft, tt)
					}
					if ft.Size() == 8 {
						return s.float64ToUint32(n, x, ft, tt)
					}
				}
			}

			if !ok {
				s.Fatalf("weird float conversion %v -> %v", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValueOrSfCall1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValueOrSfCall1(op1, n.Type, x)
				}
				return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
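			// Roughly, the unsigned-64 helpers below branch on the sign bit:
			// a uint64 with the top bit clear converts directly as an int64;
			// otherwise the value is halved (keeping the low bit so rounding
			// is preserved), converted, and doubled. The float-to-uint64
			// direction uses the mirror-image cutoff trick. This is the
			// "branchy code expansion" the fpConvOpToSSA table refers to.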
			if ft.IsInteger() {
				// tt is float32 or float64, and ft is also unsigned
				if tt.Size() == 4 {
					return s.uint64Tofloat32(n, x, ft, tt)
				}
				if tt.Size() == 8 {
					return s.uint64Tofloat64(n, x, ft, tt)
				}
				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
			}
			// ft is float32 or float64, and tt is unsigned integer
			if ft.Size() == 4 {
				return s.float32ToUint64(n, x, ft, tt)
			}
			if ft.Size() == 8 {
				return s.float64ToUint64(n, x, ft, tt)
			}
			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
			return nil
		}

		if ft.IsComplex() && tt.IsComplex() {
			var op ssa.Op
			if ft.Size() == tt.Size() {
				switch ft.Size() {
				case 8:
					op = ssa.OpRound32F
				case 16:
					op = ssa.OpRound64F
				default:
					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
				}
			} else if ft.Size() == 8 && tt.Size() == 16 {
				op = ssa.OpCvt32Fto64F
			} else if ft.Size() == 16 && tt.Size() == 8 {
				op = ssa.OpCvt64Fto32F
			} else {
				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
			}
			ftp := floatForComplex(ft)
			ttp := floatForComplex(tt)
			return s.newValue2(ssa.OpComplexMake, tt,
				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
		}

		s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
		return nil

	case ODOTTYPE:
		res, _ := s.dottype(n, false)
		return res

	// binary ops
	case OLT, OEQ, ONE, OLE, OGE, OGT:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Left.Type.IsComplex() {
			pt := floatForComplex(n.Left.Type)
			op := s.ssaOp(OEQ, pt)
			r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
			i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
			c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
			switch n.Op {
			case OEQ:
				return c
			case ONE:
				return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
			default:
				s.Fatalf("ordered complex compare %v", n.Op)
			}
		}
		if n.Left.Type.IsFloat() {
			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
		}
		return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
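	// Complex multiplication below is the textbook expansion
	//     (a+bi) * (c+di) = (ac - bd) + (ad + bc)i
	// with complex64 operands widened to float64 for the intermediate
	// products and narrowed again when the result is assembled.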
	case OMUL:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancelation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
			ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))

			if pt != wt { // Narrow to store back
				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
			}

			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}

		if n.Type.IsFloat() {
			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		}

		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
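	// Complex division (when it reaches the backend at all; see the TODO
	// inside the case) uses the conjugate expansion
	//     (a+bi) / (c+di) = ((ac + bd) + (bc - ad)i) / (c*c + d*d)
	// again computed in float64.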
	case ODIV:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			// TODO this is not executed because the front-end substitutes a runtime call.
			// That probably ought to change; with modest optimization the widen/narrow
			// conversions could all be elided in larger expression trees.
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			divop := ssa.OpDiv64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancelation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
			xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
			ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))

			// TODO not sure if this is best done in wide precision or narrow
			// Double-rounding might be an issue.
			// Note that the pre-SSA implementation does the entire calculation
			// in wide format, so wide is compatible.
			xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
			ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)

			if pt != wt { // Narrow to store back
				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
			}
			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		if n.Type.IsFloat() {
			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		}
		return s.intDivide(n, a, b)
	case OMOD:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.intDivide(n, a, b)
	case OADD, OSUB:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			pt := floatForComplex(n.Type)
			op := s.ssaOp(n.Op, pt)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
		}
		if n.Type.IsFloat() {
			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OAND, OOR, OXOR:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OLSH, ORSH:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
	case OANDAND, OOROR:
		// To implement OANDAND (and OOROR), we introduce a
		// new temporary variable to hold the result. The
		// variable is associated with the OANDAND node in the
		// s.vars table (normally variables are only
		// associated with ONAME nodes). We convert
		//     A && B
		// to
		//     var = A
		//     if var {
		//         var = B
		//     }
		// Using var in the subsequent block introduces the
		// necessary phi variable.
		el := s.expr(n.Left)
		s.vars[n] = el

		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.SetControl(el)
		// In theory, we should set b.Likely here based on context.
		// However, gc only gives us likeliness hints
		// in a single place, for plain OIF statements,
		// and passing around context is finicky, so don't bother for now.
		bRight := s.f.NewBlock(ssa.BlockPlain)
		bResult := s.f.NewBlock(ssa.BlockPlain)
		if n.Op == OANDAND {
			b.AddEdgeTo(bRight)
			b.AddEdgeTo(bResult)
		} else if n.Op == OOROR {
			b.AddEdgeTo(bResult)
			b.AddEdgeTo(bRight)
		}

		s.startBlock(bRight)
		er := s.expr(n.Right)
		s.vars[n] = er

		b = s.endBlock()
		b.AddEdgeTo(bResult)

		s.startBlock(bResult)
		return s.variable(n, types.Types[TBOOL])
	case OCOMPLEX:
		r := s.expr(n.Left)
		i := s.expr(n.Right)
		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)

	// unary ops
	case OMINUS:
		a := s.expr(n.Left)
		if n.Type.IsComplex() {
			tp := floatForComplex(n.Type)
			negop := s.ssaOp(n.Op, tp)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
		}
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case ONOT, OCOM:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case OIMAG, OREAL:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
	case OPLUS:
		return s.expr(n.Left)

	case OADDR:
		return s.addr(n.Left, n.Bounded())

	case OINDREGSP:
		addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
		return s.load(n.Type, addr)

	case OIND:
		p := s.exprPtr(n.Left, false, n.Pos)
		return s.load(n.Type, p)

	case ODOT:
		if n.Left.Op == OSTRUCTLIT {
			// All literals with nonzero fields have already been
			// rewritten during walk. Any that remain are just T{}
			// or equivalents. Use the zero value.
			if !isZero(n.Left) {
				Fatalf("literal with nonzero value in SSA: %v", n.Left)
			}
			return s.zeroVal(n.Type)
		}
		// If n is addressable and can't be represented in
		// SSA, then load just the selected field. This
		// prevents false memory dependencies in race/msan
		// instrumentation.
		if islvalue(n) && !s.canSSA(n) {
			p := s.addr(n, false)
			return s.load(n.Type, p)
		}
		v := s.expr(n.Left)
		return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)

	case ODOTPTR:
		p := s.exprPtr(n.Left, false, n.Pos)
		p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
		return s.load(n.Type, p)

	case OINDEX:
		switch {
		case n.Left.Type.IsString():
			if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
				// Replace "abc"[1] with 'b'.
				// Delayed until now because "abc"[1] is not an ideal constant.
				// See test/fixedbugs/issue11370.go.
				return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
			}
			a := s.expr(n.Left)
			i := s.expr(n.Right)
			i = s.extendIndex(i, panicindex)
			if !n.Bounded() {
				len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
				s.boundsCheck(i, len)
			}
			ptrtyp := s.f.Config.Types.BytePtr
			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
			if Isconst(n.Right, CTINT) {
				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
			} else {
				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
			}
			return s.load(types.Types[TUINT8], ptr)
		case n.Left.Type.IsSlice():
			p := s.addr(n, false)
			return s.load(n.Left.Type.Elem(), p)
		case n.Left.Type.IsArray():
			if canSSAType(n.Left.Type) {
				// SSA can handle arrays of length at most 1.
				bound := n.Left.Type.NumElem()
				a := s.expr(n.Left)
				i := s.expr(n.Right)
				if bound == 0 {
					// Bounds check will never succeed. Might as well
					// use constants for the bounds check.
					z := s.constInt(types.Types[TINT], 0)
					s.boundsCheck(z, z)
					// The return value won't be live, return junk.
					return s.newValue0(ssa.OpUnknown, n.Type)
				}
				i = s.extendIndex(i, panicindex)
				if !n.Bounded() {
					s.boundsCheck(i, s.constInt(types.Types[TINT], bound))
				}
				return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
			}
			p := s.addr(n, false)
			return s.load(n.Left.Type.Elem(), p)
		default:
			s.Fatalf("bad type for index %v", n.Left.Type)
			return nil
		}

	case OLEN, OCAP:
		switch {
		case n.Left.Type.IsSlice():
			op := ssa.OpSliceLen
			if n.Op == OCAP {
				op = ssa.OpSliceCap
			}
			return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
		case n.Left.Type.IsString(): // string; not reachable for OCAP
			return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
		case n.Left.Type.IsMap(), n.Left.Type.IsChan():
			return s.referenceTypeBuiltin(n, s.expr(n.Left))
		default: // array
			return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
		}

	case OSPTR:
		a := s.expr(n.Left)
		if n.Left.Type.IsSlice() {
			return s.newValue1(ssa.OpSlicePtr, n.Type, a)
		} else {
			return s.newValue1(ssa.OpStringPtr, n.Type, a)
		}

	case OITAB:
		a := s.expr(n.Left)
		return s.newValue1(ssa.OpITab, n.Type, a)

	case OIDATA:
		a := s.expr(n.Left)
		return s.newValue1(ssa.OpIData, n.Type, a)

	case OEFACE:
		tab := s.expr(n.Left)
		data := s.expr(n.Right)
		return s.newValue2(ssa.OpIMake, n.Type, tab, data)
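	// For the slicing cases below, s.slice performs the bounds checks and
	// computes the resulting ptr/len/cap triple: roughly, for s[i:j:k] the
	// new length is j-i, the new capacity is k-i, and the pointer advances
	// by i elements (with missing bounds defaulting to 0, len, and cap).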
	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
		v := s.expr(n.Left)
		var i, j, k *ssa.Value
		low, high, max := n.SliceBounds()
		if low != nil {
			i = s.extendIndex(s.expr(low), panicslice)
		}
		if high != nil {
			j = s.extendIndex(s.expr(high), panicslice)
		}
		if max != nil {
			k = s.extendIndex(s.expr(max), panicslice)
		}
		p, l, c := s.slice(n.Left.Type, v, i, j, k, n.Bounded())
		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)

	case OSLICESTR:
		v := s.expr(n.Left)
		var i, j *ssa.Value
		low, high, _ := n.SliceBounds()
		if low != nil {
			i = s.extendIndex(s.expr(low), panicslice)
		}
		if high != nil {
			j = s.extendIndex(s.expr(high), panicslice)
		}
		p, l, _ := s.slice(n.Left.Type, v, i, j, nil, n.Bounded())
		return s.newValue2(ssa.OpStringMake, n.Type, p, l)

	case OCALLFUNC:
		if isIntrinsicCall(n) {
			return s.intrinsicCall(n)
		}
		fallthrough

	case OCALLINTER, OCALLMETH:
		a := s.call(n, callNormal)
		return s.load(n.Type, a)

	case OGETG:
		return s.newValue1(ssa.OpGetG, n.Type, s.mem())

	case OAPPEND:
		return s.append(n, false)

	case OSTRUCTLIT, OARRAYLIT:
		// All literals with nonzero fields have already been
		// rewritten during walk. Any that remain are just T{}
		// or equivalents. Use the zero value.
		if !isZero(n) {
			Fatalf("literal with nonzero value in SSA: %v", n)
		}
		return s.zeroVal(n.Type)

	default:
		s.Fatalf("unhandled expr %v", n.Op)
		return nil
	}
}

// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
func (s *state) append(n *Node, inplace bool) *ssa.Value {
	// If inplace is false, process as expression "append(s, e1, e2, e3)":
	//
	//     ptr, len, cap := s
	//     newlen := len + 3
	//     if newlen > cap {
	//         ptr, len, cap = growslice(s, newlen)
	//         newlen = len + 3 // recalculate to avoid a spill
	//     }
	//     // with write barriers, if needed:
	//     *(ptr+len) = e1
	//     *(ptr+len+1) = e2
	//     *(ptr+len+2) = e3
	//     return makeslice(ptr, newlen, cap)
	//
	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
	//
	//     a := &s
	//     ptr, len, cap := s
	//     newlen := len + 3
	//     if newlen > cap {
	//         newptr, len, newcap = growslice(ptr, len, cap, newlen)
	//         vardef(a)        // if necessary, advise liveness we are writing a new a
	//         *a.cap = newcap  // write before ptr to avoid a spill
	//         *a.ptr = newptr  // with write barrier
	//     }
	//     newlen = len + 3 // recalculate to avoid a spill
	//     *a.len = newlen
	//     // with write barriers, if needed:
	//     *(ptr+len) = e1
	//     *(ptr+len+1) = e2
	//     *(ptr+len+2) = e3

	et := n.Type.Elem()
	pt := types.NewPtr(et)

	// Evaluate slice
	sn := n.List.First() // the slice node is the first in the list

	var slice, addr *ssa.Value
	if inplace {
		addr = s.addr(sn, false)
		slice = s.load(n.Type, addr)
	} else {
		slice = s.expr(sn)
	}

	// Allocate new blocks
	grow := s.f.NewBlock(ssa.BlockPlain)
	assign := s.f.NewBlock(ssa.BlockPlain)

	// Decide if we need to grow
	nargs := int64(n.List.Len() - 1)
	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
	l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
	c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
	nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))

	cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c)
	s.vars[&ptrVar] = p

	if !inplace {
		s.vars[&newlenVar] = nl
		s.vars[&capVar] = c
	} else {
		s.vars[&lenVar] = l
	}

	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely
	b.SetControl(cmp)
	b.AddEdgeTo(grow)
	b.AddEdgeTo(assign)
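	// growslice returns a new slice header; r[0], r[1], and r[2] below are
	// its ptr, len, and cap results. The callee only grows the backing
	// array - the appended elements themselves are stored by the code in
	// the assign block.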
	// Call growslice
	s.startBlock(grow)
	taddr := s.expr(n.Left)
	r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)

	if inplace {
		if sn.Op == ONAME && sn.Class() != PEXTERN {
			// Tell liveness we're about to build a new slice
			s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
		}
		capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
		s.store(types.Types[TINT], capaddr, r[2])
		s.store(pt, addr, r[0])
		// load the value we just stored to avoid having to spill it
		s.vars[&ptrVar] = s.load(pt, addr)
		s.vars[&lenVar] = r[1] // avoid a spill in the fast path
	} else {
		s.vars[&ptrVar] = r[0]
		s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
		s.vars[&capVar] = r[2]
	}

	b = s.endBlock()
	b.AddEdgeTo(assign)

	// assign new elements to slots
	s.startBlock(assign)

	if inplace {
		l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
		nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
		lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
		s.store(types.Types[TINT], lenaddr, nl)
	}

	// Evaluate args
	type argRec struct {
		// if store is true, we're appending the value v.
		// If false, we're appending the value at *v.
		v     *ssa.Value
		store bool
	}
	args := make([]argRec, 0, nargs)
	for _, n := range n.List.Slice()[1:] {
		if canSSAType(n.Type) {
			args = append(args, argRec{v: s.expr(n), store: true})
		} else {
			v := s.addr(n, false)
			args = append(args, argRec{v: v})
		}
	}

	p = s.variable(&ptrVar, pt) // generates phi for ptr
	if !inplace {
		nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
		c = s.variable(&capVar, types.Types[TINT])     // generates phi for cap
	}
	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
	for i, arg := range args {
		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
		if arg.store {
			s.storeType(et, addr, arg.v, 0, true)
		} else {
			s.move(et, addr, arg.v)
		}
	}

	delete(s.vars, &ptrVar)
	if inplace {
		delete(s.vars, &lenVar)
		return nil
	}
	delete(s.vars, &newlenVar)
	delete(s.vars, &capVar)
	// make result
	return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
}

// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
	switch cond.Op {
	case OANDAND:
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, mid, no, max8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Right, yes, no, likely)
		return
		// Note: if likely==1, then both recursive calls pass 1.
		// If likely==-1, then we don't have enough information to decide
		// whether the first branch is likely or not. So we pass 0 for
		// the likeliness of the first branch.
		// TODO: have the frontend give us branch prediction hints for
		// OANDAND and OOROR nodes (if it ever has such info).
	case OOROR:
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, yes, mid, min8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Right, yes, no, likely)
		return
		// Note: if likely==-1, then both recursive calls pass -1.
		// If likely==1, then we don't have enough info to decide
		// the likelihood of the first branch.
	case ONOT:
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, no, yes, -likely)
		return
	}
	c := s.expr(cond)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(c)
	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
	b.AddEdgeTo(yes)
	b.AddEdgeTo(no)
}

type skipMask uint8

const (
	skipPtr skipMask = 1 << iota
	skipLen
	skipCap
)
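// A caller that knows some components of a slice header are unchanged can
// set skip bits to suppress the corresponding stores; for example, a
// self-assignment like s = s[:n] can leave the ptr and cap words alone and
// store only the new len.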
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
	if left.Op == ONAME && left.isBlank() {
		return
	}
	t := left.Type
	dowidth(t)
	if s.canSSA(left) {
		if deref {
			s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
		}
		if left.Op == ODOT {
			// We're assigning to a field of an ssa-able value.
			// We need to build a new structure with the new value for the
			// field we're assigning and the old values for the other fields.
			// For instance:
			//     type T struct {a, b, c int}
			//     var x T
			//     x.b = 5
			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}

			// Grab information about the structure type.
			t := left.Left.Type
			nf := t.NumFields()
			idx := fieldIdx(left)

			// Grab old value of structure.
			old := s.expr(left.Left)

			// Make new structure.
			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)

			// Add fields as args.
			for i := 0; i < nf; i++ {
				if i == idx {
					new.AddArg(right)
				} else {
					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
				}
			}

			// Recursively assign the new value we've made to the base of the dot op.
			s.assign(left.Left, new, false, 0)
			// TODO: do we need to update named values here?
			return
		}
		if left.Op == OINDEX && left.Left.Type.IsArray() {
			// We're assigning to an element of an ssa-able array.
			// a[i] = v
			t := left.Left.Type
			n := t.NumElem()

			i := s.expr(left.Right) // index
			if n == 0 {
				// The bounds check must fail. Might as well
				// ignore the actual index and just use zeros.
				z := s.constInt(types.Types[TINT], 0)
				s.boundsCheck(z, z)
				return
			}
			if n != 1 {
				s.Fatalf("assigning to non-1-length array")
			}
			// Rewrite to a = [1]{v}
			i = s.extendIndex(i, panicindex)
			s.boundsCheck(i, s.constInt(types.Types[TINT], 1))
			v := s.newValue1(ssa.OpArrayMake1, t, right)
			s.assign(left.Left, v, false, 0)
			return
		}
		// Update variable assignment.
		s.vars[left] = right
		s.addNamedValue(left, right)
		return
	}
	// Left is not ssa-able. Compute its address.
	if left.Op == ONAME && left.Class() != PEXTERN && skip == 0 {
		s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, left, s.mem(), !left.IsAutoTmp())
	}
	addr := s.addr(left, false)
	if isReflectHeaderDataField(left) {
		// Package unsafe's documentation says storing pointers into
		// reflect.SliceHeader and reflect.StringHeader's Data fields
		// is valid, even though they have type uintptr (#19168).
		// Mark it pointer type to signal the writebarrier pass to
		// insert a write barrier.
		t = types.Types[TUNSAFEPTR]
	}
	if deref {
		// Treat as a mem->mem move.
		if right == nil {
			s.zero(t, addr)
		} else {
			s.move(t, addr, right)
		}
		return
	}
	// Treat as a store.
	s.storeType(t, addr, right, skip, !left.IsAutoTmp())
}

// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *types.Type) *ssa.Value {
	switch {
	case t.IsInteger():
		switch t.Size() {
		case 1:
			return s.constInt8(t, 0)
		case 2:
			return s.constInt16(t, 0)
		case 4:
			return s.constInt32(t, 0)
		case 8:
			return s.constInt64(t, 0)
		default:
			s.Fatalf("bad sized integer type %v", t)
		}
	case t.IsFloat():
		switch t.Size() {
		case 4:
			return s.constFloat32(t, 0)
		case 8:
			return s.constFloat64(t, 0)
		default:
			s.Fatalf("bad sized float type %v", t)
		}
	case t.IsComplex():
		switch t.Size() {
		case 8:
			z := s.constFloat32(types.Types[TFLOAT32], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
		case 16:
			z := s.constFloat64(types.Types[TFLOAT64], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
		default:
			s.Fatalf("bad sized complex type %v", t)
		}

	case t.IsString():
		return s.constEmptyString(t)
	case t.IsPtrShaped():
		return s.constNil(t)
	case t.IsBoolean():
		return s.constBool(false)
	case t.IsInterface():
		return s.constInterface(t)
	case t.IsSlice():
		return s.constSlice(t)
	case t.IsStruct():
		n := t.NumFields()
		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
		for i := 0; i < n; i++ {
			v.AddArg(s.zeroVal(t.FieldType(i)))
		}
		return v
	case t.IsArray():
		switch t.NumElem() {
		case 0:
			return s.entryNewValue0(ssa.OpArrayMake0, t)
		case 1:
			return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
		}
	}
	s.Fatalf("zero for type %v not implemented", t)
	return nil
}

type callKind int8

const (
	callNormal callKind = iota
	callDefer
	callGo
)

type sfRtCallDef struct {
	rtfn  *obj.LSym
	rtype types.EType
}

var softFloatOps map[ssa.Op]sfRtCallDef
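// Note that the table below maps both OpSub32F and OpLess32F onto the
// fadd32/fgt32 helpers: sfcall (further down) negates the second operand of
// a subtraction and swaps the operands of a less-than, so one runtime helper
// serves two ops. Neq is likewise computed as !feq.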
func softfloatInit() {
	// Some of these operations get transformed by sfcall.
	softFloatOps = map[ssa.Op]sfRtCallDef{
		ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
		ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
		ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
		ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
		ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32},
		ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64},
		ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32},
		ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64},

		ssa.OpEq64F:      sfRtCallDef{sysfunc("feq64"), TBOOL},
		ssa.OpEq32F:      sfRtCallDef{sysfunc("feq32"), TBOOL},
		ssa.OpNeq64F:     sfRtCallDef{sysfunc("feq64"), TBOOL},
		ssa.OpNeq32F:     sfRtCallDef{sysfunc("feq32"), TBOOL},
		ssa.OpLess64F:    sfRtCallDef{sysfunc("fgt64"), TBOOL},
		ssa.OpLess32F:    sfRtCallDef{sysfunc("fgt32"), TBOOL},
		ssa.OpGreater64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
		ssa.OpGreater32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
		ssa.OpLeq64F:     sfRtCallDef{sysfunc("fge64"), TBOOL},
		ssa.OpLeq32F:     sfRtCallDef{sysfunc("fge32"), TBOOL},
		ssa.OpGeq64F:     sfRtCallDef{sysfunc("fge64"), TBOOL},
		ssa.OpGeq32F:     sfRtCallDef{sysfunc("fge32"), TBOOL},

		ssa.OpCvt32to32F:  sfRtCallDef{sysfunc("fint32to32"), TFLOAT32},
		ssa.OpCvt32Fto32:  sfRtCallDef{sysfunc("f32toint32"), TINT32},
		ssa.OpCvt64to32F:  sfRtCallDef{sysfunc("fint64to32"), TFLOAT32},
		ssa.OpCvt32Fto64:  sfRtCallDef{sysfunc("f32toint64"), TINT64},
		ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32},
		ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64},
		ssa.OpCvt32to64F:  sfRtCallDef{sysfunc("fint32to64"), TFLOAT64},
		ssa.OpCvt64Fto32:  sfRtCallDef{sysfunc("f64toint32"), TINT32},
		ssa.OpCvt64to64F:  sfRtCallDef{sysfunc("fint64to64"), TFLOAT64},
		ssa.OpCvt64Fto64:  sfRtCallDef{sysfunc("f64toint64"), TINT64},
		ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64},
		ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64},
		ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64},
		ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32},
	}
}

// TODO: do not emit sfcall if operation can be optimized to constant in later
// opt phase
func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
	if callDef, ok := softFloatOps[op]; ok {
		switch op {
		case ssa.OpLess32F,
			ssa.OpLess64F,
			ssa.OpLeq32F,
			ssa.OpLeq64F:
			args[0], args[1] = args[1], args[0]
		case ssa.OpSub32F,
			ssa.OpSub64F:
			args[1] = s.newValue1(s.ssaOp(OMINUS, types.Types[callDef.rtype]), args[1].Type, args[1])
		}

		result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
		if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
			result = s.newValue1(ssa.OpNot, result.Type, result)
		}
		return result, true
	}
	return nil, false
}

var intrinsics map[intrinsicKey]intrinsicBuilder

// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value

type intrinsicKey struct {
	arch *sys.Arch
	pkg  string
	fn   string
}

func init() {
	intrinsics = map[intrinsicKey]intrinsicBuilder{}

	var all []*sys.Arch
	var p4 []*sys.Arch
	var p8 []*sys.Arch
	var lwatomics []*sys.Arch
	for _, a := range sys.Archs {
		all = append(all, a)
		if a.PtrSize == 4 {
			p4 = append(p4, a)
		} else {
			p8 = append(p8, a)
		}
		if a.Family != sys.PPC64 {
			lwatomics = append(lwatomics, a)
		}
	}

	// add adds the intrinsic b for pkg.fn for the given list of architectures.
	add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
		for _, a := range archs {
			intrinsics[intrinsicKey{a, pkg, fn}] = b
		}
	}
	// addF does the same as add but operates on architecture families.
	addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
		m := 0
		for _, f := range archFamilies {
			if f >= 32 {
				panic("too many architecture families")
			}
			m |= 1 << uint(f)
		}
		for _, a := range all {
			if m>>uint(a.Family)&1 != 0 {
				intrinsics[intrinsicKey{a, pkg, fn}] = b
			}
		}
	}
	// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
	alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
		for _, a := range archs {
			if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
				intrinsics[intrinsicKey{a, pkg, fn}] = b
			}
		}
	}

	/******** runtime ********/
	if !instrumenting {
		add("runtime", "slicebytetostringtmp",
			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
				// Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes
				// for the backend instead of slicebytetostringtmp calls
				// when not instrumenting.
				slice := args[0]
				ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
				len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
				return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
			},
			all...)
	}
	addF("runtime/internal/math", "MulUintptr",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			if s.config.PtrSize == 4 {
				return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
			}
			return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
		},
		sys.AMD64, sys.I386)
	add("runtime", "KeepAlive",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
			s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
			return nil
		},
		all...)
	add("runtime", "getclosureptr",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
		},
		all...)

	add("runtime", "getcallerpc",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
		},
		all...)

	add("runtime", "getcallersp",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
		},
		all...)
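	// Intrinsics are keyed by (arch, package, function): when the backend
	// sees a call such as runtime.getcallerpc, it looks up
	// intrinsics[intrinsicKey{thearch.LinkArch.Arch, "runtime", "getcallerpc"}]
	// and, if a builder is registered, emits the SSA value in place of the
	// call (a sketch; the actual lookup lives in findIntrinsic).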
	/******** runtime/internal/sys ********/
	addF("runtime/internal/sys", "Ctz32",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
		},
		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
	addF("runtime/internal/sys", "Ctz64",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
		},
		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
	addF("runtime/internal/sys", "Bswap32",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
		},
		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
	addF("runtime/internal/sys", "Bswap64",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
		},
		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)

	/******** runtime/internal/atomic ********/
	addF("runtime/internal/atomic", "Load",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
	addF("runtime/internal/atomic", "Load64",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
	addF("runtime/internal/atomic", "LoadAcq",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
		},
		sys.PPC64)
	addF("runtime/internal/atomic", "Loadp",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)

	addF("runtime/internal/atomic", "Store",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
			return nil
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
	addF("runtime/internal/atomic", "Store64",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
			return nil
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
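	// The atomic builders above all follow the same memory-threading
	// pattern: the op produces a (value, mem) tuple, Select1 extracts the
	// new memory state and is stored back into s.vars[&memVar], and Select0
	// extracts the result. Store-like ops produce only a new memory state.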
	addF("runtime/internal/atomic", "StorepNoWB",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
			return nil
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64)
	addF("runtime/internal/atomic", "StoreRel",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
			return nil
		},
		sys.PPC64)

	addF("runtime/internal/atomic", "Xchg",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
	addF("runtime/internal/atomic", "Xchg64",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)

	addF("runtime/internal/atomic", "Xadd",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
		},
		sys.AMD64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
	addF("runtime/internal/atomic", "Xadd64",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
		},
		sys.AMD64, sys.S390X, sys.MIPS64, sys.PPC64)

	makeXaddARM64 := func(op0 ssa.Op, op1 ssa.Op, ty types.EType) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
		return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			// The target's support for the atomic instructions is detected dynamically.
			addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64SupportAtomics, s.sb)
			v := s.load(types.Types[TBOOL], addr)
			b := s.endBlock()
			b.Kind = ssa.BlockIf
			b.SetControl(v)
			bTrue := s.f.NewBlock(ssa.BlockPlain)
			bFalse := s.f.NewBlock(ssa.BlockPlain)
			bEnd := s.f.NewBlock(ssa.BlockPlain)
			b.AddEdgeTo(bTrue)
			b.AddEdgeTo(bFalse)
			b.Likely = ssa.BranchUnlikely // most machines don't have the atomic instructions nowadays

			// We have the atomic instructions - use them directly.
			s.startBlock(bTrue)
			v0 := s.newValue3(op1, types.NewTuple(types.Types[ty], types.TypeMem), args[0], args[1], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v0)
			s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[ty], v0)
			s.endBlock().AddEdgeTo(bEnd)

			// Use the original instruction sequence.
			s.startBlock(bFalse)
			v1 := s.newValue3(op0, types.NewTuple(types.Types[ty], types.TypeMem), args[0], args[1], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v1)
			s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[ty], v1)
			s.endBlock().AddEdgeTo(bEnd)

			// Merge results.
			s.startBlock(bEnd)
			return s.variable(n, types.Types[ty])
		}
	}

	addF("runtime/internal/atomic", "Xadd",
		makeXaddARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32),
		sys.ARM64)
	addF("runtime/internal/atomic", "Xadd64",
		makeXaddARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64),
		sys.ARM64)

	addF("runtime/internal/atomic", "Cas",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
	addF("runtime/internal/atomic", "Cas64",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
		},
		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
	addF("runtime/internal/atomic", "CasRel",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
		},
		sys.PPC64)

	addF("runtime/internal/atomic", "And8",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
			return nil
		},
		sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
	addF("runtime/internal/atomic", "Or8",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
			return nil
		},
		sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)

	alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
	alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
	alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
	alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
	alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)

	alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)

	/******** math ********/
	addF("math", "Sqrt",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
		},
		sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.S390X)
	addF("math", "Trunc",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
		},
		sys.ARM64, sys.PPC64, sys.S390X)
	addF("math", "Ceil",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
		},
		sys.ARM64, sys.PPC64, sys.S390X)
	addF("math", "Floor",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
		},
		sys.ARM64, sys.PPC64, sys.S390X)
	addF("math", "Round",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
		},
		sys.ARM64, sys.PPC64, sys.S390X)
	addF("math", "RoundToEven",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
		},
		sys.ARM64, sys.S390X)
	addF("math", "Abs",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
		},
		sys.ARM64, sys.PPC64)
	addF("math", "Copysign",
		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
		},
		sys.PPC64)

	makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
		return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
			addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), supportSSE41, s.sb)
			v := s.load(types.Types[TBOOL], addr)
			b := s.endBlock()
			b.Kind = ssa.BlockIf
			b.SetControl(v)
			bTrue := s.f.NewBlock(ssa.BlockPlain)
			bFalse := s.f.NewBlock(ssa.BlockPlain)
			bEnd := s.f.NewBlock(ssa.BlockPlain)
			b.AddEdgeTo(bTrue)
			b.AddEdgeTo(bFalse)
			b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays

			// We have the intrinsic - use it directly.
			s.startBlock(bTrue)
			s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0])
			s.endBlock().AddEdgeTo(bEnd)

			// Call the pure Go version.
			s.startBlock(bFalse)
			a := s.call(n, callNormal)
			s.vars[n] = s.load(types.Types[TFLOAT64], a)
			s.endBlock().AddEdgeTo(bEnd)

			// Merge results.
			s.startBlock(bEnd)
			return s.variable(n, types.Types[TFLOAT64])
		}
	}
	addF("math", "RoundToEven",
		makeRoundAMD64(ssa.OpRoundToEven),
		sys.AMD64)
	addF("math", "Floor",
		makeRoundAMD64(ssa.OpFloor),
		sys.AMD64)
	addF("math", "Ceil",
		makeRoundAMD64(ssa.OpCeil),
		sys.AMD64)
	addF("math", "Trunc",
		makeRoundAMD64(ssa.OpTrunc),
		sys.AMD64)

	/******** math/bits ********/
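	// Several builders below handle narrow operands by widening them and
	// OR-ing in a sentinel bit: TrailingZeros16 on ARM, for instance,
	// computes Ctz32(x | 1<<16), so a zero input yields 16 rather than 32.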
3229 s.startBlock(bEnd) 3230 return s.variable(n, types.Types[TFLOAT64]) 3231 } 3232 } 3233 addF("math", "RoundToEven", 3234 makeRoundAMD64(ssa.OpRoundToEven), 3235 sys.AMD64) 3236 addF("math", "Floor", 3237 makeRoundAMD64(ssa.OpFloor), 3238 sys.AMD64) 3239 addF("math", "Ceil", 3240 makeRoundAMD64(ssa.OpCeil), 3241 sys.AMD64) 3242 addF("math", "Trunc", 3243 makeRoundAMD64(ssa.OpTrunc), 3244 sys.AMD64) 3245 3246 /******** math/bits ********/ 3247 addF("math/bits", "TrailingZeros64", 3248 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3249 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 3250 }, 3251 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3252 addF("math/bits", "TrailingZeros32", 3253 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3254 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 3255 }, 3256 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3257 addF("math/bits", "TrailingZeros16", 3258 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3259 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 3260 c := s.constInt32(types.Types[TUINT32], 1<<16) 3261 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 3262 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 3263 }, 3264 sys.ARM, sys.MIPS) 3265 addF("math/bits", "TrailingZeros16", 3266 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3267 return s.newValue1(ssa.OpCtz16, types.Types[TINT], args[0]) 3268 }, 3269 sys.AMD64) 3270 addF("math/bits", "TrailingZeros16", 3271 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3272 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 3273 c := s.constInt64(types.Types[TUINT64], 1<<16) 3274 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 3275 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 3276 }, 3277 sys.ARM64, sys.S390X, sys.PPC64) 3278 addF("math/bits", "TrailingZeros8", 3279 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3280 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 3281 c := s.constInt32(types.Types[TUINT32], 1<<8) 3282 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 3283 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 3284 }, 3285 sys.ARM, sys.MIPS) 3286 addF("math/bits", "TrailingZeros8", 3287 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3288 return s.newValue1(ssa.OpCtz8, types.Types[TINT], args[0]) 3289 }, 3290 sys.AMD64) 3291 addF("math/bits", "TrailingZeros8", 3292 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3293 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 3294 c := s.constInt64(types.Types[TUINT64], 1<<8) 3295 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 3296 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 3297 }, 3298 sys.ARM64, sys.S390X) 3299 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...) 3300 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...) 3301 // ReverseBytes inlines correctly, no need to intrinsify it. 3302 // ReverseBytes16 lowers to a rotate, no need for anything special here. 
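// The widening TrailingZeros16/8 variants above OR in a guard bit just
// beyond the top of the narrow value, so the wider count is exact even
// for a zero input. For a uint16 x on a 32-bit op, roughly:
//
//	y := uint32(x) | 1<<16 // guard: Ctz32(y) == 16 when x == 0
//	TrailingZeros16(x) = Ctz32(y)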
3303 addF("math/bits", "Len64", 3304 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3305 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 3306 }, 3307 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3308 addF("math/bits", "Len32", 3309 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3310 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 3311 }, 3312 sys.AMD64) 3313 addF("math/bits", "Len32", 3314 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3315 if s.config.PtrSize == 4 { 3316 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 3317 } 3318 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0]) 3319 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 3320 }, 3321 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3322 addF("math/bits", "Len16", 3323 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3324 if s.config.PtrSize == 4 { 3325 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 3326 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 3327 } 3328 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 3329 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 3330 }, 3331 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3332 addF("math/bits", "Len16", 3333 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3334 return s.newValue1(ssa.OpBitLen16, types.Types[TINT], args[0]) 3335 }, 3336 sys.AMD64) 3337 addF("math/bits", "Len8", 3338 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3339 if s.config.PtrSize == 4 { 3340 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 3341 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 3342 } 3343 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 3344 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 3345 }, 3346 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3347 addF("math/bits", "Len8", 3348 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3349 return s.newValue1(ssa.OpBitLen8, types.Types[TINT], args[0]) 3350 }, 3351 sys.AMD64) 3352 addF("math/bits", "Len", 3353 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3354 if s.config.PtrSize == 4 { 3355 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 3356 } 3357 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 3358 }, 3359 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3360 // LeadingZeros is handled because it trivially calls Len. 
3361 addF("math/bits", "Reverse64", 3362 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3363 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 3364 }, 3365 sys.ARM64) 3366 addF("math/bits", "Reverse32", 3367 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3368 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 3369 }, 3370 sys.ARM64) 3371 addF("math/bits", "Reverse16", 3372 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3373 return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0]) 3374 }, 3375 sys.ARM64) 3376 addF("math/bits", "Reverse8", 3377 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3378 return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0]) 3379 }, 3380 sys.ARM64) 3381 addF("math/bits", "Reverse", 3382 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3383 if s.config.PtrSize == 4 { 3384 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 3385 } 3386 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 3387 }, 3388 sys.ARM64) 3389 addF("math/bits", "RotateLeft8", 3390 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3391 return s.newValue2(ssa.OpRotateLeft8, types.Types[TUINT8], args[0], args[1]) 3392 }, 3393 sys.AMD64) 3394 addF("math/bits", "RotateLeft16", 3395 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3396 return s.newValue2(ssa.OpRotateLeft16, types.Types[TUINT16], args[0], args[1]) 3397 }, 3398 sys.AMD64) 3399 addF("math/bits", "RotateLeft32", 3400 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3401 return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1]) 3402 }, 3403 sys.AMD64, sys.ARM64, sys.S390X) 3404 addF("math/bits", "RotateLeft64", 3405 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3406 return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1]) 3407 }, 3408 sys.AMD64, sys.ARM64, sys.S390X) 3409 alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...) 3410 3411 makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3412 return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3413 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), supportPopcnt, s.sb) 3414 v := s.load(types.Types[TBOOL], addr) 3415 b := s.endBlock() 3416 b.Kind = ssa.BlockIf 3417 b.SetControl(v) 3418 bTrue := s.f.NewBlock(ssa.BlockPlain) 3419 bFalse := s.f.NewBlock(ssa.BlockPlain) 3420 bEnd := s.f.NewBlock(ssa.BlockPlain) 3421 b.AddEdgeTo(bTrue) 3422 b.AddEdgeTo(bFalse) 3423 b.Likely = ssa.BranchLikely // most machines have popcnt nowadays 3424 3425 // We have the intrinsic - use it directly. 3426 s.startBlock(bTrue) 3427 op := op64 3428 if s.config.PtrSize == 4 { 3429 op = op32 3430 } 3431 s.vars[n] = s.newValue1(op, types.Types[TINT], args[0]) 3432 s.endBlock().AddEdgeTo(bEnd) 3433 3434 // Call the pure Go version. 3435 s.startBlock(bFalse) 3436 a := s.call(n, callNormal) 3437 s.vars[n] = s.load(types.Types[TINT], a) 3438 s.endBlock().AddEdgeTo(bEnd) 3439 3440 // Merge results. 
3441 s.startBlock(bEnd) 3442 return s.variable(n, types.Types[TINT]) 3443 } 3444 } 3445 addF("math/bits", "OnesCount64", 3446 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64), 3447 sys.AMD64) 3448 addF("math/bits", "OnesCount64", 3449 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3450 return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0]) 3451 }, 3452 sys.PPC64, sys.ARM64, sys.S390X) 3453 addF("math/bits", "OnesCount32", 3454 makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32), 3455 sys.AMD64) 3456 addF("math/bits", "OnesCount32", 3457 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3458 return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0]) 3459 }, 3460 sys.PPC64, sys.ARM64, sys.S390X) 3461 addF("math/bits", "OnesCount16", 3462 makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16), 3463 sys.AMD64) 3464 addF("math/bits", "OnesCount16", 3465 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3466 return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0]) 3467 }, 3468 sys.ARM64, sys.S390X, sys.PPC64) 3469 addF("math/bits", "OnesCount8", 3470 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3471 return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0]) 3472 }, 3473 sys.S390X, sys.PPC64) 3474 addF("math/bits", "OnesCount", 3475 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32), 3476 sys.AMD64) 3477 alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64) 3478 addF("math/bits", "Mul64", 3479 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3480 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) 3481 }, 3482 sys.AMD64, sys.ARM64, sys.PPC64) 3483 3484 /******** sync/atomic ********/ 3485 3486 // Note: these are disabled by flag_race in findIntrinsic below. 3487 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...) 3488 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...) 3489 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...) 3490 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...) 3491 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...) 3492 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...) 3493 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...) 3494 3495 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...) 3496 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...) 3497 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. 3498 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...) 3499 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...) 3500 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...) 3501 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...) 3502 3503 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...) 3504 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...) 3505 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...) 3506 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...) 3507 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...) 
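// (These can be plain aliases, rather than new builders, because each
// sync/atomic function matches the argument and result layout of its
// runtime/internal/atomic counterpart; e.g. on an 8-byte-pointer target
// SwapUintptr(p, new) lowers exactly like Xchg64(p, new).)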
3508 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...) 3509 3510 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...) 3511 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...) 3512 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...) 3513 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...) 3514 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...) 3515 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...) 3516 3517 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...) 3518 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...) 3519 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...) 3520 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...) 3521 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...) 3522 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...) 3523 3524 /******** math/big ********/ 3525 add("math/big", "mulWW", 3526 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3527 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) 3528 }, 3529 sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64) 3530 add("math/big", "divWW", 3531 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3532 return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) 3533 }, 3534 sys.ArchAMD64) 3535 } 3536 3537 // findIntrinsic returns a function which builds the SSA equivalent of the 3538 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. 3539 func findIntrinsic(sym *types.Sym) intrinsicBuilder { 3540 if ssa.IntrinsicsDisable { 3541 return nil 3542 } 3543 if sym == nil || sym.Pkg == nil { 3544 return nil 3545 } 3546 pkg := sym.Pkg.Path 3547 if sym.Pkg == localpkg { 3548 pkg = myimportpath 3549 } 3550 if flag_race && pkg == "sync/atomic" { 3551 // The race detector needs to be able to intercept these calls. 3552 // We can't intrinsify them. 3553 return nil 3554 } 3555 // Skip intrinsifying math functions (which may contain hard-float 3556 // instructions) when soft-float 3557 if thearch.SoftFloat && pkg == "math" { 3558 return nil 3559 } 3560 3561 fn := sym.Name 3562 return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] 3563 } 3564 3565 func isIntrinsicCall(n *Node) bool { 3566 if n == nil || n.Left == nil { 3567 return false 3568 } 3569 return findIntrinsic(n.Left.Sym) != nil 3570 } 3571 3572 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 3573 func (s *state) intrinsicCall(n *Node) *ssa.Value { 3574 v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) 3575 if ssa.IntrinsicsDebug > 0 { 3576 x := v 3577 if x == nil { 3578 x = s.mem() 3579 } 3580 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { 3581 x = x.Args[0] 3582 } 3583 Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) 3584 } 3585 return v 3586 } 3587 3588 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. 3589 func (s *state) intrinsicArgs(n *Node) []*ssa.Value { 3590 // Construct map of temps; see comments in s.call about the structure of n. 
3591 temps := map[*Node]*ssa.Value{} 3592 for _, a := range n.List.Slice() { 3593 if a.Op != OAS { 3594 s.Fatalf("non-assignment as a temp function argument %v", a.Op) 3595 } 3596 l, r := a.Left, a.Right 3597 if l.Op != ONAME { 3598 s.Fatalf("non-ONAME temp function argument %v", a.Op) 3599 } 3600 // Evaluate and store to "temporary". 3601 // Walk ensures these temporaries are dead outside of n. 3602 temps[l] = s.expr(r) 3603 } 3604 args := make([]*ssa.Value, n.Rlist.Len()) 3605 for i, n := range n.Rlist.Slice() { 3606 // Store a value to an argument slot. 3607 if x, ok := temps[n]; ok { 3608 // This is a previously computed temporary. 3609 args[i] = x 3610 continue 3611 } 3612 // This is an explicit value; evaluate it. 3613 args[i] = s.expr(n) 3614 } 3615 return args 3616 } 3617 3618 // Calls the function n using the specified call type. 3619 // Returns the address of the return value (or nil if none). 3620 func (s *state) call(n *Node, k callKind) *ssa.Value { 3621 var sym *types.Sym // target symbol (if static) 3622 var closure *ssa.Value // ptr to closure to run (if dynamic) 3623 var codeptr *ssa.Value // ptr to target code (if dynamic) 3624 var rcvr *ssa.Value // receiver to set 3625 fn := n.Left 3626 switch n.Op { 3627 case OCALLFUNC: 3628 if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC { 3629 sym = fn.Sym 3630 break 3631 } 3632 closure = s.expr(fn) 3633 if thearch.LinkArch.Family == sys.Wasm { 3634 // TODO(neelance): On other architectures this should be eliminated by the optimization steps 3635 s.nilCheck(closure) 3636 } 3637 case OCALLMETH: 3638 if fn.Op != ODOTMETH { 3639 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 3640 } 3641 if k == callNormal { 3642 sym = fn.Sym 3643 break 3644 } 3645 // Make a name n2 for the function. 3646 // fn.Sym might be sync.(*Mutex).Unlock. 3647 // Make a PFUNC node out of that, then evaluate it. 3648 // We get back an SSA value representing &sync.(*Mutex).Unlock·f. 3649 // We can then pass that to defer or go. 3650 n2 := newnamel(fn.Pos, fn.Sym) 3651 n2.Name.Curfn = s.curfn 3652 n2.SetClass(PFUNC) 3653 n2.Pos = fn.Pos 3654 n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. 3655 closure = s.expr(n2) 3656 // Note: receiver is already present in n.Rlist, so we don't 3657 // want to set it here. 3658 case OCALLINTER: 3659 if fn.Op != ODOTINTER { 3660 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 3661 } 3662 i := s.expr(fn.Left) 3663 itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i) 3664 s.nilCheck(itab) 3665 itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab 3666 itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) 3667 if k == callNormal { 3668 codeptr = s.load(types.Types[TUINTPTR], itab) 3669 } else { 3670 closure = itab 3671 } 3672 rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i) 3673 } 3674 dowidth(fn.Type) 3675 stksize := fn.Type.ArgWidth() // includes receiver 3676 3677 // Run all assignments of temps. 3678 // The temps are introduced to avoid overwriting argument 3679 // slots when arguments themselves require function calls. 3680 s.stmtList(n.List) 3681 3682 // Store arguments to stack, including defer/go arguments and receiver for method calls. 3683 // These are written in SP-offset order. 3684 argStart := Ctxt.FixedFrameSize() 3685 // Defer/go args. 3686 if k != callNormal { 3687 // Write argsize and closure (args to newproc/deferproc). 
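// A sketch of the frame laid out below, with offsets relative to
// FixedFrameSize (matching the runtime's deferproc(siz int32, fn *funcval)):
//
//	+0          argsize (uint32)
//	+Widthptr   closure (uintptr)
//	+2*Widthptr the call's own arguments, receiver first for methods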
3688 argsize := s.constInt32(types.Types[TUINT32], int32(stksize)) 3689 addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) 3690 s.store(types.Types[TUINT32], addr, argsize) 3691 addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr)) 3692 s.store(types.Types[TUINTPTR], addr, closure) 3693 stksize += 2 * int64(Widthptr) 3694 argStart += 2 * int64(Widthptr) 3695 } 3696 3697 // Set receiver (for interface calls). 3698 if rcvr != nil { 3699 addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) 3700 s.store(types.Types[TUINTPTR], addr, rcvr) 3701 } 3702 3703 // Write args. 3704 t := n.Left.Type 3705 args := n.Rlist.Slice() 3706 if n.Op == OCALLMETH { 3707 f := t.Recv() 3708 s.storeArg(args[0], f.Type, argStart+f.Offset) 3709 args = args[1:] 3710 } 3711 for i, n := range args { 3712 f := t.Params().Field(i) 3713 s.storeArg(n, f.Type, argStart+f.Offset) 3714 } 3715 3716 // call target 3717 var call *ssa.Value 3718 switch { 3719 case k == callDefer: 3720 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferproc, s.mem()) 3721 case k == callGo: 3722 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, newproc, s.mem()) 3723 case closure != nil: 3724 // rawLoad because loading the code pointer from a 3725 // closure is always safe, but IsSanitizerSafeAddr 3726 // can't always figure that out currently, and it's 3727 // critical that we not clobber any arguments already 3728 // stored onto the stack. 3729 codeptr = s.rawLoad(types.Types[TUINTPTR], closure) 3730 call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem()) 3731 case codeptr != nil: 3732 call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem()) 3733 case sym != nil: 3734 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem()) 3735 default: 3736 Fatalf("bad call type %v %v", n.Op, n) 3737 } 3738 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 3739 s.vars[&memVar] = call 3740 3741 // Finish block for defers 3742 if k == callDefer { 3743 b := s.endBlock() 3744 b.Kind = ssa.BlockDefer 3745 b.SetControl(call) 3746 bNext := s.f.NewBlock(ssa.BlockPlain) 3747 b.AddEdgeTo(bNext) 3748 // Add recover edge to exit code. 3749 r := s.f.NewBlock(ssa.BlockPlain) 3750 s.startBlock(r) 3751 s.exit() 3752 b.AddEdgeTo(r) 3753 b.Likely = ssa.BranchLikely 3754 s.startBlock(bNext) 3755 } 3756 3757 res := n.Left.Type.Results() 3758 if res.NumFields() == 0 || k != callNormal { 3759 // call has no return value. Continue with the next statement. 3760 return nil 3761 } 3762 fp := res.Field(0) 3763 return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()) 3764 } 3765 3766 // etypesign returns the signed-ness of e, for integer/pointer etypes. 3767 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 3768 func etypesign(e types.EType) int8 { 3769 switch e { 3770 case TINT8, TINT16, TINT32, TINT64, TINT: 3771 return -1 3772 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 3773 return +1 3774 } 3775 return 0 3776 } 3777 3778 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 3779 // The value that the returned Value represents is guaranteed to be non-nil. 3780 // If bounded is true then this address does not require a nil check for its operand 3781 // even if that would otherwise be implied. 
3782 func (s *state) addr(n *Node, bounded bool) *ssa.Value { 3783 t := types.NewPtr(n.Type) 3784 switch n.Op { 3785 case ONAME: 3786 switch n.Class() { 3787 case PEXTERN: 3788 // global variable 3789 v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb) 3790 // TODO: Make OpAddr use AuxInt as well as Aux. 3791 if n.Xoffset != 0 { 3792 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 3793 } 3794 return v 3795 case PPARAM: 3796 // parameter slot 3797 v := s.decladdrs[n] 3798 if v != nil { 3799 return v 3800 } 3801 if n == nodfp { 3802 // Special arg that points to the frame pointer (Used by ORECOVER). 3803 return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem) 3804 } 3805 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) 3806 return nil 3807 case PAUTO: 3808 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !n.IsAutoTmp()) 3809 3810 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 3811 // ensure that we reuse symbols for out parameters so 3812 // that cse works on their addresses 3813 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true) 3814 default: 3815 s.Fatalf("variable address class %v not implemented", n.Class()) 3816 return nil 3817 } 3818 case OINDREGSP: 3819 // indirect off REGSP 3820 // used for storing/loading arguments/returns to/from callees 3821 return s.constOffPtrSP(t, n.Xoffset) 3822 case OINDEX: 3823 if n.Left.Type.IsSlice() { 3824 a := s.expr(n.Left) 3825 i := s.expr(n.Right) 3826 i = s.extendIndex(i, panicindex) 3827 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a) 3828 if !n.Bounded() { 3829 s.boundsCheck(i, len) 3830 } 3831 p := s.newValue1(ssa.OpSlicePtr, t, a) 3832 return s.newValue2(ssa.OpPtrIndex, t, p, i) 3833 } else { // array 3834 a := s.addr(n.Left, bounded) 3835 i := s.expr(n.Right) 3836 i = s.extendIndex(i, panicindex) 3837 len := s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 3838 if !n.Bounded() { 3839 s.boundsCheck(i, len) 3840 } 3841 return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i) 3842 } 3843 case OIND: 3844 return s.exprPtr(n.Left, bounded, n.Pos) 3845 case ODOT: 3846 p := s.addr(n.Left, bounded) 3847 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3848 case ODOTPTR: 3849 p := s.exprPtr(n.Left, bounded, n.Pos) 3850 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3851 case OCLOSUREVAR: 3852 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 3853 s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) 3854 case OCONVNOP: 3855 addr := s.addr(n.Left, bounded) 3856 return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type 3857 case OCALLFUNC, OCALLINTER, OCALLMETH: 3858 return s.call(n, callNormal) 3859 case ODOTTYPE: 3860 v, _ := s.dottype(n, false) 3861 if v.Op != ssa.OpLoad { 3862 s.Fatalf("dottype of non-load") 3863 } 3864 if v.Args[1] != s.mem() { 3865 s.Fatalf("memory no longer live from dottype load") 3866 } 3867 return v.Args[0] 3868 default: 3869 s.Fatalf("unhandled addr %v", n.Op) 3870 return nil 3871 } 3872 } 3873 3874 // canSSA reports whether n is SSA-able. 3875 // n must be an ONAME (or an ODOT sequence with an ONAME base). 
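// For example, for a struct variable v, canSSA(v.f.g) strips the ODOT
// chain down to v and applies the checks below to v itself.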
3876 func (s *state) canSSA(n *Node) bool {
3877 if Debug['N'] != 0 {
3878 return false
3879 }
3880 for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
3881 n = n.Left
3882 }
3883 if n.Op != ONAME {
3884 return false
3885 }
3886 if n.Addrtaken() {
3887 return false
3888 }
3889 if n.isParamHeapCopy() {
3890 return false
3891 }
3892 if n.Class() == PAUTOHEAP {
3893 Fatalf("canSSA of PAUTOHEAP %v", n)
3894 }
3895 switch n.Class() {
3896 case PEXTERN:
3897 return false
3898 case PPARAMOUT:
3899 if s.hasdefer {
3900 // TODO: handle this case? Named return values must be
3901 // in memory so that the deferred function can see them.
3902 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
3903 // Or maybe not, see issue 18860. Even unnamed return values
3904 // must be written back so if a defer recovers, the caller can see them.
3905 return false
3906 }
3907 if s.cgoUnsafeArgs {
3908 // Cgo effectively takes the address of all result args,
3909 // but the compiler can't see that.
3910 return false
3911 }
3912 }
3913 if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
3914 // wrappers generated by genwrapper need to update
3915 // the .this pointer in place.
3916 // TODO: treat as a PPARAMOUT?
3917 return false
3918 }
3919 return canSSAType(n.Type)
3920 // TODO: try to make more variables SSAable?
3921 }
3922 
3923 // canSSAType reports whether variables of type t are SSA-able.
3924 func canSSAType(t *types.Type) bool {
3925 dowidth(t)
3926 if t.Width > int64(4*Widthptr) {
3927 // 4*Widthptr is an arbitrary constant. We want it
3928 // to be at least 3*Widthptr so slices can be registerized.
3929 // Too big and we'll introduce too much register pressure.
3930 return false
3931 }
3932 switch t.Etype {
3933 case TARRAY:
3934 // We can't do larger arrays because dynamic indexing is
3935 // not supported on SSA variables.
3936 // TODO: allow if all indexes are constant.
3937 if t.NumElem() <= 1 {
3938 return canSSAType(t.Elem())
3939 }
3940 return false
3941 case TSTRUCT:
3942 if t.NumFields() > ssa.MaxStruct {
3943 return false
3944 }
3945 for _, t1 := range t.Fields().Slice() {
3946 if !canSSAType(t1.Type) {
3947 return false
3948 }
3949 }
3950 return true
3951 default:
3952 return true
3953 }
3954 }
3955 
3956 // exprPtr evaluates n to a pointer and nil-checks it.
3957 func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
3958 p := s.expr(n)
3959 if bounded || n.NonNil() {
3960 if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
3961 s.f.Warnl(lineno, "removed nil check")
3962 }
3963 return p
3964 }
3965 s.nilCheck(p)
3966 return p
3967 }
3968 
3969 // nilCheck generates nil pointer checking code.
3970 // Used only for automatically inserted nil checks,
3971 // not for user code like 'x != nil'.
3972 func (s *state) nilCheck(ptr *ssa.Value) {
3973 if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
3974 return
3975 }
3976 s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
3977 }
3978 
3979 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
3980 // Starts a new block on return.
3981 // idx is already converted to full int width.
3982 func (s *state) boundsCheck(idx, len *ssa.Value) {
3983 if Debug['B'] != 0 {
3984 return
3985 }
3986 
3987 // bounds check
3988 cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
3989 s.check(cmp, panicindex)
3990 }
3991 
3992 // sliceBoundsCheck generates slice bounds checking code.
Checks if 0 <= idx <= len, branches to exit if not. 3993 // Starts a new block on return. 3994 // idx and len are already converted to full int width. 3995 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) { 3996 if Debug['B'] != 0 { 3997 return 3998 } 3999 4000 // bounds check 4001 cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len) 4002 s.check(cmp, panicslice) 4003 } 4004 4005 // If cmp (a bool) is false, panic using the given function. 4006 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { 4007 b := s.endBlock() 4008 b.Kind = ssa.BlockIf 4009 b.SetControl(cmp) 4010 b.Likely = ssa.BranchLikely 4011 bNext := s.f.NewBlock(ssa.BlockPlain) 4012 line := s.peekPos() 4013 pos := Ctxt.PosTable.Pos(line) 4014 fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()} 4015 bPanic := s.panics[fl] 4016 if bPanic == nil { 4017 bPanic = s.f.NewBlock(ssa.BlockPlain) 4018 s.panics[fl] = bPanic 4019 s.startBlock(bPanic) 4020 // The panic call takes/returns memory to ensure that the right 4021 // memory state is observed if the panic happens. 4022 s.rtcall(fn, false, nil) 4023 } 4024 b.AddEdgeTo(bNext) 4025 b.AddEdgeTo(bPanic) 4026 s.startBlock(bNext) 4027 } 4028 4029 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { 4030 needcheck := true 4031 switch b.Op { 4032 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: 4033 if b.AuxInt != 0 { 4034 needcheck = false 4035 } 4036 } 4037 if needcheck { 4038 // do a size-appropriate check for zero 4039 cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type)) 4040 s.check(cmp, panicdivide) 4041 } 4042 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 4043 } 4044 4045 // rtcall issues a call to the given runtime function fn with the listed args. 4046 // Returns a slice of results of the given result types. 4047 // The call is added to the end of the current block. 4048 // If returns is false, the block is marked as an exit block. 4049 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value { 4050 // Write args to the stack 4051 off := Ctxt.FixedFrameSize() 4052 for _, arg := range args { 4053 t := arg.Type 4054 off = Rnd(off, t.Alignment()) 4055 ptr := s.constOffPtrSP(t.PtrTo(), off) 4056 size := t.Size() 4057 s.store(t, ptr, arg) 4058 off += size 4059 } 4060 off = Rnd(off, int64(Widthreg)) 4061 4062 // Issue call 4063 call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem()) 4064 s.vars[&memVar] = call 4065 4066 if !returns { 4067 // Finish block 4068 b := s.endBlock() 4069 b.Kind = ssa.BlockExit 4070 b.SetControl(call) 4071 call.AuxInt = off - Ctxt.FixedFrameSize() 4072 if len(results) > 0 { 4073 Fatalf("panic call can't have results") 4074 } 4075 return nil 4076 } 4077 4078 // Load results 4079 res := make([]*ssa.Value, len(results)) 4080 for i, t := range results { 4081 off = Rnd(off, t.Alignment()) 4082 ptr := s.constOffPtrSP(types.NewPtr(t), off) 4083 res[i] = s.load(t, ptr) 4084 off += t.Size() 4085 } 4086 off = Rnd(off, int64(Widthptr)) 4087 4088 // Remember how much callee stack space we needed. 4089 call.AuxInt = off 4090 4091 return res 4092 } 4093 4094 // do *left = right for type t. 4095 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) { 4096 s.instrument(t, left, true) 4097 4098 if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) { 4099 // Known to not have write barrier. Store the whole type. 
4100 s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt) 4101 return 4102 } 4103 4104 // store scalar fields first, so write barrier stores for 4105 // pointer fields can be grouped together, and scalar values 4106 // don't need to be live across the write barrier call. 4107 // TODO: if the writebarrier pass knows how to reorder stores, 4108 // we can do a single store here as long as skip==0. 4109 s.storeTypeScalars(t, left, right, skip) 4110 if skip&skipPtr == 0 && types.Haspointers(t) { 4111 s.storeTypePtrs(t, left, right) 4112 } 4113 } 4114 4115 // do *left = right for all scalar (non-pointer) parts of t. 4116 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) { 4117 switch { 4118 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): 4119 s.store(t, left, right) 4120 case t.IsPtrShaped(): 4121 // no scalar fields. 4122 case t.IsString(): 4123 if skip&skipLen != 0 { 4124 return 4125 } 4126 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right) 4127 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 4128 s.store(types.Types[TINT], lenAddr, len) 4129 case t.IsSlice(): 4130 if skip&skipLen == 0 { 4131 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right) 4132 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 4133 s.store(types.Types[TINT], lenAddr, len) 4134 } 4135 if skip&skipCap == 0 { 4136 cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right) 4137 capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left) 4138 s.store(types.Types[TINT], capAddr, cap) 4139 } 4140 case t.IsInterface(): 4141 // itab field doesn't need a write barrier (even though it is a pointer). 4142 itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right) 4143 s.store(types.Types[TUINTPTR], left, itab) 4144 case t.IsStruct(): 4145 n := t.NumFields() 4146 for i := 0; i < n; i++ { 4147 ft := t.FieldType(i) 4148 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 4149 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 4150 s.storeTypeScalars(ft, addr, val, 0) 4151 } 4152 case t.IsArray() && t.NumElem() == 0: 4153 // nothing 4154 case t.IsArray() && t.NumElem() == 1: 4155 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0) 4156 default: 4157 s.Fatalf("bad write barrier type %v", t) 4158 } 4159 } 4160 4161 // do *left = right for all pointer parts of t. 4162 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { 4163 switch { 4164 case t.IsPtrShaped(): 4165 s.store(t, left, right) 4166 case t.IsString(): 4167 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right) 4168 s.store(s.f.Config.Types.BytePtr, left, ptr) 4169 case t.IsSlice(): 4170 elType := types.NewPtr(t.Elem()) 4171 ptr := s.newValue1(ssa.OpSlicePtr, elType, right) 4172 s.store(elType, left, ptr) 4173 case t.IsInterface(): 4174 // itab field is treated as a scalar. 
4175 idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right) 4176 idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left) 4177 s.store(s.f.Config.Types.BytePtr, idataAddr, idata) 4178 case t.IsStruct(): 4179 n := t.NumFields() 4180 for i := 0; i < n; i++ { 4181 ft := t.FieldType(i) 4182 if !types.Haspointers(ft) { 4183 continue 4184 } 4185 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 4186 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 4187 s.storeTypePtrs(ft, addr, val) 4188 } 4189 case t.IsArray() && t.NumElem() == 0: 4190 // nothing 4191 case t.IsArray() && t.NumElem() == 1: 4192 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 4193 default: 4194 s.Fatalf("bad write barrier type %v", t) 4195 } 4196 } 4197 4198 func (s *state) storeArg(n *Node, t *types.Type, off int64) { 4199 pt := types.NewPtr(t) 4200 sp := s.constOffPtrSP(pt, off) 4201 4202 if !canSSAType(t) { 4203 a := s.addr(n, false) 4204 s.move(t, sp, a) 4205 return 4206 } 4207 4208 a := s.expr(n) 4209 s.storeType(t, sp, a, 0, false) 4210 } 4211 4212 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result. 4213 // i,j,k may be nil, in which case they are set to their default value. 4214 // t is a slice, ptr to array, or string type. 4215 func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) { 4216 var elemtype *types.Type 4217 var ptrtype *types.Type 4218 var ptr *ssa.Value 4219 var len *ssa.Value 4220 var cap *ssa.Value 4221 zero := s.constInt(types.Types[TINT], 0) 4222 switch { 4223 case t.IsSlice(): 4224 elemtype = t.Elem() 4225 ptrtype = types.NewPtr(elemtype) 4226 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v) 4227 len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v) 4228 cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v) 4229 case t.IsString(): 4230 elemtype = types.Types[TUINT8] 4231 ptrtype = types.NewPtr(elemtype) 4232 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v) 4233 len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v) 4234 cap = len 4235 case t.IsPtr(): 4236 if !t.Elem().IsArray() { 4237 s.Fatalf("bad ptr to array in slice %v\n", t) 4238 } 4239 elemtype = t.Elem().Elem() 4240 ptrtype = types.NewPtr(elemtype) 4241 s.nilCheck(v) 4242 ptr = v 4243 len = s.constInt(types.Types[TINT], t.Elem().NumElem()) 4244 cap = len 4245 default: 4246 s.Fatalf("bad type in slice %v\n", t) 4247 } 4248 4249 // Set default values 4250 if i == nil { 4251 i = zero 4252 } 4253 if j == nil { 4254 j = len 4255 } 4256 if k == nil { 4257 k = cap 4258 } 4259 4260 if !bounded { 4261 // Panic if slice indices are not in bounds. 4262 s.sliceBoundsCheck(i, j) 4263 if j != k { 4264 s.sliceBoundsCheck(j, k) 4265 } 4266 if k != cap { 4267 s.sliceBoundsCheck(k, cap) 4268 } 4269 } 4270 4271 // Generate the following code assuming that indexes are in bounds. 4272 // The masking is to make sure that we don't generate a slice 4273 // that points to the next object in memory. 4274 // rlen = j - i 4275 // rcap = k - i 4276 // delta = i * elemsize 4277 // rptr = p + delta&mask(rcap) 4278 // result = (SliceMake rptr rlen rcap) 4279 // where mask(x) is 0 if x==0 and -1 if x>0. 
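// For example (a sketch, assuming 8-byte elements): v[5:5] on a *[5]int
// gives rlen = rcap = 0, so delta = 5*8 is ANDed with mask(0) = 0 and
// rptr == ptr, keeping the result from pointing at the next object.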
4280 subOp := s.ssaOp(OSUB, types.Types[TINT])
4281 mulOp := s.ssaOp(OMUL, types.Types[TINT])
4282 andOp := s.ssaOp(OAND, types.Types[TINT])
4283 rlen := s.newValue2(subOp, types.Types[TINT], j, i)
4284 var rcap *ssa.Value
4285 switch {
4286 case t.IsString():
4287 // Capacity of the result is unimportant. However, we use
4288 // rcap to test if we've generated a zero-length slice.
4289 // Use length of strings for that.
4290 rcap = rlen
4291 case j == k:
4292 rcap = rlen
4293 default:
4294 rcap = s.newValue2(subOp, types.Types[TINT], k, i)
4295 }
4296 
4297 var rptr *ssa.Value
4298 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
4299 // No pointer arithmetic necessary.
4300 rptr = ptr
4301 } else {
4302 // delta = # of bytes to offset pointer by.
4303 delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width))
4304 // If we're slicing to the point where the capacity is zero,
4305 // zero out the delta.
4306 mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
4307 delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
4308 // Compute rptr = ptr + delta
4309 rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
4310 }
4311 
4312 return rptr, rlen, rcap
4313 }
4314 
4315 type u642fcvtTab struct {
4316 geq, cvt2F, and, rsh, or, add ssa.Op
4317 one func(*state, *types.Type, int64) *ssa.Value
4318 }
4319 
4320 var u64_f64 = u642fcvtTab{
4321 geq: ssa.OpGeq64,
4322 cvt2F: ssa.OpCvt64to64F,
4323 and: ssa.OpAnd64,
4324 rsh: ssa.OpRsh64Ux64,
4325 or: ssa.OpOr64,
4326 add: ssa.OpAdd64F,
4327 one: (*state).constInt64,
4328 }
4329 
4330 var u64_f32 = u642fcvtTab{
4331 geq: ssa.OpGeq64,
4332 cvt2F: ssa.OpCvt64to32F,
4333 and: ssa.OpAnd64,
4334 rsh: ssa.OpRsh64Ux64,
4335 or: ssa.OpOr64,
4336 add: ssa.OpAdd32F,
4337 one: (*state).constInt64,
4338 }
4339 
4340 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
4341 return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
4342 }
4343 
4344 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
4345 return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
4346 }
4347 
4348 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
4349 // if x >= 0 {
4350 // result = (floatY) x
4351 // } else {
4352 // y = uintX(x) ; y = x & 1
4353 // z = uintX(x)
4354 // z = z >> 1
4355 // z = z | y
4356 // result = floatY(z)
4357 // result = result + result
4358 // }
4359 //
4360 // Code borrowed from old code generator.
4361 // What's going on: large 64-bit "unsigned" looks like
4362 // negative number to hardware's integer-to-float
4363 // conversion. However, because the mantissa is only
4364 // 63 bits, we don't need the LSB, so instead we do an
4365 // unsigned right shift (divide by two), convert, and
4366 // double. However, before we do that, we need to be
4367 // sure that we do not lose a "1" if that made the
4368 // difference in the resulting rounding. Therefore, we
4369 // preserve it, and OR (not ADD) it back in. The case
4370 // that matters is when the eleven discarded bits are
4371 // equal to 10000000001; that rounds up, and the 1 cannot
4372 // be lost else it would round down if the LSB of the
4373 // candidate mantissa is 0.
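//
// A quick check with x = 1<<63, which looks negative to a signed
// convert: y = 0, z = 1<<62 converts exactly, and doubling yields
// 2^63 as desired.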
4374 cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft)) 4375 b := s.endBlock() 4376 b.Kind = ssa.BlockIf 4377 b.SetControl(cmp) 4378 b.Likely = ssa.BranchLikely 4379 4380 bThen := s.f.NewBlock(ssa.BlockPlain) 4381 bElse := s.f.NewBlock(ssa.BlockPlain) 4382 bAfter := s.f.NewBlock(ssa.BlockPlain) 4383 4384 b.AddEdgeTo(bThen) 4385 s.startBlock(bThen) 4386 a0 := s.newValue1(cvttab.cvt2F, tt, x) 4387 s.vars[n] = a0 4388 s.endBlock() 4389 bThen.AddEdgeTo(bAfter) 4390 4391 b.AddEdgeTo(bElse) 4392 s.startBlock(bElse) 4393 one := cvttab.one(s, ft, 1) 4394 y := s.newValue2(cvttab.and, ft, x, one) 4395 z := s.newValue2(cvttab.rsh, ft, x, one) 4396 z = s.newValue2(cvttab.or, ft, z, y) 4397 a := s.newValue1(cvttab.cvt2F, tt, z) 4398 a1 := s.newValue2(cvttab.add, tt, a, a) 4399 s.vars[n] = a1 4400 s.endBlock() 4401 bElse.AddEdgeTo(bAfter) 4402 4403 s.startBlock(bAfter) 4404 return s.variable(n, n.Type) 4405 } 4406 4407 type u322fcvtTab struct { 4408 cvtI2F, cvtF2F ssa.Op 4409 } 4410 4411 var u32_f64 = u322fcvtTab{ 4412 cvtI2F: ssa.OpCvt32to64F, 4413 cvtF2F: ssa.OpCopy, 4414 } 4415 4416 var u32_f32 = u322fcvtTab{ 4417 cvtI2F: ssa.OpCvt32to32F, 4418 cvtF2F: ssa.OpCvt64Fto32F, 4419 } 4420 4421 func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4422 return s.uint32Tofloat(&u32_f64, n, x, ft, tt) 4423 } 4424 4425 func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4426 return s.uint32Tofloat(&u32_f32, n, x, ft, tt) 4427 } 4428 4429 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4430 // if x >= 0 { 4431 // result = floatY(x) 4432 // } else { 4433 // result = floatY(float64(x) + (1<<32)) 4434 // } 4435 cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft)) 4436 b := s.endBlock() 4437 b.Kind = ssa.BlockIf 4438 b.SetControl(cmp) 4439 b.Likely = ssa.BranchLikely 4440 4441 bThen := s.f.NewBlock(ssa.BlockPlain) 4442 bElse := s.f.NewBlock(ssa.BlockPlain) 4443 bAfter := s.f.NewBlock(ssa.BlockPlain) 4444 4445 b.AddEdgeTo(bThen) 4446 s.startBlock(bThen) 4447 a0 := s.newValue1(cvttab.cvtI2F, tt, x) 4448 s.vars[n] = a0 4449 s.endBlock() 4450 bThen.AddEdgeTo(bAfter) 4451 4452 b.AddEdgeTo(bElse) 4453 s.startBlock(bElse) 4454 a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x) 4455 twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32)) 4456 a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32) 4457 a3 := s.newValue1(cvttab.cvtF2F, tt, a2) 4458 4459 s.vars[n] = a3 4460 s.endBlock() 4461 bElse.AddEdgeTo(bAfter) 4462 4463 s.startBlock(bAfter) 4464 return s.variable(n, n.Type) 4465 } 4466 4467 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
4468 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { 4469 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { 4470 s.Fatalf("node must be a map or a channel") 4471 } 4472 // if n == nil { 4473 // return 0 4474 // } else { 4475 // // len 4476 // return *((*int)n) 4477 // // cap 4478 // return *(((*int)n)+1) 4479 // } 4480 lenType := n.Type 4481 nilValue := s.constNil(types.Types[TUINTPTR]) 4482 cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue) 4483 b := s.endBlock() 4484 b.Kind = ssa.BlockIf 4485 b.SetControl(cmp) 4486 b.Likely = ssa.BranchUnlikely 4487 4488 bThen := s.f.NewBlock(ssa.BlockPlain) 4489 bElse := s.f.NewBlock(ssa.BlockPlain) 4490 bAfter := s.f.NewBlock(ssa.BlockPlain) 4491 4492 // length/capacity of a nil map/chan is zero 4493 b.AddEdgeTo(bThen) 4494 s.startBlock(bThen) 4495 s.vars[n] = s.zeroVal(lenType) 4496 s.endBlock() 4497 bThen.AddEdgeTo(bAfter) 4498 4499 b.AddEdgeTo(bElse) 4500 s.startBlock(bElse) 4501 switch n.Op { 4502 case OLEN: 4503 // length is stored in the first word for map/chan 4504 s.vars[n] = s.load(lenType, x) 4505 case OCAP: 4506 // capacity is stored in the second word for chan 4507 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) 4508 s.vars[n] = s.load(lenType, sw) 4509 default: 4510 s.Fatalf("op must be OLEN or OCAP") 4511 } 4512 s.endBlock() 4513 bElse.AddEdgeTo(bAfter) 4514 4515 s.startBlock(bAfter) 4516 return s.variable(n, lenType) 4517 } 4518 4519 type f2uCvtTab struct { 4520 ltf, cvt2U, subf, or ssa.Op 4521 floatValue func(*state, *types.Type, float64) *ssa.Value 4522 intValue func(*state, *types.Type, int64) *ssa.Value 4523 cutoff uint64 4524 } 4525 4526 var f32_u64 = f2uCvtTab{ 4527 ltf: ssa.OpLess32F, 4528 cvt2U: ssa.OpCvt32Fto64, 4529 subf: ssa.OpSub32F, 4530 or: ssa.OpOr64, 4531 floatValue: (*state).constFloat32, 4532 intValue: (*state).constInt64, 4533 cutoff: 9223372036854775808, 4534 } 4535 4536 var f64_u64 = f2uCvtTab{ 4537 ltf: ssa.OpLess64F, 4538 cvt2U: ssa.OpCvt64Fto64, 4539 subf: ssa.OpSub64F, 4540 or: ssa.OpOr64, 4541 floatValue: (*state).constFloat64, 4542 intValue: (*state).constInt64, 4543 cutoff: 9223372036854775808, 4544 } 4545 4546 var f32_u32 = f2uCvtTab{ 4547 ltf: ssa.OpLess32F, 4548 cvt2U: ssa.OpCvt32Fto32, 4549 subf: ssa.OpSub32F, 4550 or: ssa.OpOr32, 4551 floatValue: (*state).constFloat32, 4552 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 4553 cutoff: 2147483648, 4554 } 4555 4556 var f64_u32 = f2uCvtTab{ 4557 ltf: ssa.OpLess64F, 4558 cvt2U: ssa.OpCvt64Fto32, 4559 subf: ssa.OpSub64F, 4560 or: ssa.OpOr32, 4561 floatValue: (*state).constFloat64, 4562 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 4563 cutoff: 2147483648, 4564 } 4565 4566 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4567 return s.floatToUint(&f32_u64, n, x, ft, tt) 4568 } 4569 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4570 return s.floatToUint(&f64_u64, n, x, ft, tt) 4571 } 4572 4573 func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4574 return s.floatToUint(&f32_u32, n, x, ft, tt) 4575 } 4576 4577 func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4578 return s.floatToUint(&f64_u32, n, x, ft, tt) 4579 } 4580 4581 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4582 // 
cutoff:=1<<(intY_Size-1) 4583 // if x < floatX(cutoff) { 4584 // result = uintY(x) 4585 // } else { 4586 // y = x - floatX(cutoff) 4587 // z = uintY(y) 4588 // result = z | -(cutoff) 4589 // } 4590 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) 4591 cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff) 4592 b := s.endBlock() 4593 b.Kind = ssa.BlockIf 4594 b.SetControl(cmp) 4595 b.Likely = ssa.BranchLikely 4596 4597 bThen := s.f.NewBlock(ssa.BlockPlain) 4598 bElse := s.f.NewBlock(ssa.BlockPlain) 4599 bAfter := s.f.NewBlock(ssa.BlockPlain) 4600 4601 b.AddEdgeTo(bThen) 4602 s.startBlock(bThen) 4603 a0 := s.newValue1(cvttab.cvt2U, tt, x) 4604 s.vars[n] = a0 4605 s.endBlock() 4606 bThen.AddEdgeTo(bAfter) 4607 4608 b.AddEdgeTo(bElse) 4609 s.startBlock(bElse) 4610 y := s.newValue2(cvttab.subf, ft, x, cutoff) 4611 y = s.newValue1(cvttab.cvt2U, tt, y) 4612 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) 4613 a1 := s.newValue2(cvttab.or, tt, y, z) 4614 s.vars[n] = a1 4615 s.endBlock() 4616 bElse.AddEdgeTo(bAfter) 4617 4618 s.startBlock(bAfter) 4619 return s.variable(n, n.Type) 4620 } 4621 4622 // dottype generates SSA for a type assertion node. 4623 // commaok indicates whether to panic or return a bool. 4624 // If commaok is false, resok will be nil. 4625 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { 4626 iface := s.expr(n.Left) // input interface 4627 target := s.expr(n.Right) // target type 4628 byteptr := s.f.Config.Types.BytePtr 4629 4630 if n.Type.IsInterface() { 4631 if n.Type.IsEmptyInterface() { 4632 // Converting to an empty interface. 4633 // Input could be an empty or nonempty interface. 4634 if Debug_typeassert > 0 { 4635 Warnl(n.Pos, "type assertion inlined") 4636 } 4637 4638 // Get itab/type field from input. 4639 itab := s.newValue1(ssa.OpITab, byteptr, iface) 4640 // Conversion succeeds iff that field is not nil. 4641 cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr)) 4642 4643 if n.Left.Type.IsEmptyInterface() && commaok { 4644 // Converting empty interface to empty interface with ,ok is just a nil check. 4645 return iface, cond 4646 } 4647 4648 // Branch on nilness. 4649 b := s.endBlock() 4650 b.Kind = ssa.BlockIf 4651 b.SetControl(cond) 4652 b.Likely = ssa.BranchLikely 4653 bOk := s.f.NewBlock(ssa.BlockPlain) 4654 bFail := s.f.NewBlock(ssa.BlockPlain) 4655 b.AddEdgeTo(bOk) 4656 b.AddEdgeTo(bFail) 4657 4658 if !commaok { 4659 // On failure, panic by calling panicnildottype. 4660 s.startBlock(bFail) 4661 s.rtcall(panicnildottype, false, nil, target) 4662 4663 // On success, return (perhaps modified) input interface. 4664 s.startBlock(bOk) 4665 if n.Left.Type.IsEmptyInterface() { 4666 res = iface // Use input interface unchanged. 4667 return 4668 } 4669 // Load type out of itab, build interface with existing idata. 4670 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4671 typ := s.load(byteptr, off) 4672 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4673 res = s.newValue2(ssa.OpIMake, n.Type, typ, idata) 4674 return 4675 } 4676 4677 s.startBlock(bOk) 4678 // nonempty -> empty 4679 // Need to load type from itab 4680 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4681 s.vars[&typVar] = s.load(byteptr, off) 4682 s.endBlock() 4683 4684 // itab is nil, might as well use that as the nil result. 4685 s.startBlock(bFail) 4686 s.vars[&typVar] = itab 4687 s.endBlock() 4688 4689 // Merge point. 
4690 bEnd := s.f.NewBlock(ssa.BlockPlain)
4691 bOk.AddEdgeTo(bEnd)
4692 bFail.AddEdgeTo(bEnd)
4693 s.startBlock(bEnd)
4694 idata := s.newValue1(ssa.OpIData, n.Type, iface)
4695 res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
4696 resok = cond
4697 delete(s.vars, &typVar)
4698 return
4699 }
4700 // converting to a nonempty interface needs a runtime call.
4701 if Debug_typeassert > 0 {
4702 Warnl(n.Pos, "type assertion not inlined")
4703 }
4704 if n.Left.Type.IsEmptyInterface() {
4705 if commaok {
4706 call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
4707 return call[0], call[1]
4708 }
4709 return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
4710 }
4711 if commaok {
4712 call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
4713 return call[0], call[1]
4714 }
4715 return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
4716 }
4717 
4718 if Debug_typeassert > 0 {
4719 Warnl(n.Pos, "type assertion inlined")
4720 }
4721 
4722 // Converting to a concrete type.
4723 direct := isdirectiface(n.Type)
4724 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
4728 var targetITab *ssa.Value
4729 if n.Left.Type.IsEmptyInterface() {
4730 // Looking for pointer to target type.
4731 targetITab = target
4732 } else {
4733 // Looking for pointer to itab for target type and source interface.
4734 targetITab = s.expr(n.List.First())
4735 }
4736 
4737 var tmp *Node // temporary for use with large types
4738 var addr *ssa.Value // address of tmp
4739 if commaok && !canSSAType(n.Type) {
4740 // unSSAable type, use temporary.
4741 // TODO: get rid of some of these temporaries.
4742 tmp = tempAt(n.Pos, s.curfn, n.Type)
4743 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
4744 addr = s.addr(tmp, false)
4745 }
4746 
4747 cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
4748 b := s.endBlock()
4749 b.Kind = ssa.BlockIf
4750 b.SetControl(cond)
4751 b.Likely = ssa.BranchLikely
4752 
4753 bOk := s.f.NewBlock(ssa.BlockPlain)
4754 bFail := s.f.NewBlock(ssa.BlockPlain)
4755 b.AddEdgeTo(bOk)
4756 b.AddEdgeTo(bFail)
4757 
4758 if !commaok {
4759 // on failure, panic by calling panicdottype
4760 s.startBlock(bFail)
4761 taddr := s.expr(n.Right.Right)
4762 if n.Left.Type.IsEmptyInterface() {
4763 s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
4764 } else {
4765 s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
4766 }
4767 
4768 // on success, return data from interface
4769 s.startBlock(bOk)
4770 if direct {
4771 return s.newValue1(ssa.OpIData, n.Type, iface), nil
4772 }
4773 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
4774 return s.load(n.Type, p), nil
4775 }
4776 
4777 // commaok is the more complicated case because we have
4778 // a control flow merge point.
4779 bEnd := s.f.NewBlock(ssa.BlockPlain)
4780 // Note that we need a new valVar each time (unlike okVar where we can
4781 // reuse the variable) because it might have a different type every time.
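// (In effect, the commaok lowering below is:
//
//	if itab == targetITab { val = iface's data word; ok = true } else { val = zero value; ok = false }
//
// merged at bEnd, with val routed through the tmp slot when the target
// type is not SSA-able.)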
4782 valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}} 4783 4784 // type assertion succeeded 4785 s.startBlock(bOk) 4786 if tmp == nil { 4787 if direct { 4788 s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface) 4789 } else { 4790 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4791 s.vars[valVar] = s.load(n.Type, p) 4792 } 4793 } else { 4794 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4795 s.move(n.Type, addr, p) 4796 } 4797 s.vars[&okVar] = s.constBool(true) 4798 s.endBlock() 4799 bOk.AddEdgeTo(bEnd) 4800 4801 // type assertion failed 4802 s.startBlock(bFail) 4803 if tmp == nil { 4804 s.vars[valVar] = s.zeroVal(n.Type) 4805 } else { 4806 s.zero(n.Type, addr) 4807 } 4808 s.vars[&okVar] = s.constBool(false) 4809 s.endBlock() 4810 bFail.AddEdgeTo(bEnd) 4811 4812 // merge point 4813 s.startBlock(bEnd) 4814 if tmp == nil { 4815 res = s.variable(valVar, n.Type) 4816 delete(s.vars, valVar) 4817 } else { 4818 res = s.load(n.Type, addr) 4819 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem()) 4820 } 4821 resok = s.variable(&okVar, types.Types[TBOOL]) 4822 delete(s.vars, &okVar) 4823 return res, resok 4824 } 4825 4826 // variable returns the value of a variable at the current location. 4827 func (s *state) variable(name *Node, t *types.Type) *ssa.Value { 4828 v := s.vars[name] 4829 if v != nil { 4830 return v 4831 } 4832 v = s.fwdVars[name] 4833 if v != nil { 4834 return v 4835 } 4836 4837 if s.curBlock == s.f.Entry { 4838 // No variable should be live at entry. 4839 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v) 4840 } 4841 // Make a FwdRef, which records a value that's live on block input. 4842 // We'll find the matching definition as part of insertPhis. 4843 v = s.newValue0A(ssa.OpFwdRef, t, name) 4844 s.fwdVars[name] = v 4845 s.addNamedValue(name, v) 4846 return v 4847 } 4848 4849 func (s *state) mem() *ssa.Value { 4850 return s.variable(&memVar, types.TypeMem) 4851 } 4852 4853 func (s *state) addNamedValue(n *Node, v *ssa.Value) { 4854 if n.Class() == Pxxx { 4855 // Don't track our dummy nodes (&memVar etc.). 4856 return 4857 } 4858 if n.IsAutoTmp() { 4859 // Don't track temporary variables. 4860 return 4861 } 4862 if n.Class() == PPARAMOUT { 4863 // Don't track named output values. This prevents return values 4864 // from being assigned too early. See #14591 and #14762. TODO: allow this. 4865 return 4866 } 4867 if n.Class() == PAUTO && n.Xoffset != 0 { 4868 s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) 4869 } 4870 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} 4871 values, ok := s.f.NamedValues[loc] 4872 if !ok { 4873 s.f.Names = append(s.f.Names, loc) 4874 } 4875 s.f.NamedValues[loc] = append(values, v) 4876 } 4877 4878 // Branch is an unresolved branch. 4879 type Branch struct { 4880 P *obj.Prog // branch instruction 4881 B *ssa.Block // target 4882 } 4883 4884 // SSAGenState contains state needed during Prog generation. 4885 type SSAGenState struct { 4886 pp *Progs 4887 4888 // Branches remembers all the branch instructions we've seen 4889 // and where they would like to go. 4890 Branches []Branch 4891 4892 // bstart remembers where each block starts (indexed by block ID) 4893 bstart []*obj.Prog 4894 4895 // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 4896 SSEto387 map[int16]int16 4897 // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. 
	ScratchFpMem *Node

	maxarg int64 // largest frame size for arguments to calls made by the function

	// Map from GC safe points to liveness index, generated by
	// liveness analysis.
	livenessMap LivenessMap

	// lineRunStart records the beginning of the current run of instructions
	// within a single block sharing the same line number.
	// Used to move statement marks to the beginning of such runs.
	lineRunStart *obj.Prog

	// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
	OnWasmStackSkipped int
}

// Prog appends a new Prog.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
	p := s.pp.Prog(as)
	if ssa.LosesStmtMark(as) {
		return p
	}
	// Float a statement start to the beginning of any same-line run.
	// lineRunStart is reset at block boundaries, which appears to work well.
	if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
		s.lineRunStart = p
	} else if p.Pos.IsStmt() == src.PosIsStmt {
		s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
		p.Pos = p.Pos.WithNotStmt()
	}
	return p
}

// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
	return s.pp.next
}

// SetPos sets the current source position.
func (s *SSAGenState) SetPos(pos src.XPos) {
	s.pp.pos = pos
}

// Br emits a single branch instruction and returns the instruction.
// Not all architectures need the returned instruction, but otherwise
// the boilerplate is common to all.
func (s *SSAGenState) Br(op obj.As, target *ssa.Block) *obj.Prog {
	p := s.Prog(op)
	p.To.Type = obj.TYPE_BRANCH
	s.Branches = append(s.Branches, Branch{P: p, B: target})
	return p
}

// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
// that reduce "jumpy" line number churn when debugging.
// Spill/fill/copy instructions from the register allocator,
// phi functions, and instructions with a no-pos position
// are examples of instructions that can cause churn.
func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
	switch v.Op {
	case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
		// These are not statements
		s.SetPos(v.Pos.WithNotStmt())
	default:
		p := v.Pos
		if p != src.NoXPos {
			// If the position is defined, update the position.
			// Also convert default IsStmt to NotStmt; only
			// explicit statement boundaries should appear
			// in the generated code.
			if p.IsStmt() != src.PosIsStmt {
				p = p.WithNotStmt()
			}
			s.SetPos(p)
		}
	}
}

// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
type byXoffset []*Node

func (s byXoffset) Len() int           { return len(s) }
func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
func (s byXoffset) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func emitStackObjects(e *ssafn, pp *Progs) {
	var vars []*Node
	for _, n := range e.curfn.Func.Dcl {
		if livenessShouldTrack(n) && n.Addrtaken() {
			vars = append(vars, n)
		}
	}
	if len(vars) == 0 {
		return
	}

	// Sort variables from lowest to highest address.
	sort.Sort(byXoffset(vars))

	// Populate the stack object data.
	// Format must match runtime/stack.go:stackObjectRecord.
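	// Layout sketch, inferred from the writes below (illustrative only;
	// runtime/stack.go:stackObjectRecord is authoritative):
	//
	//	nObjects uintptr
	//	then, for each object:
	//		offset uintptr // relative to argp or varp, see note below
	//		typ    uintptr // pointer to the type descriptor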
	x := e.curfn.Func.lsym.Func.StackObjects
	off := 0
	off = duintptr(x, off, uint64(len(vars)))
	for _, v := range vars {
		// Note: arguments and return values have non-negative Xoffset,
		// in which case the offset is relative to argp.
		// Locals have a negative Xoffset, in which case the offset is relative to varp.
		off = duintptr(x, off, uint64(v.Xoffset))
		if !typesym(v.Type).Siggen() {
			Fatalf("stack object's type symbol not generated for type %s", v.Type)
		}
		off = dsymptr(x, off, dtypesym(v.Type), 0)
	}

	// Emit a funcdata pointing at the stack object data.
	p := pp.Prog(obj.AFUNCDATA)
	Addrconst(&p.From, objabi.FUNCDATA_StackObjects)
	p.To.Type = obj.TYPE_MEM
	p.To.Name = obj.NAME_EXTERN
	p.To.Sym = x

	if debuglive != 0 {
		for _, v := range vars {
			Warnl(v.Pos, "stack object %v %s", v, v.Type.String())
		}
	}
}

// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *Progs) {
	var s SSAGenState

	e := f.Frontend().(*ssafn)

	s.livenessMap = liveness(e, f)
	emitStackObjects(e, pp)

	// Remember where each block starts.
	s.bstart = make([]*obj.Prog, f.NumBlocks())
	s.pp = pp
	var progToValue map[*obj.Prog]*ssa.Value
	var progToBlock map[*obj.Prog]*ssa.Block
	var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
	if f.PrintOrHtmlSSA {
		progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
		progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
		f.Logf("genssa %s\n", f.Name)
		progToBlock[s.pp.next] = f.Blocks[0]
	}

	if thearch.Use387 {
		s.SSEto387 = map[int16]int16{}
	}

	s.ScratchFpMem = e.scratchFpMem

	if Ctxt.Flag_locationlists {
		if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
			f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
		}
		valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
		for i := range valueToProgAfter {
			valueToProgAfter[i] = nil
		}
	}

	// If the very first instruction is not tagged as a statement,
	// debuggers may attribute it to the previous function in the program.
	firstPos := src.NoXPos
	for _, v := range f.Entry.Values {
		if v.Pos.IsStmt() == src.PosIsStmt {
			firstPos = v.Pos
			v.Pos = firstPos.WithDefaultStmt()
			break
		}
	}

	// Emit basic blocks
	for i, b := range f.Blocks {
		s.bstart[b.ID] = s.pp.next
		s.pp.nextLive = LivenessInvalid
		s.lineRunStart = nil

		// Emit values in block
		thearch.SSAMarkMoves(&s, b)
		for _, v := range b.Values {
			x := s.pp.next
			s.DebugFriendlySetPosFrom(v)
			// Attach this safe point to the next
			// instruction.
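			// (Illustrative, an assumption about how pp consumes nextLive:
			// if v is a call such as CALL runtime.growslice(SB), the index
			// stored here becomes the PCDATA stack-map record emitted just
			// before that instruction; see PrepareCall below.)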
			s.pp.nextLive = s.livenessMap.Get(v)
			switch v.Op {
			case ssa.OpInitMem:
				// memory arg needs no code
			case ssa.OpArg:
				// input args need no code
			case ssa.OpSP, ssa.OpSB:
				// nothing to do
			case ssa.OpSelect0, ssa.OpSelect1:
				// nothing to do
			case ssa.OpGetG:
				// nothing to do when there's a g register,
				// and checkLower complains if there's not
			case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
				// nothing to do; already used by liveness
			case ssa.OpPhi:
				CheckLoweredPhi(v)
			case ssa.OpConvert:
				// nothing to do; no-op conversion for liveness
				if v.Args[0].Reg() != v.Reg() {
					v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
				}
			default:
				// let the backend handle it
				// Special case for first line in function; move it to the start.
				if firstPos != src.NoXPos {
					s.SetPos(firstPos)
					firstPos = src.NoXPos
				}
				thearch.SSAGenValue(&s, v)
			}

			if Ctxt.Flag_locationlists {
				valueToProgAfter[v.ID] = s.pp.next
			}

			if f.PrintOrHtmlSSA {
				for ; x != s.pp.next; x = x.Link {
					progToValue[x] = v
				}
			}
		}
		// Emit control flow instructions for block
		var next *ssa.Block
		if i < len(f.Blocks)-1 && Debug['N'] == 0 {
			// If -N, leave next==nil so every block with successors
			// ends in a JMP (except call blocks - plive doesn't like
			// select{send,recv} followed by a JMP call). Helps keep
			// line numbers for otherwise empty blocks.
			next = f.Blocks[i+1]
		}
		x := s.pp.next
		s.SetPos(b.Pos)
		thearch.SSAGenBlock(&s, b, next)
		if f.PrintOrHtmlSSA {
			for ; x != s.pp.next; x = x.Link {
				progToBlock[x] = b
			}
		}
	}

	if Ctxt.Flag_locationlists {
		e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset)
		bstart := s.bstart
		// Note that at this moment, Prog.Pc is a sequence number; it's
		// not a real PC until after assembly, so this mapping has to
		// be done later.
		e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 {
			switch v {
			case ssa.BlockStart.ID:
				return bstart[b].Pc
			case ssa.BlockEnd.ID:
				return e.curfn.Func.lsym.Size
			default:
				return valueToProgAfter[v].Pc
			}
		}
	}

	// Resolve branches, and relax DefaultStmt into NotStmt
	for _, br := range s.Branches {
		br.P.To.Val = s.bstart[br.B.ID]
		if br.P.Pos.IsStmt() != src.PosIsStmt {
			br.P.Pos = br.P.Pos.WithNotStmt()
		}
	}

	if e.log { // spew to stdout
		filename := ""
		for p := pp.Text; p != nil; p = p.Link {
			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
				filename = p.InnermostFilename()
				f.Logf("# %s\n", filename)
			}

			var s string
			if v, ok := progToValue[p]; ok {
				s = v.String()
			} else if b, ok := progToBlock[p]; ok {
				s = b.String()
			} else {
				s = "   " // most value and branch strings are 2-3 characters long
			}
			f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
		}
	}
	if f.HTMLWriter != nil { // spew to ssa.html
		var buf bytes.Buffer
		buf.WriteString("<code>")
		buf.WriteString("<dl class=\"ssa-gen\">")
		filename := ""
		for p := pp.Text; p != nil; p = p.Link {
			// Don't spam every line with the file name, which is often huge.
			// Only print changes, and "unknown" is not a change.
			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
				filename = p.InnermostFilename()
				buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
				buf.WriteString(html.EscapeString("# " + filename))
				buf.WriteString("</dd>")
			}

			buf.WriteString("<dt class=\"ssa-prog-src\">")
			if v, ok := progToValue[p]; ok {
				buf.WriteString(v.HTML())
			} else if b, ok := progToBlock[p]; ok {
				buf.WriteString("<b>" + b.HTML() + "</b>")
			}
			buf.WriteString("</dt>")
			buf.WriteString("<dd class=\"ssa-prog\">")
			buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
			buf.WriteString("</dd>")
		}
		buf.WriteString("</dl>")
		buf.WriteString("</code>")
		f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
	}

	defframe(&s, e)
	if Debug['f'] != 0 {
		frame(0)
	}

	f.HTMLWriter.Close()
	f.HTMLWriter = nil
}

func defframe(s *SSAGenState, e *ssafn) {
	pp := s.pp

	frame := Rnd(s.maxarg+e.stksize, int64(Widthreg))
	if thearch.PadFrame != nil {
		frame = thearch.PadFrame(frame)
	}

	// Fill in argument and frame size.
	pp.Text.To.Type = obj.TYPE_TEXTSIZE
	pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
	pp.Text.To.Offset = frame

	// Insert code to zero ambiguously live variables so that the
	// garbage collector only sees initialized values when it
	// looks for pointers.
	p := pp.Text
	var lo, hi int64

	// Opaque state for backend to use. Current backends use it to
	// keep track of which helper registers have been zeroed.
	var state uint32

	// Iterate through declarations. They are sorted in decreasing Xoffset order.
	for _, n := range e.curfn.Func.Dcl {
		if !n.Name.Needzero() {
			continue
		}
		if n.Class() != PAUTO {
			Fatalf("needzero class %d", n.Class())
		}
		if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
			Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
		}

		if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
			// Merge with range we already have.
			lo = n.Xoffset
			continue
		}

		// Zero old range
		p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)

		// Set new range.
		lo = n.Xoffset
		hi = lo + n.Type.Size()
	}

	// Zero final range.
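	// Worked example with hypothetical offsets (Widthreg == 8): two locals
	// needing zeroing at Xoffset -8 and -24, each 8 bytes. The first sets
	// lo=-8, hi=0; for the second, -24+8 = -16 >= lo-16 = -24, so the ranges
	// merge to lo=-24 and the single call below clears frame-24 through
	// frame, zeroing the 8-byte gap as well rather than issuing two ranges.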
	thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}

type FloatingEQNEJump struct {
	Jump  obj.As
	Index int
}

func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) {
	p := s.Prog(jumps.Jump)
	p.To.Type = obj.TYPE_BRANCH
	p.Pos = b.Pos
	to := jumps.Index
	s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
}

func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
	switch next {
	case b.Succs[0].Block():
		s.oneFPJump(b, &jumps[0][0])
		s.oneFPJump(b, &jumps[0][1])
	case b.Succs[1].Block():
		s.oneFPJump(b, &jumps[1][0])
		s.oneFPJump(b, &jumps[1][1])
	default:
		s.oneFPJump(b, &jumps[1][0])
		s.oneFPJump(b, &jumps[1][1])
		q := s.Prog(obj.AJMP)
		q.Pos = b.Pos
		q.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
	}
}

func AuxOffset(v *ssa.Value) (offset int64) {
	if v.Aux == nil {
		return 0
	}
	n, ok := v.Aux.(*Node)
	if !ok {
		v.Fatalf("bad aux type in %s\n", v.LongString())
	}
	if n.Class() == PAUTO {
		return n.Xoffset
	}
	return 0
}

// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
	AddAux2(a, v, v.AuxInt)
}
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
	if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
		v.Fatalf("bad AddAux addr %v", a)
	}
	// add integer offset
	a.Offset += offset

	// If no additional symbol offset, we're done.
	if v.Aux == nil {
		return
	}
	// Add symbol's offset from its base register.
	switch n := v.Aux.(type) {
	case *obj.LSym:
		a.Name = obj.NAME_EXTERN
		a.Sym = n
	case *Node:
		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
			a.Name = obj.NAME_PARAM
			a.Sym = n.Orig.Sym.Linksym()
			a.Offset += n.Xoffset
			break
		}
		a.Name = obj.NAME_AUTO
		a.Sym = n.Sym.Linksym()
		a.Offset += n.Xoffset
	default:
		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
	}
}

// extendIndex extends v to a full int width.
// It panics using the given function if v does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value {
	size := v.Type.Size()
	if size == s.config.PtrSize {
		return v
	}
	if size > s.config.PtrSize {
		// truncate 64-bit indexes on 32-bit pointer archs. Test the
		// high word and branch to out-of-bounds failure if it is not 0.
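		// (Illustrative: on 386, an int64 index i in s[i] is lowered to a
		// check that the high 32 bits are zero, calling panicfn otherwise,
		// followed by a truncation to 32 bits. With -B, which sets
		// Debug['B'] != 0, the check below is omitted.)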
		if Debug['B'] == 0 {
			hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v)
			cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
			s.check(cmp, panicfn)
		}
		return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v)
	}

	// Extend value to the required size
	var op ssa.Op
	if v.Type.IsSigned() {
		// 10*size + PtrSize encodes the (index size, pointer size) pair in
		// bytes, e.g. 14 means a 1-byte index on a 4-byte-pointer arch.
		switch 10*size + s.config.PtrSize {
		case 14:
			op = ssa.OpSignExt8to32
		case 18:
			op = ssa.OpSignExt8to64
		case 24:
			op = ssa.OpSignExt16to32
		case 28:
			op = ssa.OpSignExt16to64
		case 48:
			op = ssa.OpSignExt32to64
		default:
			s.Fatalf("bad signed index extension %s", v.Type)
		}
	} else {
		switch 10*size + s.config.PtrSize {
		case 14:
			op = ssa.OpZeroExt8to32
		case 18:
			op = ssa.OpZeroExt8to64
		case 24:
			op = ssa.OpZeroExt16to32
		case 28:
			op = ssa.OpZeroExt16to64
		case 48:
			op = ssa.OpZeroExt32to64
		default:
			s.Fatalf("bad unsigned index extension %s", v.Type)
		}
	}
	return s.newValue1(op, types.Types[TINT], v)
}

// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
// Called during ssaGenValue.
func CheckLoweredPhi(v *ssa.Value) {
	if v.Op != ssa.OpPhi {
		v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
	}
	if v.Type.IsMemory() {
		return
	}
	f := v.Block.Func
	loc := f.RegAlloc[v.ID]
	for _, a := range v.Args {
		if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
			v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
		}
	}
}

// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
// That register contains the closure pointer on closure entry.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
	entry := v.Block.Func.Entry
	if entry != v.Block || entry.Values[0] != v {
		Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
	}
}

// AutoVar returns a *Node and int64 representing the auto variable and offset within it
// where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
	if v.Type.Size() > loc.Type.Size() {
		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
	}
	return loc.N.(*Node), loc.Off
}

func AddrAuto(a *obj.Addr, v *ssa.Value) {
	n, off := AutoVar(v)
	a.Type = obj.TYPE_MEM
	a.Sym = n.Sym.Linksym()
	a.Reg = int16(thearch.REGSP)
	a.Offset = n.Xoffset + off
	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
		a.Name = obj.NAME_PARAM
	} else {
		a.Name = obj.NAME_AUTO
	}
}

func (s *SSAGenState) AddrScratch(a *obj.Addr) {
	if s.ScratchFpMem == nil {
		panic("no scratch memory available; forgot to declare usesScratch for Op?")
	}
	a.Type = obj.TYPE_MEM
	a.Name = obj.NAME_AUTO
	a.Sym = s.ScratchFpMem.Sym.Linksym()
	a.Reg = int16(thearch.REGSP)
	a.Offset = s.ScratchFpMem.Xoffset
}

// Call returns a new CALL instruction for the SSA value v.
// It uses PrepareCall to prepare the call.
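// Direct calls address the target symbol as NAME_EXTERN memory, which an
// assembly listing would render roughly as CALL runtime.deferproc(SB);
// indirect calls take the target from the first argument's register.
// (The rendering above is illustrative, not something this file emits.)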
func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
	s.PrepareCall(v)

	p := s.Prog(obj.ACALL)
	if sym, ok := v.Aux.(*obj.LSym); ok {
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = sym
	} else {
		// TODO(mdempsky): Can these differences be eliminated?
		switch thearch.LinkArch.Family {
		case sys.AMD64, sys.I386, sys.PPC64, sys.S390X, sys.Wasm:
			p.To.Type = obj.TYPE_REG
		case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
			p.To.Type = obj.TYPE_MEM
		default:
			Fatalf("unknown indirect call family")
		}
		p.To.Reg = v.Args[0].Reg()
	}
	return p
}

// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
// It must be called immediately before emitting the actual CALL instruction,
// since it emits PCDATA for the stack map at the call (calls are safe points).
func (s *SSAGenState) PrepareCall(v *ssa.Value) {
	idx := s.livenessMap.Get(v)
	if !idx.Valid() {
		// typedmemclr and typedmemmove are write barriers and
		// deeply non-preemptible. They are unsafe points and
		// hence should not have liveness maps.
		if sym, _ := v.Aux.(*obj.LSym); !(sym == typedmemclr || sym == typedmemmove) {
			Fatalf("missing stack map index for %v", v.LongString())
		}
	}

	if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
		// Deferred calls will appear to be returning to
		// the CALL deferreturn(SB) that we are about to emit.
		// However, the stack trace code will show the line
		// of the instruction byte before the return PC.
		// To avoid that being an unrelated instruction,
		// insert an actual hardware NOP that will have the right line number.
		// This is different from obj.ANOP, which is a virtual no-op
		// that doesn't make it into the instruction stream.
		thearch.Ginsnop(s.pp)
	}

	if sym, ok := v.Aux.(*obj.LSym); ok {
		// Record call graph information for nowritebarrierrec
		// analysis.
		if nowritebarrierrecCheck != nil {
			nowritebarrierrecCheck.recordCall(s.pp.curfn, sym, v.Pos)
		}
	}

	if s.maxarg < v.AuxInt {
		s.maxarg = v.AuxInt
	}
}

// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *Node) int {
	t := n.Left.Type
	f := n.Sym
	if !t.IsStruct() {
		panic("ODOT's LHS is not a struct")
	}

	var i int
	for _, t1 := range t.Fields().Slice() {
		if t1.Sym != f {
			i++
			continue
		}
		if t1.Offset != n.Xoffset {
			panic("field offset doesn't match")
		}
		return i
	}
	panic(fmt.Sprintf("can't find field in expr %v\n", n))

	// TODO: keep the result of this function somewhere in the ODOT Node
	// so we don't have to recompute it each time we need it.
}

// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
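// (ssafn is the concrete type behind f.Frontend() in genssa above; the
// methods below, from StringData through SetWBPos, make up that service
// surface.)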
type ssafn struct {
	curfn        *Node
	strings      map[string]interface{} // map from constant string to data symbols
	scratchFpMem *Node                  // temp for floating point register / memory moves on some architectures
	stksize      int64                  // stack size for current frame
	stkptrsize   int64                  // prefix of stack containing pointers
	log          bool                   // print ssa debug to the stdout
}

// StringData returns a symbol (a *types.Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) interface{} {
	if aux, ok := e.strings[s]; ok {
		return aux
	}
	if e.strings == nil {
		e.strings = make(map[string]interface{})
	}
	data := stringsym(e.curfn.Pos, s)
	e.strings[s] = data
	return data
}

func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
	n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
	return n
}

func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := types.NewPtr(types.Types[TUINT8])
	lenType := types.Types[TINT]
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this string up into two separate variables.
		p := e.splitSlot(&name, ".ptr", 0, ptrType)
		l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
		return p, l
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}

func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	u := types.Types[TUINTPTR]
	t := types.NewPtr(types.Types[TUINT8])
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this interface up into two separate variables.
		f := ".itab"
		if n.Type.IsEmptyInterface() {
			f = ".type"
		}
		c := e.splitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
		d := e.splitSlot(&name, ".data", u.Size(), t)
		return c, d
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: u, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}

func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := types.NewPtr(name.Type.Elem())
	lenType := types.Types[TINT]
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this slice up into three separate variables.
		p := e.splitSlot(&name, ".ptr", 0, ptrType)
		l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
		c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
		return p, l, c
	}
	// Return the three parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}

func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	s := name.Type.Size() / 2
	var t *types.Type
	if s == 8 {
		t = types.Types[TFLOAT64]
	} else {
		t = types.Types[TFLOAT32]
	}
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this complex up into two separate variables.
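		// (For example, a complex128 auto c becomes c.real at offset 0 and
		// c.imag at offset 8, each a float64; the variable name here is
		// illustrative.)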
		r := e.splitSlot(&name, ".real", 0, t)
		i := e.splitSlot(&name, ".imag", t.Size(), t)
		return r, i
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}

func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	var t *types.Type
	if name.Type.IsSigned() {
		t = types.Types[TINT32]
	} else {
		t = types.Types[TUINT32]
	}
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this int64 up into two separate variables.
		if thearch.LinkArch.ByteOrder == binary.BigEndian {
			return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
		}
		return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
	}
	// Return the two parts of the larger variable.
	if thearch.LinkArch.ByteOrder == binary.BigEndian {
		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
	}
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
}

func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
	n := name.N.(*Node)
	st := name.Type
	ft := st.FieldType(i)
	var offset int64
	for f := 0; f < i; f++ {
		offset += st.FieldType(f).Size()
	}
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Note: the _ field may appear several times. But
		// have no fear, identically-named but distinct Autos are
		// ok, albeit maybe confusing for a debugger.
		return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
	}
	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}

func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
	n := name.N.(*Node)
	at := name.Type
	if at.NumElem() != 1 {
		Fatalf("bad array size")
	}
	et := at.Elem()
	if n.Class() == PAUTO && !n.Addrtaken() {
		return e.splitSlot(&name, "[0]", 0, et)
	}
	return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
}

func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
	return itabsym(it, offset)
}

// splitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
	s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}

	n := &Node{
		Name: new(Name),
		Op:   ONAME,
		Pos:  parent.N.(*Node).Pos,
	}
	n.Orig = n

	s.Def = asTypesNode(n)
	asNode(s.Def).Name.SetUsed(true)
	n.Sym = s
	n.Type = t
	n.SetClass(PAUTO)
	n.SetAddable(true)
	n.Esc = EscNever
	n.Name.Curfn = e.curfn
	e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
	dowidth(t)
	return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}

func (e *ssafn) CanSSA(t *types.Type) bool {
	return canSSAType(t)
}

func (e *ssafn) Line(pos src.XPos) string {
	return linestr(pos)
}

// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
	if e.log {
		fmt.Printf(msg, args...)
	}
}

func (e *ssafn) Log() bool {
	return e.log
}

// Fatalf reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
	lineno = pos
	nargs := append([]interface{}{e.curfn.funcname()}, args...)
	Fatalf("'%s': "+msg, nargs...)
}

// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
	Warnl(pos, fmt_, args...)
}

func (e *ssafn) Debug_checknil() bool {
	return Debug_checknil != 0
}

func (e *ssafn) UseWriteBarrier() bool {
	return use_writebarrier
}

func (e *ssafn) Syslook(name string) *obj.LSym {
	switch name {
	case "goschedguarded":
		return goschedguarded
	case "writeBarrier":
		return writeBarrier
	case "gcWriteBarrier":
		return gcWriteBarrier
	case "typedmemmove":
		return typedmemmove
	case "typedmemclr":
		return typedmemclr
	}
	Fatalf("unknown Syslook func %v", name)
	return nil
}

func (e *ssafn) SetWBPos(pos src.XPos) {
	e.curfn.Func.setWBPos(pos)
}

func (n *Node) Typ() *types.Type {
	return n.Type
}

func (n *Node) StorageClass() ssa.StorageClass {
	switch n.Class() {
	case PPARAM:
		return ssa.ClassParam
	case PPARAMOUT:
		return ssa.ClassParamOut
	case PAUTO:
		return ssa.ClassAuto
	default:
		Fatalf("untranslatable storage class for %v: %s", n, n.Class())
		return 0
	}
}
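// Example (hypothetical source, for orientation only): given
//
//	func add(a, b int) (sum int) {
//		tmp := a + b
//		sum = tmp
//		return
//	}
//
// StorageClass reports ClassParam for a and b, ClassParamOut for sum, and
// ClassAuto for tmp once it appears in the function's Dcl list.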