github.com/corona10/go@v0.0.0-20180224231303-7a218942be57/src/cmd/compile/internal/gc/ssa.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

func initssaconfig() {
	types_ := ssa.Types{
		Bool:       types.Types[TBOOL],
		Int8:       types.Types[TINT8],
		Int16:      types.Types[TINT16],
		Int32:      types.Types[TINT32],
		Int64:      types.Types[TINT64],
		UInt8:      types.Types[TUINT8],
		UInt16:     types.Types[TUINT16],
		UInt32:     types.Types[TUINT32],
		UInt64:     types.Types[TUINT64],
		Float32:    types.Types[TFLOAT32],
		Float64:    types.Types[TFLOAT64],
		Int:        types.Types[TINT],
		UInt:       types.Types[TUINT],
		Uintptr:    types.Types[TUINTPTR],
		String:     types.Types[TSTRING],
		BytePtr:    types.NewPtr(types.Types[TUINT8]),
		Int32Ptr:   types.NewPtr(types.Types[TINT32]),
		UInt32Ptr:  types.NewPtr(types.Types[TUINT32]),
		IntPtr:     types.NewPtr(types.Types[TINT]),
		UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
		Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
		Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
		BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
	}

	if thearch.SoftFloat {
		softfloatInit()
	}

	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
	_ = types.NewPtr(types.NewPtr(types.Idealstring))                 // **string
	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewSlice(types.Idealstring))               // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[TINT16])                             // *int16
	_ = types.NewPtr(types.Types[TINT64])                             // *int64
	_ = types.NewPtr(types.Errortype)                                 // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
	if thearch.LinkArch.Name == "386" {
		ssaConfig.Set387(thearch.Use387)
	}
	ssaConfig.SoftFloat = thearch.SoftFloat
	ssaCaches = make([]ssa.Cache, nBackendWorkers)

	// Set up some runtime functions we'll need to call.
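	// Note: sysfunc looks up a function in the runtime package by name;
	// the symbols recorded below are used later to emit static calls,
	// e.g. s.rtcall(Deferreturn, ...) in (*state).exit for functions
	// that contain a defer. See the individual call sites in this file.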
	Newproc = sysfunc("newproc")
	Deferproc = sysfunc("deferproc")
	Deferreturn = sysfunc("deferreturn")
	Duffcopy = sysfunc("duffcopy")
	Duffzero = sysfunc("duffzero")
	panicindex = sysfunc("panicindex")
	panicslice = sysfunc("panicslice")
	panicdivide = sysfunc("panicdivide")
	growslice = sysfunc("growslice")
	panicdottypeE = sysfunc("panicdottypeE")
	panicdottypeI = sysfunc("panicdottypeI")
	panicnildottype = sysfunc("panicnildottype")
	assertE2I = sysfunc("assertE2I")
	assertE2I2 = sysfunc("assertE2I2")
	assertI2I = sysfunc("assertI2I")
	assertI2I2 = sysfunc("assertI2I2")
	goschedguarded = sysfunc("goschedguarded")
	writeBarrier = sysfunc("writeBarrier")
	gcWriteBarrier = sysfunc("gcWriteBarrier")
	typedmemmove = sysfunc("typedmemmove")
	typedmemclr = sysfunc("typedmemclr")
	Udiv = sysfunc("udiv")

	// GO386=387 runtime functions
	ControlWord64trunc = sysfunc("controlWord64trunc")
	ControlWord32 = sysfunc("controlWord32")
}

// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
	name := fn.funcname()
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	s.hasdefer = fn.Func.HasDefer()
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Config = ssaConfig
	s.f.Cache = &ssaCaches[worker]
	s.f.Cache.Reset()
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
	s.f.Name = name
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}
	s.softFloat = s.config.SoftFloat

	if name == os.Getenv("GOSSAFUNC") {
		s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class() {
		case PPARAM, PPARAMOUT:
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), n, s.sp)
			if n.Class() == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
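			// Note: only parameters and results are entered into
			// s.decladdrs (see the PPARAM/PPARAMOUT case above);
			// addresses of locals are instead computed lazily by
			// s.addr at each use.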
183 case PAUTOHEAP: 184 // moved to heap - already handled by frontend 185 case PFUNC: 186 // local function - already handled by frontend 187 default: 188 s.Fatalf("local variable with class %v unimplemented", n.Class()) 189 } 190 } 191 192 // Populate SSAable arguments. 193 for _, n := range fn.Func.Dcl { 194 if n.Class() == PPARAM && s.canSSA(n) { 195 s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n) 196 } 197 } 198 199 // Convert the AST-based IR to the SSA-based IR 200 s.stmtList(fn.Func.Enter) 201 s.stmtList(fn.Nbody) 202 203 // fallthrough to exit 204 if s.curBlock != nil { 205 s.pushLine(fn.Func.Endlineno) 206 s.exit() 207 s.popLine() 208 } 209 210 for _, b := range s.f.Blocks { 211 if b.Pos != src.NoXPos { 212 s.updateUnsetPredPos(b) 213 } 214 } 215 216 s.insertPhis() 217 218 // Don't carry reference this around longer than necessary 219 s.exitCode = Nodes{} 220 221 // Main call to ssa package to compile function 222 ssa.Compile(s.f) 223 return s.f 224 } 225 226 // updateUnsetPredPos propagates the earliest-value position information for b 227 // towards all of b's predecessors that need a position, and recurs on that 228 // predecessor if its position is updated. B should have a non-empty position. 229 func (s *state) updateUnsetPredPos(b *ssa.Block) { 230 if b.Pos == src.NoXPos { 231 s.Fatalf("Block %s should have a position", b) 232 } 233 bestPos := src.NoXPos 234 for _, e := range b.Preds { 235 p := e.Block() 236 if !p.LackingPos() { 237 continue 238 } 239 if bestPos == src.NoXPos { 240 bestPos = b.Pos 241 for _, v := range b.Values { 242 if v.LackingPos() { 243 continue 244 } 245 if v.Pos != src.NoXPos { 246 // Assume values are still in roughly textual order; 247 // TODO: could also seek minimum position? 248 bestPos = v.Pos 249 break 250 } 251 } 252 } 253 p.Pos = bestPos 254 s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay. 255 } 256 return 257 } 258 259 type state struct { 260 // configuration (arch) information 261 config *ssa.Config 262 263 // function we're building 264 f *ssa.Func 265 266 // Node for function 267 curfn *Node 268 269 // labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f 270 labels map[string]*ssaLabel 271 labeledNodes map[*Node]*ssaLabel 272 273 // Code that must precede any return 274 // (e.g., copying value of heap-escaped paramout back to true paramout) 275 exitCode Nodes 276 277 // unlabeled break and continue statement tracking 278 breakTo *ssa.Block // current target for plain break statement 279 continueTo *ssa.Block // current target for plain continue statement 280 281 // current location where we're interpreting the AST 282 curBlock *ssa.Block 283 284 // variable assignments in the current block (map from variable symbol to ssa value) 285 // *Node is the unique identifier (an ONAME Node) for the variable. 286 // TODO: keep a single varnum map, then make all of these maps slices instead? 287 vars map[*Node]*ssa.Value 288 289 // fwdVars are variables that are used before they are defined in the current block. 290 // This map exists just to coalesce multiple references into a single FwdRef op. 291 // *Node is the unique identifier (an ONAME Node) for the variable. 292 fwdVars map[*Node]*ssa.Value 293 294 // all defined variables at the end of each block. Indexed by block ID. 295 defvars []map[*Node]*ssa.Value 296 297 // addresses of PPARAM and PPARAMOUT variables. 298 decladdrs map[*Node]*ssa.Value 299 300 // starting values. 
Memory, stack pointer, and globals pointer 301 startmem *ssa.Value 302 sp *ssa.Value 303 sb *ssa.Value 304 305 // line number stack. The current line number is top of stack 306 line []src.XPos 307 // the last line number processed; it may have been popped 308 lastPos src.XPos 309 310 // list of panic calls by function name and line number. 311 // Used to deduplicate panic calls. 312 panics map[funcLine]*ssa.Block 313 314 // list of PPARAMOUT (return) variables. 315 returns []*Node 316 317 cgoUnsafeArgs bool 318 hasdefer bool // whether the function contains a defer statement 319 softFloat bool 320 } 321 322 type funcLine struct { 323 f *obj.LSym 324 base *src.PosBase 325 line uint 326 } 327 328 type ssaLabel struct { 329 target *ssa.Block // block identified by this label 330 breakTarget *ssa.Block // block to break to in control flow node identified by this label 331 continueTarget *ssa.Block // block to continue to in control flow node identified by this label 332 } 333 334 // label returns the label associated with sym, creating it if necessary. 335 func (s *state) label(sym *types.Sym) *ssaLabel { 336 lab := s.labels[sym.Name] 337 if lab == nil { 338 lab = new(ssaLabel) 339 s.labels[sym.Name] = lab 340 } 341 return lab 342 } 343 344 func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) } 345 func (s *state) Log() bool { return s.f.Log() } 346 func (s *state) Fatalf(msg string, args ...interface{}) { 347 s.f.Frontend().Fatalf(s.peekPos(), msg, args...) 348 } 349 func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) } 350 func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() } 351 352 var ( 353 // dummy node for the memory variable 354 memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}} 355 356 // dummy nodes for temporary variables 357 ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}} 358 lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}} 359 newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}} 360 capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}} 361 typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}} 362 okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}} 363 ) 364 365 // startBlock sets the current block we're generating code in to b. 366 func (s *state) startBlock(b *ssa.Block) { 367 if s.curBlock != nil { 368 s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock) 369 } 370 s.curBlock = b 371 s.vars = map[*Node]*ssa.Value{} 372 for n := range s.fwdVars { 373 delete(s.fwdVars, n) 374 } 375 } 376 377 // endBlock marks the end of generating code for the current block. 378 // Returns the (former) current block. Returns nil if there is no current 379 // block, i.e. if no code flows to the current execution point. 380 func (s *state) endBlock() *ssa.Block { 381 b := s.curBlock 382 if b == nil { 383 return nil 384 } 385 for len(s.defvars) <= int(b.ID) { 386 s.defvars = append(s.defvars, nil) 387 } 388 s.defvars[b.ID] = s.vars 389 s.curBlock = nil 390 s.vars = nil 391 if b.LackingPos() { 392 // Empty plain blocks get the line of their successor (handled after all blocks created), 393 // except for increment blocks in For statements (handled in ssa conversion of OFOR), 394 // and for blocks ending in GOTO/BREAK/CONTINUE. 395 b.Pos = src.NoXPos 396 } else { 397 b.Pos = s.lastPos 398 } 399 return b 400 } 401 402 // pushLine pushes a line number on the line number stack. 
403 func (s *state) pushLine(line src.XPos) { 404 if !line.IsKnown() { 405 // the frontend may emit node with line number missing, 406 // use the parent line number in this case. 407 line = s.peekPos() 408 if Debug['K'] != 0 { 409 Warn("buildssa: unknown position (line 0)") 410 } 411 } else { 412 s.lastPos = line 413 } 414 415 s.line = append(s.line, line) 416 } 417 418 // popLine pops the top of the line number stack. 419 func (s *state) popLine() { 420 s.line = s.line[:len(s.line)-1] 421 } 422 423 // peekPos peeks the top of the line number stack. 424 func (s *state) peekPos() src.XPos { 425 return s.line[len(s.line)-1] 426 } 427 428 // newValue0 adds a new value with no arguments to the current block. 429 func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value { 430 return s.curBlock.NewValue0(s.peekPos(), op, t) 431 } 432 433 // newValue0A adds a new value with no arguments and an aux value to the current block. 434 func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value { 435 return s.curBlock.NewValue0A(s.peekPos(), op, t, aux) 436 } 437 438 // newValue0I adds a new value with no arguments and an auxint value to the current block. 439 func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value { 440 return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint) 441 } 442 443 // newValue1 adds a new value with one argument to the current block. 444 func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value { 445 return s.curBlock.NewValue1(s.peekPos(), op, t, arg) 446 } 447 448 // newValue1A adds a new value with one argument and an aux value to the current block. 449 func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value { 450 return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg) 451 } 452 453 // newValue1I adds a new value with one argument and an auxint value to the current block. 454 func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value { 455 return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg) 456 } 457 458 // newValue2 adds a new value with two arguments to the current block. 459 func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value { 460 return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1) 461 } 462 463 // newValue2I adds a new value with two arguments and an auxint value to the current block. 464 func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value { 465 return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1) 466 } 467 468 // newValue3 adds a new value with three arguments to the current block. 469 func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value { 470 return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2) 471 } 472 473 // newValue3I adds a new value with three arguments and an auxint value to the current block. 474 func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value { 475 return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2) 476 } 477 478 // newValue3A adds a new value with three arguments and an aux value to the current block. 479 func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value { 480 return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2) 481 } 482 483 // newValue4 adds a new value with four arguments to the current block. 
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.f.Entry.NewValue0(src.NoXPos, op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
	return s.f.ConstSlice(s.peekPos(), t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
	return s.f.ConstInterface(s.peekPos(), t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
	return s.f.ConstEmptyString(s.peekPos(), t)
}
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}

// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when
emitting soft-float code). 565 func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value { 566 if s.softFloat { 567 if c, ok := s.sfcall(op, arg); ok { 568 return c 569 } 570 } 571 return s.newValue1(op, t, arg) 572 } 573 func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value { 574 if s.softFloat { 575 if c, ok := s.sfcall(op, arg0, arg1); ok { 576 return c 577 } 578 } 579 return s.newValue2(op, t, arg0, arg1) 580 } 581 582 // stmtList converts the statement list n to SSA and adds it to s. 583 func (s *state) stmtList(l Nodes) { 584 for _, n := range l.Slice() { 585 s.stmt(n) 586 } 587 } 588 589 // stmt converts the statement n to SSA and adds it to s. 590 func (s *state) stmt(n *Node) { 591 if !(n.Op == OVARKILL || n.Op == OVARLIVE) { 592 // OVARKILL and OVARLIVE are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging. 593 s.pushLine(n.Pos) 594 defer s.popLine() 595 } 596 597 // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere), 598 // then this code is dead. Stop here. 599 if s.curBlock == nil && n.Op != OLABEL { 600 return 601 } 602 603 s.stmtList(n.Ninit) 604 switch n.Op { 605 606 case OBLOCK: 607 s.stmtList(n.List) 608 609 // No-ops 610 case OEMPTY, ODCLCONST, ODCLTYPE, OFALL: 611 612 // Expression statements 613 case OCALLFUNC: 614 if isIntrinsicCall(n) { 615 s.intrinsicCall(n) 616 return 617 } 618 fallthrough 619 620 case OCALLMETH, OCALLINTER: 621 s.call(n, callNormal) 622 if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC { 623 if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" || 624 n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") { 625 m := s.mem() 626 b := s.endBlock() 627 b.Kind = ssa.BlockExit 628 b.SetControl(m) 629 // TODO: never rewrite OPANIC to OCALLFUNC in the 630 // first place. Need to wait until all backends 631 // go through SSA. 632 } 633 } 634 case ODEFER: 635 s.call(n.Left, callDefer) 636 case OPROC: 637 s.call(n.Left, callGo) 638 639 case OAS2DOTTYPE: 640 res, resok := s.dottype(n.Rlist.First(), true) 641 deref := false 642 if !canSSAType(n.Rlist.First().Type) { 643 if res.Op != ssa.OpLoad { 644 s.Fatalf("dottype of non-load") 645 } 646 mem := s.mem() 647 if mem.Op == ssa.OpVarKill { 648 mem = mem.Args[0] 649 } 650 if res.Args[1] != mem { 651 s.Fatalf("memory no longer live from 2-result dottype load") 652 } 653 deref = true 654 res = res.Args[0] 655 } 656 s.assign(n.List.First(), res, deref, 0) 657 s.assign(n.List.Second(), resok, false, 0) 658 return 659 660 case OAS2FUNC: 661 // We come here only when it is an intrinsic call returning two values. 662 if !isIntrinsicCall(n.Rlist.First()) { 663 s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First()) 664 } 665 v := s.intrinsicCall(n.Rlist.First()) 666 v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v) 667 v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v) 668 s.assign(n.List.First(), v1, false, 0) 669 s.assign(n.List.Second(), v2, false, 0) 670 return 671 672 case ODCL: 673 if n.Left.Class() == PAUTOHEAP { 674 Fatalf("DCL %v", n) 675 } 676 677 case OLABEL: 678 sym := n.Left.Sym 679 lab := s.label(sym) 680 681 // Associate label with its control flow node, if any 682 if ctl := n.labeledControl(); ctl != nil { 683 s.labeledNodes[ctl] = lab 684 } 685 686 // The label might already have a target block via a goto. 
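		// Note: for a forward goto such as
		//	goto done
		//	...
		//	done:
		// the OGOTO case below has already allocated lab.target by the
		// time this OLABEL is reached, so that block is reused here.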
687 if lab.target == nil { 688 lab.target = s.f.NewBlock(ssa.BlockPlain) 689 } 690 691 // Go to that label. 692 // (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.) 693 if s.curBlock != nil { 694 b := s.endBlock() 695 b.AddEdgeTo(lab.target) 696 } 697 s.startBlock(lab.target) 698 699 case OGOTO: 700 sym := n.Left.Sym 701 702 lab := s.label(sym) 703 if lab.target == nil { 704 lab.target = s.f.NewBlock(ssa.BlockPlain) 705 } 706 707 b := s.endBlock() 708 b.Pos = s.lastPos // Do this even if b is an empty block. 709 b.AddEdgeTo(lab.target) 710 711 case OAS: 712 if n.Left == n.Right && n.Left.Op == ONAME { 713 // An x=x assignment. No point in doing anything 714 // here. In addition, skipping this assignment 715 // prevents generating: 716 // VARDEF x 717 // COPY x -> x 718 // which is bad because x is incorrectly considered 719 // dead before the vardef. See issue #14904. 720 return 721 } 722 723 // Evaluate RHS. 724 rhs := n.Right 725 if rhs != nil { 726 switch rhs.Op { 727 case OSTRUCTLIT, OARRAYLIT, OSLICELIT: 728 // All literals with nonzero fields have already been 729 // rewritten during walk. Any that remain are just T{} 730 // or equivalents. Use the zero value. 731 if !iszero(rhs) { 732 Fatalf("literal with nonzero value in SSA: %v", rhs) 733 } 734 rhs = nil 735 case OAPPEND: 736 // Check whether we're writing the result of an append back to the same slice. 737 // If so, we handle it specially to avoid write barriers on the fast 738 // (non-growth) path. 739 if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 { 740 break 741 } 742 // If the slice can be SSA'd, it'll be on the stack, 743 // so there will be no write barriers, 744 // so there's no need to attempt to prevent them. 745 if s.canSSA(n.Left) { 746 if Debug_append > 0 { // replicating old diagnostic message 747 Warnl(n.Pos, "append: len-only update (in local slice)") 748 } 749 break 750 } 751 if Debug_append > 0 { 752 Warnl(n.Pos, "append: len-only update") 753 } 754 s.append(rhs, true) 755 return 756 } 757 } 758 759 if isblank(n.Left) { 760 // _ = rhs 761 // Just evaluate rhs for side-effects. 762 if rhs != nil { 763 s.expr(rhs) 764 } 765 return 766 } 767 768 var t *types.Type 769 if n.Right != nil { 770 t = n.Right.Type 771 } else { 772 t = n.Left.Type 773 } 774 775 var r *ssa.Value 776 deref := !canSSAType(t) 777 if deref { 778 if rhs == nil { 779 r = nil // Signal assign to use OpZero. 780 } else { 781 r = s.addr(rhs, false) 782 } 783 } else { 784 if rhs == nil { 785 r = s.zeroVal(t) 786 } else { 787 r = s.expr(rhs) 788 } 789 } 790 791 var skip skipMask 792 if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) { 793 // We're assigning a slicing operation back to its source. 794 // Don't write back fields we aren't changing. See issue #14855. 795 i, j, k := rhs.SliceBounds() 796 if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) { 797 // [0:...] is the same as [:...] 798 i = nil 799 } 800 // TODO: detect defaults for len/cap also. 
801 // Currently doesn't really work because (*p)[:len(*p)] appears here as: 802 // tmp = len(*p) 803 // (*p)[:tmp] 804 //if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) { 805 // j = nil 806 //} 807 //if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) { 808 // k = nil 809 //} 810 if i == nil { 811 skip |= skipPtr 812 if j == nil { 813 skip |= skipLen 814 } 815 if k == nil { 816 skip |= skipCap 817 } 818 } 819 } 820 821 s.assign(n.Left, r, deref, skip) 822 823 case OIF: 824 bThen := s.f.NewBlock(ssa.BlockPlain) 825 bEnd := s.f.NewBlock(ssa.BlockPlain) 826 var bElse *ssa.Block 827 var likely int8 828 if n.Likely() { 829 likely = 1 830 } 831 if n.Rlist.Len() != 0 { 832 bElse = s.f.NewBlock(ssa.BlockPlain) 833 s.condBranch(n.Left, bThen, bElse, likely) 834 } else { 835 s.condBranch(n.Left, bThen, bEnd, likely) 836 } 837 838 s.startBlock(bThen) 839 s.stmtList(n.Nbody) 840 if b := s.endBlock(); b != nil { 841 b.AddEdgeTo(bEnd) 842 } 843 844 if n.Rlist.Len() != 0 { 845 s.startBlock(bElse) 846 s.stmtList(n.Rlist) 847 if b := s.endBlock(); b != nil { 848 b.AddEdgeTo(bEnd) 849 } 850 } 851 s.startBlock(bEnd) 852 853 case ORETURN: 854 s.stmtList(n.List) 855 b := s.exit() 856 b.Pos = s.lastPos 857 858 case ORETJMP: 859 s.stmtList(n.List) 860 b := s.exit() 861 b.Kind = ssa.BlockRetJmp // override BlockRet 862 b.Aux = n.Sym.Linksym() 863 864 case OCONTINUE, OBREAK: 865 var to *ssa.Block 866 if n.Left == nil { 867 // plain break/continue 868 switch n.Op { 869 case OCONTINUE: 870 to = s.continueTo 871 case OBREAK: 872 to = s.breakTo 873 } 874 } else { 875 // labeled break/continue; look up the target 876 sym := n.Left.Sym 877 lab := s.label(sym) 878 switch n.Op { 879 case OCONTINUE: 880 to = lab.continueTarget 881 case OBREAK: 882 to = lab.breakTarget 883 } 884 } 885 886 b := s.endBlock() 887 b.Pos = s.lastPos // Do this even if b is an empty block. 888 b.AddEdgeTo(to) 889 890 case OFOR, OFORUNTIL: 891 // OFOR: for Ninit; Left; Right { Nbody } 892 // For = cond; body; incr 893 // Foruntil = body; incr; cond 894 bCond := s.f.NewBlock(ssa.BlockPlain) 895 bBody := s.f.NewBlock(ssa.BlockPlain) 896 bIncr := s.f.NewBlock(ssa.BlockPlain) 897 bEnd := s.f.NewBlock(ssa.BlockPlain) 898 899 // first, jump to condition test (OFOR) or body (OFORUNTIL) 900 b := s.endBlock() 901 if n.Op == OFOR { 902 b.AddEdgeTo(bCond) 903 // generate code to test condition 904 s.startBlock(bCond) 905 if n.Left != nil { 906 s.condBranch(n.Left, bBody, bEnd, 1) 907 } else { 908 b := s.endBlock() 909 b.Kind = ssa.BlockPlain 910 b.AddEdgeTo(bBody) 911 } 912 913 } else { 914 b.AddEdgeTo(bBody) 915 } 916 917 // set up for continue/break in body 918 prevContinue := s.continueTo 919 prevBreak := s.breakTo 920 s.continueTo = bIncr 921 s.breakTo = bEnd 922 lab := s.labeledNodes[n] 923 if lab != nil { 924 // labeled for loop 925 lab.continueTarget = bIncr 926 lab.breakTarget = bEnd 927 } 928 929 // generate body 930 s.startBlock(bBody) 931 s.stmtList(n.Nbody) 932 933 // tear down continue/break 934 s.continueTo = prevContinue 935 s.breakTo = prevBreak 936 if lab != nil { 937 lab.continueTarget = nil 938 lab.breakTarget = nil 939 } 940 941 // done with body, goto incr 942 if b := s.endBlock(); b != nil { 943 b.AddEdgeTo(bIncr) 944 } 945 946 // generate incr 947 s.startBlock(bIncr) 948 if n.Right != nil { 949 s.stmt(n.Right) 950 } 951 if b := s.endBlock(); b != nil { 952 b.AddEdgeTo(bCond) 953 // It can happen that bIncr ends in a block containing only VARKILL, 954 // and that muddles the debugging experience. 
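			// Note: borrowing bCond's position below gives the otherwise
			// position-less increment block a sensible source line.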
955 if n.Op != OFORUNTIL && b.Pos == src.NoXPos { 956 b.Pos = bCond.Pos 957 } 958 } 959 960 if n.Op == OFORUNTIL { 961 // generate code to test condition 962 s.startBlock(bCond) 963 if n.Left != nil { 964 s.condBranch(n.Left, bBody, bEnd, 1) 965 } else { 966 b := s.endBlock() 967 b.Kind = ssa.BlockPlain 968 b.AddEdgeTo(bBody) 969 } 970 } 971 972 s.startBlock(bEnd) 973 974 case OSWITCH, OSELECT: 975 // These have been mostly rewritten by the front end into their Nbody fields. 976 // Our main task is to correctly hook up any break statements. 977 bEnd := s.f.NewBlock(ssa.BlockPlain) 978 979 prevBreak := s.breakTo 980 s.breakTo = bEnd 981 lab := s.labeledNodes[n] 982 if lab != nil { 983 // labeled 984 lab.breakTarget = bEnd 985 } 986 987 // generate body code 988 s.stmtList(n.Nbody) 989 990 s.breakTo = prevBreak 991 if lab != nil { 992 lab.breakTarget = nil 993 } 994 995 // walk adds explicit OBREAK nodes to the end of all reachable code paths. 996 // If we still have a current block here, then mark it unreachable. 997 if s.curBlock != nil { 998 m := s.mem() 999 b := s.endBlock() 1000 b.Kind = ssa.BlockExit 1001 b.SetControl(m) 1002 } 1003 s.startBlock(bEnd) 1004 1005 case OVARKILL: 1006 // Insert a varkill op to record that a variable is no longer live. 1007 // We only care about liveness info at call sites, so putting the 1008 // varkill in the store chain is enough to keep it correctly ordered 1009 // with respect to call ops. 1010 if !s.canSSA(n.Left) { 1011 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, n.Left, s.mem()) 1012 } 1013 1014 case OVARLIVE: 1015 // Insert a varlive op to record that a variable is still live. 1016 if !n.Left.Addrtaken() { 1017 s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left) 1018 } 1019 switch n.Left.Class() { 1020 case PAUTO, PPARAM, PPARAMOUT: 1021 default: 1022 s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left) 1023 } 1024 s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem()) 1025 1026 case OCHECKNIL: 1027 p := s.expr(n.Left) 1028 s.nilCheck(p) 1029 1030 default: 1031 s.Fatalf("unhandled stmt %v", n.Op) 1032 } 1033 } 1034 1035 // exit processes any code that needs to be generated just before returning. 1036 // It returns a BlockRet block that ends the control flow. Its control value 1037 // will be set to the final memory state. 1038 func (s *state) exit() *ssa.Block { 1039 if s.hasdefer { 1040 s.rtcall(Deferreturn, true, nil) 1041 } 1042 1043 // Run exit code. Typically, this code copies heap-allocated PPARAMOUT 1044 // variables back to the stack. 1045 s.stmtList(s.exitCode) 1046 1047 // Store SSAable PPARAMOUT variables back to stack locations. 1048 for _, n := range s.returns { 1049 addr := s.decladdrs[n] 1050 val := s.variable(n, n.Type) 1051 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) 1052 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, n.Type, addr, val, s.mem()) 1053 // TODO: if val is ever spilled, we'd like to use the 1054 // PPARAMOUT slot for spilling it. That won't happen 1055 // currently. 1056 } 1057 1058 // Do actual return. 
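	// Note: the exit sequence built above and below is, roughly:
	//	mem = VarDef <result> mem
	//	mem = Store <result type> addr val mem   (one pair per SSA-able result)
	//	Ret mem
	// i.e. a BlockRet block whose control value is the final memory state.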
1059 m := s.mem() 1060 b := s.endBlock() 1061 b.Kind = ssa.BlockRet 1062 b.SetControl(m) 1063 return b 1064 } 1065 1066 type opAndType struct { 1067 op Op 1068 etype types.EType 1069 } 1070 1071 var opToSSA = map[opAndType]ssa.Op{ 1072 opAndType{OADD, TINT8}: ssa.OpAdd8, 1073 opAndType{OADD, TUINT8}: ssa.OpAdd8, 1074 opAndType{OADD, TINT16}: ssa.OpAdd16, 1075 opAndType{OADD, TUINT16}: ssa.OpAdd16, 1076 opAndType{OADD, TINT32}: ssa.OpAdd32, 1077 opAndType{OADD, TUINT32}: ssa.OpAdd32, 1078 opAndType{OADD, TPTR32}: ssa.OpAdd32, 1079 opAndType{OADD, TINT64}: ssa.OpAdd64, 1080 opAndType{OADD, TUINT64}: ssa.OpAdd64, 1081 opAndType{OADD, TPTR64}: ssa.OpAdd64, 1082 opAndType{OADD, TFLOAT32}: ssa.OpAdd32F, 1083 opAndType{OADD, TFLOAT64}: ssa.OpAdd64F, 1084 1085 opAndType{OSUB, TINT8}: ssa.OpSub8, 1086 opAndType{OSUB, TUINT8}: ssa.OpSub8, 1087 opAndType{OSUB, TINT16}: ssa.OpSub16, 1088 opAndType{OSUB, TUINT16}: ssa.OpSub16, 1089 opAndType{OSUB, TINT32}: ssa.OpSub32, 1090 opAndType{OSUB, TUINT32}: ssa.OpSub32, 1091 opAndType{OSUB, TINT64}: ssa.OpSub64, 1092 opAndType{OSUB, TUINT64}: ssa.OpSub64, 1093 opAndType{OSUB, TFLOAT32}: ssa.OpSub32F, 1094 opAndType{OSUB, TFLOAT64}: ssa.OpSub64F, 1095 1096 opAndType{ONOT, TBOOL}: ssa.OpNot, 1097 1098 opAndType{OMINUS, TINT8}: ssa.OpNeg8, 1099 opAndType{OMINUS, TUINT8}: ssa.OpNeg8, 1100 opAndType{OMINUS, TINT16}: ssa.OpNeg16, 1101 opAndType{OMINUS, TUINT16}: ssa.OpNeg16, 1102 opAndType{OMINUS, TINT32}: ssa.OpNeg32, 1103 opAndType{OMINUS, TUINT32}: ssa.OpNeg32, 1104 opAndType{OMINUS, TINT64}: ssa.OpNeg64, 1105 opAndType{OMINUS, TUINT64}: ssa.OpNeg64, 1106 opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F, 1107 opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F, 1108 1109 opAndType{OCOM, TINT8}: ssa.OpCom8, 1110 opAndType{OCOM, TUINT8}: ssa.OpCom8, 1111 opAndType{OCOM, TINT16}: ssa.OpCom16, 1112 opAndType{OCOM, TUINT16}: ssa.OpCom16, 1113 opAndType{OCOM, TINT32}: ssa.OpCom32, 1114 opAndType{OCOM, TUINT32}: ssa.OpCom32, 1115 opAndType{OCOM, TINT64}: ssa.OpCom64, 1116 opAndType{OCOM, TUINT64}: ssa.OpCom64, 1117 1118 opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag, 1119 opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag, 1120 opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal, 1121 opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal, 1122 1123 opAndType{OMUL, TINT8}: ssa.OpMul8, 1124 opAndType{OMUL, TUINT8}: ssa.OpMul8, 1125 opAndType{OMUL, TINT16}: ssa.OpMul16, 1126 opAndType{OMUL, TUINT16}: ssa.OpMul16, 1127 opAndType{OMUL, TINT32}: ssa.OpMul32, 1128 opAndType{OMUL, TUINT32}: ssa.OpMul32, 1129 opAndType{OMUL, TINT64}: ssa.OpMul64, 1130 opAndType{OMUL, TUINT64}: ssa.OpMul64, 1131 opAndType{OMUL, TFLOAT32}: ssa.OpMul32F, 1132 opAndType{OMUL, TFLOAT64}: ssa.OpMul64F, 1133 1134 opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F, 1135 opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F, 1136 1137 opAndType{ODIV, TINT8}: ssa.OpDiv8, 1138 opAndType{ODIV, TUINT8}: ssa.OpDiv8u, 1139 opAndType{ODIV, TINT16}: ssa.OpDiv16, 1140 opAndType{ODIV, TUINT16}: ssa.OpDiv16u, 1141 opAndType{ODIV, TINT32}: ssa.OpDiv32, 1142 opAndType{ODIV, TUINT32}: ssa.OpDiv32u, 1143 opAndType{ODIV, TINT64}: ssa.OpDiv64, 1144 opAndType{ODIV, TUINT64}: ssa.OpDiv64u, 1145 1146 opAndType{OMOD, TINT8}: ssa.OpMod8, 1147 opAndType{OMOD, TUINT8}: ssa.OpMod8u, 1148 opAndType{OMOD, TINT16}: ssa.OpMod16, 1149 opAndType{OMOD, TUINT16}: ssa.OpMod16u, 1150 opAndType{OMOD, TINT32}: ssa.OpMod32, 1151 opAndType{OMOD, TUINT32}: ssa.OpMod32u, 1152 opAndType{OMOD, TINT64}: ssa.OpMod64, 1153 opAndType{OMOD, TUINT64}: ssa.OpMod64u, 1154 1155 opAndType{OAND, TINT8}: 
ssa.OpAnd8, 1156 opAndType{OAND, TUINT8}: ssa.OpAnd8, 1157 opAndType{OAND, TINT16}: ssa.OpAnd16, 1158 opAndType{OAND, TUINT16}: ssa.OpAnd16, 1159 opAndType{OAND, TINT32}: ssa.OpAnd32, 1160 opAndType{OAND, TUINT32}: ssa.OpAnd32, 1161 opAndType{OAND, TINT64}: ssa.OpAnd64, 1162 opAndType{OAND, TUINT64}: ssa.OpAnd64, 1163 1164 opAndType{OOR, TINT8}: ssa.OpOr8, 1165 opAndType{OOR, TUINT8}: ssa.OpOr8, 1166 opAndType{OOR, TINT16}: ssa.OpOr16, 1167 opAndType{OOR, TUINT16}: ssa.OpOr16, 1168 opAndType{OOR, TINT32}: ssa.OpOr32, 1169 opAndType{OOR, TUINT32}: ssa.OpOr32, 1170 opAndType{OOR, TINT64}: ssa.OpOr64, 1171 opAndType{OOR, TUINT64}: ssa.OpOr64, 1172 1173 opAndType{OXOR, TINT8}: ssa.OpXor8, 1174 opAndType{OXOR, TUINT8}: ssa.OpXor8, 1175 opAndType{OXOR, TINT16}: ssa.OpXor16, 1176 opAndType{OXOR, TUINT16}: ssa.OpXor16, 1177 opAndType{OXOR, TINT32}: ssa.OpXor32, 1178 opAndType{OXOR, TUINT32}: ssa.OpXor32, 1179 opAndType{OXOR, TINT64}: ssa.OpXor64, 1180 opAndType{OXOR, TUINT64}: ssa.OpXor64, 1181 1182 opAndType{OEQ, TBOOL}: ssa.OpEqB, 1183 opAndType{OEQ, TINT8}: ssa.OpEq8, 1184 opAndType{OEQ, TUINT8}: ssa.OpEq8, 1185 opAndType{OEQ, TINT16}: ssa.OpEq16, 1186 opAndType{OEQ, TUINT16}: ssa.OpEq16, 1187 opAndType{OEQ, TINT32}: ssa.OpEq32, 1188 opAndType{OEQ, TUINT32}: ssa.OpEq32, 1189 opAndType{OEQ, TINT64}: ssa.OpEq64, 1190 opAndType{OEQ, TUINT64}: ssa.OpEq64, 1191 opAndType{OEQ, TINTER}: ssa.OpEqInter, 1192 opAndType{OEQ, TSLICE}: ssa.OpEqSlice, 1193 opAndType{OEQ, TFUNC}: ssa.OpEqPtr, 1194 opAndType{OEQ, TMAP}: ssa.OpEqPtr, 1195 opAndType{OEQ, TCHAN}: ssa.OpEqPtr, 1196 opAndType{OEQ, TPTR32}: ssa.OpEqPtr, 1197 opAndType{OEQ, TPTR64}: ssa.OpEqPtr, 1198 opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr, 1199 opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr, 1200 opAndType{OEQ, TFLOAT64}: ssa.OpEq64F, 1201 opAndType{OEQ, TFLOAT32}: ssa.OpEq32F, 1202 1203 opAndType{ONE, TBOOL}: ssa.OpNeqB, 1204 opAndType{ONE, TINT8}: ssa.OpNeq8, 1205 opAndType{ONE, TUINT8}: ssa.OpNeq8, 1206 opAndType{ONE, TINT16}: ssa.OpNeq16, 1207 opAndType{ONE, TUINT16}: ssa.OpNeq16, 1208 opAndType{ONE, TINT32}: ssa.OpNeq32, 1209 opAndType{ONE, TUINT32}: ssa.OpNeq32, 1210 opAndType{ONE, TINT64}: ssa.OpNeq64, 1211 opAndType{ONE, TUINT64}: ssa.OpNeq64, 1212 opAndType{ONE, TINTER}: ssa.OpNeqInter, 1213 opAndType{ONE, TSLICE}: ssa.OpNeqSlice, 1214 opAndType{ONE, TFUNC}: ssa.OpNeqPtr, 1215 opAndType{ONE, TMAP}: ssa.OpNeqPtr, 1216 opAndType{ONE, TCHAN}: ssa.OpNeqPtr, 1217 opAndType{ONE, TPTR32}: ssa.OpNeqPtr, 1218 opAndType{ONE, TPTR64}: ssa.OpNeqPtr, 1219 opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr, 1220 opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr, 1221 opAndType{ONE, TFLOAT64}: ssa.OpNeq64F, 1222 opAndType{ONE, TFLOAT32}: ssa.OpNeq32F, 1223 1224 opAndType{OLT, TINT8}: ssa.OpLess8, 1225 opAndType{OLT, TUINT8}: ssa.OpLess8U, 1226 opAndType{OLT, TINT16}: ssa.OpLess16, 1227 opAndType{OLT, TUINT16}: ssa.OpLess16U, 1228 opAndType{OLT, TINT32}: ssa.OpLess32, 1229 opAndType{OLT, TUINT32}: ssa.OpLess32U, 1230 opAndType{OLT, TINT64}: ssa.OpLess64, 1231 opAndType{OLT, TUINT64}: ssa.OpLess64U, 1232 opAndType{OLT, TFLOAT64}: ssa.OpLess64F, 1233 opAndType{OLT, TFLOAT32}: ssa.OpLess32F, 1234 1235 opAndType{OGT, TINT8}: ssa.OpGreater8, 1236 opAndType{OGT, TUINT8}: ssa.OpGreater8U, 1237 opAndType{OGT, TINT16}: ssa.OpGreater16, 1238 opAndType{OGT, TUINT16}: ssa.OpGreater16U, 1239 opAndType{OGT, TINT32}: ssa.OpGreater32, 1240 opAndType{OGT, TUINT32}: ssa.OpGreater32U, 1241 opAndType{OGT, TINT64}: ssa.OpGreater64, 1242 opAndType{OGT, TUINT64}: ssa.OpGreater64U, 1243 opAndType{OGT, 
TFLOAT64}: ssa.OpGreater64F, 1244 opAndType{OGT, TFLOAT32}: ssa.OpGreater32F, 1245 1246 opAndType{OLE, TINT8}: ssa.OpLeq8, 1247 opAndType{OLE, TUINT8}: ssa.OpLeq8U, 1248 opAndType{OLE, TINT16}: ssa.OpLeq16, 1249 opAndType{OLE, TUINT16}: ssa.OpLeq16U, 1250 opAndType{OLE, TINT32}: ssa.OpLeq32, 1251 opAndType{OLE, TUINT32}: ssa.OpLeq32U, 1252 opAndType{OLE, TINT64}: ssa.OpLeq64, 1253 opAndType{OLE, TUINT64}: ssa.OpLeq64U, 1254 opAndType{OLE, TFLOAT64}: ssa.OpLeq64F, 1255 opAndType{OLE, TFLOAT32}: ssa.OpLeq32F, 1256 1257 opAndType{OGE, TINT8}: ssa.OpGeq8, 1258 opAndType{OGE, TUINT8}: ssa.OpGeq8U, 1259 opAndType{OGE, TINT16}: ssa.OpGeq16, 1260 opAndType{OGE, TUINT16}: ssa.OpGeq16U, 1261 opAndType{OGE, TINT32}: ssa.OpGeq32, 1262 opAndType{OGE, TUINT32}: ssa.OpGeq32U, 1263 opAndType{OGE, TINT64}: ssa.OpGeq64, 1264 opAndType{OGE, TUINT64}: ssa.OpGeq64U, 1265 opAndType{OGE, TFLOAT64}: ssa.OpGeq64F, 1266 opAndType{OGE, TFLOAT32}: ssa.OpGeq32F, 1267 } 1268 1269 func (s *state) concreteEtype(t *types.Type) types.EType { 1270 e := t.Etype 1271 switch e { 1272 default: 1273 return e 1274 case TINT: 1275 if s.config.PtrSize == 8 { 1276 return TINT64 1277 } 1278 return TINT32 1279 case TUINT: 1280 if s.config.PtrSize == 8 { 1281 return TUINT64 1282 } 1283 return TUINT32 1284 case TUINTPTR: 1285 if s.config.PtrSize == 8 { 1286 return TUINT64 1287 } 1288 return TUINT32 1289 } 1290 } 1291 1292 func (s *state) ssaOp(op Op, t *types.Type) ssa.Op { 1293 etype := s.concreteEtype(t) 1294 x, ok := opToSSA[opAndType{op, etype}] 1295 if !ok { 1296 s.Fatalf("unhandled binary op %v %s", op, etype) 1297 } 1298 return x 1299 } 1300 1301 func floatForComplex(t *types.Type) *types.Type { 1302 if t.Size() == 8 { 1303 return types.Types[TFLOAT32] 1304 } else { 1305 return types.Types[TFLOAT64] 1306 } 1307 } 1308 1309 type opAndTwoTypes struct { 1310 op Op 1311 etype1 types.EType 1312 etype2 types.EType 1313 } 1314 1315 type twoTypes struct { 1316 etype1 types.EType 1317 etype2 types.EType 1318 } 1319 1320 type twoOpsAndType struct { 1321 op1 ssa.Op 1322 op2 ssa.Op 1323 intermediateType types.EType 1324 } 1325 1326 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ 1327 1328 twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32}, 1329 twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32}, 1330 twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32}, 1331 twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64}, 1332 1333 twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32}, 1334 twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32}, 1335 twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32}, 1336 twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64}, 1337 1338 twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, 1339 twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, 1340 twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32}, 1341 twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64}, 1342 1343 twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, 1344 twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, 1345 twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32}, 1346 twoTypes{TFLOAT64, 
TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64}, 1347 // unsigned 1348 twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32}, 1349 twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32}, 1350 twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned 1351 twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead 1352 1353 twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32}, 1354 twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32}, 1355 twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned 1356 twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead 1357 1358 twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, 1359 twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, 1360 twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned 1361 twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead 1362 1363 twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, 1364 twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, 1365 twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned 1366 twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead 1367 1368 // float 1369 twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32}, 1370 twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64}, 1371 twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32}, 1372 twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64}, 1373 } 1374 1375 // this map is used only for 32-bit arch, and only includes the difference 1376 // on 32-bit arch, don't use int64<->float conversion for uint32 1377 var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{ 1378 twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32}, 1379 twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32}, 1380 twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32}, 1381 twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32}, 1382 } 1383 1384 // uint64<->float conversions, only on machines that have intructions for that 1385 var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{ 1386 twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64}, 1387 twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64}, 1388 twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64}, 1389 twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64}, 1390 } 1391 1392 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{ 1393 opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8, 1394 opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8, 1395 opAndTwoTypes{OLSH, TINT8, TUINT16}: 
ssa.OpLsh8x16, 1396 opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16, 1397 opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32, 1398 opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32, 1399 opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64, 1400 opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64, 1401 1402 opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8, 1403 opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8, 1404 opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16, 1405 opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16, 1406 opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32, 1407 opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32, 1408 opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64, 1409 opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64, 1410 1411 opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8, 1412 opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8, 1413 opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16, 1414 opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16, 1415 opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32, 1416 opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32, 1417 opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64, 1418 opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64, 1419 1420 opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8, 1421 opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8, 1422 opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16, 1423 opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16, 1424 opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32, 1425 opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32, 1426 opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64, 1427 opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64, 1428 1429 opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8, 1430 opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8, 1431 opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16, 1432 opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16, 1433 opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32, 1434 opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32, 1435 opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64, 1436 opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64, 1437 1438 opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8, 1439 opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8, 1440 opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16, 1441 opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16, 1442 opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32, 1443 opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32, 1444 opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64, 1445 opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64, 1446 1447 opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8, 1448 opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8, 1449 opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16, 1450 opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16, 1451 opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32, 1452 opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32, 1453 opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64, 1454 opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64, 1455 1456 opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8, 1457 opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8, 1458 opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16, 1459 opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16, 1460 opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32, 1461 opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32, 1462 opAndTwoTypes{ORSH, TINT64, 
TUINT64}: ssa.OpRsh64x64, 1463 opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64, 1464 } 1465 1466 func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op { 1467 etype1 := s.concreteEtype(t) 1468 etype2 := s.concreteEtype(u) 1469 x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}] 1470 if !ok { 1471 s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2) 1472 } 1473 return x 1474 } 1475 1476 // expr converts the expression n to ssa, adds it to s and returns the ssa result. 1477 func (s *state) expr(n *Node) *ssa.Value { 1478 if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) { 1479 // ONAMEs and named OLITERALs have the line number 1480 // of the decl, not the use. See issue 14742. 1481 s.pushLine(n.Pos) 1482 defer s.popLine() 1483 } 1484 1485 s.stmtList(n.Ninit) 1486 switch n.Op { 1487 case OARRAYBYTESTRTMP: 1488 slice := s.expr(n.Left) 1489 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) 1490 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 1491 return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) 1492 case OSTRARRAYBYTETMP: 1493 str := s.expr(n.Left) 1494 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str) 1495 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str) 1496 return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len) 1497 case OCFUNC: 1498 aux := n.Left.Sym.Linksym() 1499 return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb) 1500 case ONAME: 1501 if n.Class() == PFUNC { 1502 // "value" of a function is the address of the function's closure 1503 sym := funcsym(n.Sym).Linksym() 1504 return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb) 1505 } 1506 if s.canSSA(n) { 1507 return s.variable(n, n.Type) 1508 } 1509 addr := s.addr(n, false) 1510 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1511 case OCLOSUREVAR: 1512 addr := s.addr(n, false) 1513 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1514 case OLITERAL: 1515 switch u := n.Val().U.(type) { 1516 case *Mpint: 1517 i := u.Int64() 1518 switch n.Type.Size() { 1519 case 1: 1520 return s.constInt8(n.Type, int8(i)) 1521 case 2: 1522 return s.constInt16(n.Type, int16(i)) 1523 case 4: 1524 return s.constInt32(n.Type, int32(i)) 1525 case 8: 1526 return s.constInt64(n.Type, i) 1527 default: 1528 s.Fatalf("bad integer size %d", n.Type.Size()) 1529 return nil 1530 } 1531 case string: 1532 if u == "" { 1533 return s.constEmptyString(n.Type) 1534 } 1535 return s.entryNewValue0A(ssa.OpConstString, n.Type, u) 1536 case bool: 1537 return s.constBool(u) 1538 case *NilVal: 1539 t := n.Type 1540 switch { 1541 case t.IsSlice(): 1542 return s.constSlice(t) 1543 case t.IsInterface(): 1544 return s.constInterface(t) 1545 default: 1546 return s.constNil(t) 1547 } 1548 case *Mpflt: 1549 switch n.Type.Size() { 1550 case 4: 1551 return s.constFloat32(n.Type, u.Float32()) 1552 case 8: 1553 return s.constFloat64(n.Type, u.Float64()) 1554 default: 1555 s.Fatalf("bad float size %d", n.Type.Size()) 1556 return nil 1557 } 1558 case *Mpcplx: 1559 r := &u.Real 1560 i := &u.Imag 1561 switch n.Type.Size() { 1562 case 8: 1563 pt := types.Types[TFLOAT32] 1564 return s.newValue2(ssa.OpComplexMake, n.Type, 1565 s.constFloat32(pt, r.Float32()), 1566 s.constFloat32(pt, i.Float32())) 1567 case 16: 1568 pt := types.Types[TFLOAT64] 1569 return s.newValue2(ssa.OpComplexMake, n.Type, 1570 s.constFloat64(pt, r.Float64()), 1571 s.constFloat64(pt, i.Float64())) 1572 default: 1573 s.Fatalf("bad float size %d", n.Type.Size()) 1574 return nil 1575 } 1576 
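		// Note: every constant kind produced by the front end (integer,
		// string, bool, nil, float, complex) is handled above; anything
		// else reaching the default case is a compiler bug.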
1577 default: 1578 s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype()) 1579 return nil 1580 } 1581 case OCONVNOP: 1582 to := n.Type 1583 from := n.Left.Type 1584 1585 // Assume everything will work out, so set up our return value. 1586 // Anything interesting that happens from here is a fatal. 1587 x := s.expr(n.Left) 1588 1589 // Special case for not confusing GC and liveness. 1590 // We don't want pointers accidentally classified 1591 // as not-pointers or vice-versa because of copy 1592 // elision. 1593 if to.IsPtrShaped() != from.IsPtrShaped() { 1594 return s.newValue2(ssa.OpConvert, to, x, s.mem()) 1595 } 1596 1597 v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type 1598 1599 // CONVNOP closure 1600 if to.Etype == TFUNC && from.IsPtrShaped() { 1601 return v 1602 } 1603 1604 // named <--> unnamed type or typed <--> untyped const 1605 if from.Etype == to.Etype { 1606 return v 1607 } 1608 1609 // unsafe.Pointer <--> *T 1610 if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() { 1611 return v 1612 } 1613 1614 // map <--> *hmap 1615 if to.Etype == TMAP && from.IsPtr() && 1616 to.MapType().Hmap == from.Elem() { 1617 return v 1618 } 1619 1620 dowidth(from) 1621 dowidth(to) 1622 if from.Width != to.Width { 1623 s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width) 1624 return nil 1625 } 1626 if etypesign(from.Etype) != etypesign(to.Etype) { 1627 s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype) 1628 return nil 1629 } 1630 1631 if instrumenting { 1632 // These appear to be fine, but they fail the 1633 // integer constraint below, so okay them here. 1634 // Sample non-integer conversion: map[string]string -> *uint8 1635 return v 1636 } 1637 1638 if etypesign(from.Etype) == 0 { 1639 s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to) 1640 return nil 1641 } 1642 1643 // integer, same width, same sign 1644 return v 1645 1646 case OCONV: 1647 x := s.expr(n.Left) 1648 ft := n.Left.Type // from type 1649 tt := n.Type // to type 1650 if ft.IsBoolean() && tt.IsKind(TUINT8) { 1651 // Bool -> uint8 is generated internally when indexing into runtime.staticbyte. 
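			// Note: bool and uint8 are both one byte wide, so a plain
			// OpCopy retypes the value and no conversion code is needed.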
1652 return s.newValue1(ssa.OpCopy, n.Type, x) 1653 } 1654 if ft.IsInteger() && tt.IsInteger() { 1655 var op ssa.Op 1656 if tt.Size() == ft.Size() { 1657 op = ssa.OpCopy 1658 } else if tt.Size() < ft.Size() { 1659 // truncation 1660 switch 10*ft.Size() + tt.Size() { 1661 case 21: 1662 op = ssa.OpTrunc16to8 1663 case 41: 1664 op = ssa.OpTrunc32to8 1665 case 42: 1666 op = ssa.OpTrunc32to16 1667 case 81: 1668 op = ssa.OpTrunc64to8 1669 case 82: 1670 op = ssa.OpTrunc64to16 1671 case 84: 1672 op = ssa.OpTrunc64to32 1673 default: 1674 s.Fatalf("weird integer truncation %v -> %v", ft, tt) 1675 } 1676 } else if ft.IsSigned() { 1677 // sign extension 1678 switch 10*ft.Size() + tt.Size() { 1679 case 12: 1680 op = ssa.OpSignExt8to16 1681 case 14: 1682 op = ssa.OpSignExt8to32 1683 case 18: 1684 op = ssa.OpSignExt8to64 1685 case 24: 1686 op = ssa.OpSignExt16to32 1687 case 28: 1688 op = ssa.OpSignExt16to64 1689 case 48: 1690 op = ssa.OpSignExt32to64 1691 default: 1692 s.Fatalf("bad integer sign extension %v -> %v", ft, tt) 1693 } 1694 } else { 1695 // zero extension 1696 switch 10*ft.Size() + tt.Size() { 1697 case 12: 1698 op = ssa.OpZeroExt8to16 1699 case 14: 1700 op = ssa.OpZeroExt8to32 1701 case 18: 1702 op = ssa.OpZeroExt8to64 1703 case 24: 1704 op = ssa.OpZeroExt16to32 1705 case 28: 1706 op = ssa.OpZeroExt16to64 1707 case 48: 1708 op = ssa.OpZeroExt32to64 1709 default: 1710 s.Fatalf("weird integer zero extension %v -> %v", ft, tt) 1711 } 1712 } 1713 return s.newValue1(op, n.Type, x) 1714 } 1715 1716 if ft.IsFloat() || tt.IsFloat() { 1717 conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}] 1718 if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat { 1719 if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { 1720 conv = conv1 1721 } 1722 } 1723 if thearch.LinkArch.Family == sys.ARM64 || s.softFloat { 1724 if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { 1725 conv = conv1 1726 } 1727 } 1728 1729 if thearch.LinkArch.Family == sys.MIPS && !s.softFloat { 1730 if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() { 1731 // tt is float32 or float64, and ft is also unsigned 1732 if tt.Size() == 4 { 1733 return s.uint32Tofloat32(n, x, ft, tt) 1734 } 1735 if tt.Size() == 8 { 1736 return s.uint32Tofloat64(n, x, ft, tt) 1737 } 1738 } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() { 1739 // ft is float32 or float64, and tt is unsigned integer 1740 if ft.Size() == 4 { 1741 return s.float32ToUint32(n, x, ft, tt) 1742 } 1743 if ft.Size() == 8 { 1744 return s.float64ToUint32(n, x, ft, tt) 1745 } 1746 } 1747 } 1748 1749 if !ok { 1750 s.Fatalf("weird float conversion %v -> %v", ft, tt) 1751 } 1752 op1, op2, it := conv.op1, conv.op2, conv.intermediateType 1753 1754 if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid { 1755 // normal case, not tripping over unsigned 64 1756 if op1 == ssa.OpCopy { 1757 if op2 == ssa.OpCopy { 1758 return x 1759 } 1760 return s.newValueOrSfCall1(op2, n.Type, x) 1761 } 1762 if op2 == ssa.OpCopy { 1763 return s.newValueOrSfCall1(op1, n.Type, x) 1764 } 1765 return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x)) 1766 } 1767 // Tricky 64-bit unsigned cases.
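// These cases are only reached when the conversion tables above supply no direct op for the unsigned-64-bit side (ARM64 and soft-float install one via uint64fpConvOpToSSA); the uint64Tofloat* and float*ToUint64 helpers below expand the conversion into a short compare-and-adjust sequence instead of a single op.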
1768 if ft.IsInteger() { 1769 // tt is float32 or float64, and ft is also unsigned 1770 if tt.Size() == 4 { 1771 return s.uint64Tofloat32(n, x, ft, tt) 1772 } 1773 if tt.Size() == 8 { 1774 return s.uint64Tofloat64(n, x, ft, tt) 1775 } 1776 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt) 1777 } 1778 // ft is float32 or float64, and tt is unsigned integer 1779 if ft.Size() == 4 { 1780 return s.float32ToUint64(n, x, ft, tt) 1781 } 1782 if ft.Size() == 8 { 1783 return s.float64ToUint64(n, x, ft, tt) 1784 } 1785 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) 1786 return nil 1787 } 1788 1789 if ft.IsComplex() && tt.IsComplex() { 1790 var op ssa.Op 1791 if ft.Size() == tt.Size() { 1792 switch ft.Size() { 1793 case 8: 1794 op = ssa.OpRound32F 1795 case 16: 1796 op = ssa.OpRound64F 1797 default: 1798 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1799 } 1800 } else if ft.Size() == 8 && tt.Size() == 16 { 1801 op = ssa.OpCvt32Fto64F 1802 } else if ft.Size() == 16 && tt.Size() == 8 { 1803 op = ssa.OpCvt64Fto32F 1804 } else { 1805 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1806 } 1807 ftp := floatForComplex(ft) 1808 ttp := floatForComplex(tt) 1809 return s.newValue2(ssa.OpComplexMake, tt, 1810 s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), 1811 s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) 1812 } 1813 1814 s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) 1815 return nil 1816 1817 case ODOTTYPE: 1818 res, _ := s.dottype(n, false) 1819 return res 1820 1821 // binary ops 1822 case OLT, OEQ, ONE, OLE, OGE, OGT: 1823 a := s.expr(n.Left) 1824 b := s.expr(n.Right) 1825 if n.Left.Type.IsComplex() { 1826 pt := floatForComplex(n.Left.Type) 1827 op := s.ssaOp(OEQ, pt) 1828 r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) 1829 i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) 1830 c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i) 1831 switch n.Op { 1832 case OEQ: 1833 return c 1834 case ONE: 1835 return s.newValue1(ssa.OpNot, types.Types[TBOOL], c) 1836 default: 1837 s.Fatalf("ordered complex compare %v", n.Op) 1838 } 1839 } 1840 if n.Left.Type.IsFloat() { 1841 return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b) 1842 } 1843 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b) 1844 case OMUL: 1845 a := s.expr(n.Left) 1846 b := s.expr(n.Right) 1847 if n.Type.IsComplex() { 1848 mulop := ssa.OpMul64F 1849 addop := ssa.OpAdd64F 1850 subop := ssa.OpSub64F 1851 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1852 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1853 1854 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1855 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1856 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1857 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1858 1859 if pt != wt { // Widen for calculation 1860 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal) 1861 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal) 1862 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag) 1863 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag) 1864 } 1865 1866 xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag)) 1867 ximag := 
s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal)) 1868 1869 if pt != wt { // Narrow to store back 1870 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal) 1871 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) 1872 } 1873 1874 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1875 } 1876 1877 if n.Type.IsFloat() { 1878 return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1879 } 1880 1881 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1882 1883 case ODIV: 1884 a := s.expr(n.Left) 1885 b := s.expr(n.Right) 1886 if n.Type.IsComplex() { 1887 // TODO this is not executed because the front-end substitutes a runtime call. 1888 // That probably ought to change; with modest optimization the widen/narrow 1889 // conversions could all be elided in larger expression trees. 1890 mulop := ssa.OpMul64F 1891 addop := ssa.OpAdd64F 1892 subop := ssa.OpSub64F 1893 divop := ssa.OpDiv64F 1894 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1895 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1896 1897 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1898 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1899 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1900 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1901 1902 if pt != wt { // Widen for calculation 1903 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal) 1904 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal) 1905 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag) 1906 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag) 1907 } 1908 1909 denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag)) 1910 xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag)) 1911 ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag)) 1912 1913 // TODO not sure if this is best done in wide precision or narrow 1914 // Double-rounding might be an issue. 1915 // Note that the pre-SSA implementation does the entire calculation 1916 // in wide format, so wide is compatible. 
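// In effect this computes (a+bi)/(c+di) = ((a*c+b*d) + (b*c-a*d)i) / (c*c+d*d), with the shared denominator c*c+d*d computed once above.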
1917 xreal = s.newValueOrSfCall2(divop, wt, xreal, denom) 1918 ximag = s.newValueOrSfCall2(divop, wt, ximag, denom) 1919 1920 if pt != wt { // Narrow to store back 1921 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal) 1922 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) 1923 } 1924 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1925 } 1926 if n.Type.IsFloat() { 1927 return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1928 } 1929 return s.intDivide(n, a, b) 1930 case OMOD: 1931 a := s.expr(n.Left) 1932 b := s.expr(n.Right) 1933 return s.intDivide(n, a, b) 1934 case OADD, OSUB: 1935 a := s.expr(n.Left) 1936 b := s.expr(n.Right) 1937 if n.Type.IsComplex() { 1938 pt := floatForComplex(n.Type) 1939 op := s.ssaOp(n.Op, pt) 1940 return s.newValue2(ssa.OpComplexMake, n.Type, 1941 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1942 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1943 } 1944 if n.Type.IsFloat() { 1945 return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1946 } 1947 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1948 case OAND, OOR, OXOR: 1949 a := s.expr(n.Left) 1950 b := s.expr(n.Right) 1951 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1952 case OLSH, ORSH: 1953 a := s.expr(n.Left) 1954 b := s.expr(n.Right) 1955 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1956 case OANDAND, OOROR: 1957 // To implement OANDAND (and OOROR), we introduce a 1958 // new temporary variable to hold the result. The 1959 // variable is associated with the OANDAND node in the 1960 // s.vars table (normally variables are only 1961 // associated with ONAME nodes). We convert 1962 // A && B 1963 // to 1964 // var = A 1965 // if var { 1966 // var = B 1967 // } 1968 // Using var in the subsequent block introduces the 1969 // necessary phi variable. 1970 el := s.expr(n.Left) 1971 s.vars[n] = el 1972 1973 b := s.endBlock() 1974 b.Kind = ssa.BlockIf 1975 b.SetControl(el) 1976 // In theory, we should set b.Likely here based on context. 1977 // However, gc only gives us likeliness hints 1978 // in a single place, for plain OIF statements, 1979 // and passing around context is finicky, so don't bother for now.
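// For A && B, the right operand (bRight) is evaluated only when A is true; for A || B, only when A is false. That is why the AddEdgeTo order below differs between the two cases.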
1980 1981 bRight := s.f.NewBlock(ssa.BlockPlain) 1982 bResult := s.f.NewBlock(ssa.BlockPlain) 1983 if n.Op == OANDAND { 1984 b.AddEdgeTo(bRight) 1985 b.AddEdgeTo(bResult) 1986 } else if n.Op == OOROR { 1987 b.AddEdgeTo(bResult) 1988 b.AddEdgeTo(bRight) 1989 } 1990 1991 s.startBlock(bRight) 1992 er := s.expr(n.Right) 1993 s.vars[n] = er 1994 1995 b = s.endBlock() 1996 b.AddEdgeTo(bResult) 1997 1998 s.startBlock(bResult) 1999 return s.variable(n, types.Types[TBOOL]) 2000 case OCOMPLEX: 2001 r := s.expr(n.Left) 2002 i := s.expr(n.Right) 2003 return s.newValue2(ssa.OpComplexMake, n.Type, r, i) 2004 2005 // unary ops 2006 case OMINUS: 2007 a := s.expr(n.Left) 2008 if n.Type.IsComplex() { 2009 tp := floatForComplex(n.Type) 2010 negop := s.ssaOp(n.Op, tp) 2011 return s.newValue2(ssa.OpComplexMake, n.Type, 2012 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), 2013 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) 2014 } 2015 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 2016 case ONOT, OCOM: 2017 a := s.expr(n.Left) 2018 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 2019 case OIMAG, OREAL: 2020 a := s.expr(n.Left) 2021 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a) 2022 case OPLUS: 2023 return s.expr(n.Left) 2024 2025 case OADDR: 2026 return s.addr(n.Left, n.Bounded()) 2027 2028 case OINDREGSP: 2029 addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset) 2030 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2031 2032 case OIND: 2033 p := s.exprPtr(n.Left, false, n.Pos) 2034 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 2035 2036 case ODOT: 2037 t := n.Left.Type 2038 if canSSAType(t) { 2039 v := s.expr(n.Left) 2040 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v) 2041 } 2042 if n.Left.Op == OSTRUCTLIT { 2043 // All literals with nonzero fields have already been 2044 // rewritten during walk. Any that remain are just T{} 2045 // or equivalents. Use the zero value. 2046 if !iszero(n.Left) { 2047 Fatalf("literal with nonzero value in SSA: %v", n.Left) 2048 } 2049 return s.zeroVal(n.Type) 2050 } 2051 p := s.addr(n, false) 2052 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 2053 2054 case ODOTPTR: 2055 p := s.exprPtr(n.Left, false, n.Pos) 2056 p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p) 2057 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 2058 2059 case OINDEX: 2060 switch { 2061 case n.Left.Type.IsString(): 2062 if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) { 2063 // Replace "abc"[1] with 'b'. 2064 // Delayed until now because "abc"[1] is not an ideal constant. 2065 // See test/fixedbugs/issue11370.go. 
2066 return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()]))) 2067 } 2068 a := s.expr(n.Left) 2069 i := s.expr(n.Right) 2070 i = s.extendIndex(i, panicindex) 2071 if !n.Bounded() { 2072 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a) 2073 s.boundsCheck(i, len) 2074 } 2075 ptrtyp := s.f.Config.Types.BytePtr 2076 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 2077 if Isconst(n.Right, CTINT) { 2078 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 2079 } else { 2080 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 2081 } 2082 return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem()) 2083 case n.Left.Type.IsSlice(): 2084 p := s.addr(n, false) 2085 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2086 case n.Left.Type.IsArray(): 2087 if bound := n.Left.Type.NumElem(); bound <= 1 { 2088 // SSA can handle arrays of length at most 1. 2089 a := s.expr(n.Left) 2090 i := s.expr(n.Right) 2091 if bound == 0 { 2092 // Bounds check will never succeed. Might as well 2093 // use constants for the bounds check. 2094 z := s.constInt(types.Types[TINT], 0) 2095 s.boundsCheck(z, z) 2096 // The return value won't be live, return junk. 2097 return s.newValue0(ssa.OpUnknown, n.Type) 2098 } 2099 i = s.extendIndex(i, panicindex) 2100 if !n.Bounded() { 2101 s.boundsCheck(i, s.constInt(types.Types[TINT], bound)) 2102 } 2103 return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) 2104 } 2105 p := s.addr(n, false) 2106 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2107 default: 2108 s.Fatalf("bad type for index %v", n.Left.Type) 2109 return nil 2110 } 2111 2112 case OLEN, OCAP: 2113 switch { 2114 case n.Left.Type.IsSlice(): 2115 op := ssa.OpSliceLen 2116 if n.Op == OCAP { 2117 op = ssa.OpSliceCap 2118 } 2119 return s.newValue1(op, types.Types[TINT], s.expr(n.Left)) 2120 case n.Left.Type.IsString(): // string; not reachable for OCAP 2121 return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left)) 2122 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 2123 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 2124 default: // array 2125 return s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 2126 } 2127 2128 case OSPTR: 2129 a := s.expr(n.Left) 2130 if n.Left.Type.IsSlice() { 2131 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 2132 } else { 2133 return s.newValue1(ssa.OpStringPtr, n.Type, a) 2134 } 2135 2136 case OITAB: 2137 a := s.expr(n.Left) 2138 return s.newValue1(ssa.OpITab, n.Type, a) 2139 2140 case OIDATA: 2141 a := s.expr(n.Left) 2142 return s.newValue1(ssa.OpIData, n.Type, a) 2143 2144 case OEFACE: 2145 tab := s.expr(n.Left) 2146 data := s.expr(n.Right) 2147 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2148 2149 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2150 v := s.expr(n.Left) 2151 var i, j, k *ssa.Value 2152 low, high, max := n.SliceBounds() 2153 if low != nil { 2154 i = s.extendIndex(s.expr(low), panicslice) 2155 } 2156 if high != nil { 2157 j = s.extendIndex(s.expr(high), panicslice) 2158 } 2159 if max != nil { 2160 k = s.extendIndex(s.expr(max), panicslice) 2161 } 2162 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2163 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2164 2165 case OSLICESTR: 2166 v := s.expr(n.Left) 2167 var i, j *ssa.Value 2168 low, high, _ := n.SliceBounds() 2169 if low != nil { 2170 i = s.extendIndex(s.expr(low), panicslice) 2171 } 2172 if high != nil { 2173 j = s.extendIndex(s.expr(high), panicslice) 2174 } 2175 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 
2176 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2177 2178 case OCALLFUNC: 2179 if isIntrinsicCall(n) { 2180 return s.intrinsicCall(n) 2181 } 2182 fallthrough 2183 2184 case OCALLINTER, OCALLMETH: 2185 a := s.call(n, callNormal) 2186 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2187 2188 case OGETG: 2189 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2190 2191 case OAPPEND: 2192 return s.append(n, false) 2193 2194 case OSTRUCTLIT, OARRAYLIT: 2195 // All literals with nonzero fields have already been 2196 // rewritten during walk. Any that remain are just T{} 2197 // or equivalents. Use the zero value. 2198 if !iszero(n) { 2199 Fatalf("literal with nonzero value in SSA: %v", n) 2200 } 2201 return s.zeroVal(n.Type) 2202 2203 default: 2204 s.Fatalf("unhandled expr %v", n.Op) 2205 return nil 2206 } 2207 } 2208 2209 // append converts an OAPPEND node to SSA. 2210 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2211 // adds it to s, and returns the Value. 2212 // If inplace is true, it writes the result of the OAPPEND expression n 2213 // back to the slice being appended to, and returns nil. 2214 // inplace MUST be set to false if the slice can be SSA'd. 2215 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2216 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2217 // 2218 // ptr, len, cap := s 2219 // newlen := len + 3 2220 // if newlen > cap { 2221 // ptr, len, cap = growslice(s, newlen) 2222 // newlen = len + 3 // recalculate to avoid a spill 2223 // } 2224 // // with write barriers, if needed: 2225 // *(ptr+len) = e1 2226 // *(ptr+len+1) = e2 2227 // *(ptr+len+2) = e3 2228 // return makeslice(ptr, newlen, cap) 2229 // 2230 // 2231 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2232 // 2233 // a := &s 2234 // ptr, len, cap := s 2235 // newlen := len + 3 2236 // if newlen > cap { 2237 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2238 // vardef(a) // if necessary, advise liveness we are writing a new a 2239 // *a.cap = newcap // write before ptr to avoid a spill 2240 // *a.ptr = newptr // with write barrier 2241 // } 2242 // newlen = len + 3 // recalculate to avoid a spill 2243 // *a.len = newlen 2244 // // with write barriers, if needed: 2245 // *(ptr+len) = e1 2246 // *(ptr+len+1) = e2 2247 // *(ptr+len+2) = e3 2248 2249 et := n.Type.Elem() 2250 pt := types.NewPtr(et) 2251 2252 // Evaluate slice 2253 sn := n.List.First() // the slice node is the first in the list 2254 2255 var slice, addr *ssa.Value 2256 if inplace { 2257 addr = s.addr(sn, false) 2258 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2259 } else { 2260 slice = s.expr(sn) 2261 } 2262 2263 // Allocate new blocks 2264 grow := s.f.NewBlock(ssa.BlockPlain) 2265 assign := s.f.NewBlock(ssa.BlockPlain) 2266 2267 // Decide if we need to grow 2268 nargs := int64(n.List.Len() - 1) 2269 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2270 l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2271 c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice) 2272 nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2273 2274 cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c) 2275 s.vars[&ptrVar] = p 2276 2277 if !inplace { 2278 s.vars[&newlenVar] = nl 2279 s.vars[&capVar] = c 2280 } else { 2281 s.vars[&lenVar] = l 2282 } 2283 2284 b := s.endBlock() 2285 b.Kind = ssa.BlockIf 2286 b.Likely = ssa.BranchUnlikely 2287 b.SetControl(cmp) 2288 
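// Control goes to grow (marked unlikely) when newlen > cap, and to assign otherwise.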
b.AddEdgeTo(grow) 2289 b.AddEdgeTo(assign) 2290 2291 // Call growslice 2292 s.startBlock(grow) 2293 taddr := s.expr(n.Left) 2294 r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl) 2295 2296 if inplace { 2297 if sn.Op == ONAME && sn.Class() != PEXTERN { 2298 // Tell liveness we're about to build a new slice 2299 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) 2300 } 2301 capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr) 2302 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capaddr, r[2], s.mem()) 2303 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, pt, addr, r[0], s.mem()) 2304 // load the value we just stored to avoid having to spill it 2305 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2306 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2307 } else { 2308 s.vars[&ptrVar] = r[0] 2309 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs)) 2310 s.vars[&capVar] = r[2] 2311 } 2312 2313 b = s.endBlock() 2314 b.AddEdgeTo(assign) 2315 2316 // assign new elements to slots 2317 s.startBlock(assign) 2318 2319 if inplace { 2320 l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len 2321 nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2322 lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr) 2323 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenaddr, nl, s.mem()) 2324 } 2325 2326 // Evaluate args 2327 type argRec struct { 2328 // if store is true, we're appending the value v. If false, we're appending the 2329 // value at *v. 2330 v *ssa.Value 2331 store bool 2332 } 2333 args := make([]argRec, 0, nargs) 2334 for _, n := range n.List.Slice()[1:] { 2335 if canSSAType(n.Type) { 2336 args = append(args, argRec{v: s.expr(n), store: true}) 2337 } else { 2338 v := s.addr(n, false) 2339 args = append(args, argRec{v: v}) 2340 } 2341 } 2342 2343 p = s.variable(&ptrVar, pt) // generates phi for ptr 2344 if !inplace { 2345 nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl 2346 c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap 2347 } 2348 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2349 for i, arg := range args { 2350 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i))) 2351 if arg.store { 2352 s.storeType(et, addr, arg.v, 0) 2353 } else { 2354 store := s.newValue3I(ssa.OpMove, types.TypeMem, et.Size(), addr, arg.v, s.mem()) 2355 store.Aux = et 2356 s.vars[&memVar] = store 2357 } 2358 } 2359 2360 delete(s.vars, &ptrVar) 2361 if inplace { 2362 delete(s.vars, &lenVar) 2363 return nil 2364 } 2365 delete(s.vars, &newlenVar) 2366 delete(s.vars, &capVar) 2367 // make result 2368 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2369 } 2370 2371 // condBranch evaluates the boolean expression cond and branches to yes 2372 // if cond is true and no if cond is false. 2373 // This function is intended to handle && and || better than just calling 2374 // s.expr(cond) and branching on the result. 
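// For example, for a condition like a && b, condBranch branches on a first and evaluates b only when a is true, rather than materializing the boolean value a && b and branching on that.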
2375 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { 2376 switch cond.Op { 2377 case OANDAND: 2378 mid := s.f.NewBlock(ssa.BlockPlain) 2379 s.stmtList(cond.Ninit) 2380 s.condBranch(cond.Left, mid, no, max8(likely, 0)) 2381 s.startBlock(mid) 2382 s.condBranch(cond.Right, yes, no, likely) 2383 return 2384 // Note: if likely==1, then both recursive calls pass 1. 2385 // If likely==-1, then we don't have enough information to decide 2386 // whether the first branch is likely or not. So we pass 0 for 2387 // the likeliness of the first branch. 2388 // TODO: have the frontend give us branch prediction hints for 2389 // OANDAND and OOROR nodes (if it ever has such info). 2390 case OOROR: 2391 mid := s.f.NewBlock(ssa.BlockPlain) 2392 s.stmtList(cond.Ninit) 2393 s.condBranch(cond.Left, yes, mid, min8(likely, 0)) 2394 s.startBlock(mid) 2395 s.condBranch(cond.Right, yes, no, likely) 2396 return 2397 // Note: if likely==-1, then both recursive calls pass -1. 2398 // If likely==1, then we don't have enough info to decide 2399 // the likelihood of the first branch. 2400 case ONOT: 2401 s.stmtList(cond.Ninit) 2402 s.condBranch(cond.Left, no, yes, -likely) 2403 return 2404 } 2405 c := s.expr(cond) 2406 b := s.endBlock() 2407 b.Kind = ssa.BlockIf 2408 b.SetControl(c) 2409 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness 2410 b.AddEdgeTo(yes) 2411 b.AddEdgeTo(no) 2412 } 2413 2414 type skipMask uint8 2415 2416 const ( 2417 skipPtr skipMask = 1 << iota 2418 skipLen 2419 skipCap 2420 ) 2421 2422 // assign does left = right. 2423 // Right has already been evaluated to ssa, left has not. 2424 // If deref is true, then we do left = *right instead (and right has already been nil-checked). 2425 // If deref is true and right == nil, just do left = 0. 2426 // skip indicates assignments (at the top level) that can be avoided. 2427 func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) { 2428 if left.Op == ONAME && isblank(left) { 2429 return 2430 } 2431 t := left.Type 2432 dowidth(t) 2433 if s.canSSA(left) { 2434 if deref { 2435 s.Fatalf("can SSA LHS %v but not RHS %s", left, right) 2436 } 2437 if left.Op == ODOT { 2438 // We're assigning to a field of an ssa-able value. 2439 // We need to build a new structure with the new value for the 2440 // field we're assigning and the old values for the other fields. 2441 // For instance: 2442 // type T struct {a, b, c int} 2443 // var x T 2444 // x.b = 5 2445 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c} 2446 2447 // Grab information about the structure type. 2448 t := left.Left.Type 2449 nf := t.NumFields() 2450 idx := fieldIdx(left) 2451 2452 // Grab old value of structure. 2453 old := s.expr(left.Left) 2454 2455 // Make new structure. 2456 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t) 2457 2458 // Add fields as args. 2459 for i := 0; i < nf; i++ { 2460 if i == idx { 2461 new.AddArg(right) 2462 } else { 2463 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old)) 2464 } 2465 } 2466 2467 // Recursively assign the new value we've made to the base of the dot op. 2468 s.assign(left.Left, new, false, 0) 2469 // TODO: do we need to update named values here? 2470 return 2471 } 2472 if left.Op == OINDEX && left.Left.Type.IsArray() { 2473 // We're assigning to an element of an ssa-able array.
2474 // a[i] = v 2475 t := left.Left.Type 2476 n := t.NumElem() 2477 2478 i := s.expr(left.Right) // index 2479 if n == 0 { 2480 // The bounds check must fail. Might as well 2481 // ignore the actual index and just use zeros. 2482 z := s.constInt(types.Types[TINT], 0) 2483 s.boundsCheck(z, z) 2484 return 2485 } 2486 if n != 1 { 2487 s.Fatalf("assigning to non-1-length array") 2488 } 2489 // Rewrite to a = [1]{v} 2490 i = s.extendIndex(i, panicindex) 2491 s.boundsCheck(i, s.constInt(types.Types[TINT], 1)) 2492 v := s.newValue1(ssa.OpArrayMake1, t, right) 2493 s.assign(left.Left, v, false, 0) 2494 return 2495 } 2496 // Update variable assignment. 2497 s.vars[left] = right 2498 s.addNamedValue(left, right) 2499 return 2500 } 2501 // Left is not ssa-able. Compute its address. 2502 addr := s.addr(left, false) 2503 if left.Op == ONAME && left.Class() != PEXTERN && skip == 0 { 2504 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem()) 2505 } 2506 if isReflectHeaderDataField(left) { 2507 // Package unsafe's documentation says storing pointers into 2508 // reflect.SliceHeader and reflect.StringHeader's Data fields 2509 // is valid, even though they have type uintptr (#19168). 2510 // Mark it pointer type to signal the writebarrier pass to 2511 // insert a write barrier. 2512 t = types.Types[TUNSAFEPTR] 2513 } 2514 if deref { 2515 // Treat as a mem->mem move. 2516 var store *ssa.Value 2517 if right == nil { 2518 store = s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), addr, s.mem()) 2519 } else { 2520 store = s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), addr, right, s.mem()) 2521 } 2522 store.Aux = t 2523 s.vars[&memVar] = store 2524 return 2525 } 2526 // Treat as a store. 2527 s.storeType(t, addr, right, skip) 2528 } 2529 2530 // zeroVal returns the zero value for type t. 
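// Zero values for structs and length-1 arrays are built recursively from the zero values of their fields or elements.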
2531 func (s *state) zeroVal(t *types.Type) *ssa.Value { 2532 switch { 2533 case t.IsInteger(): 2534 switch t.Size() { 2535 case 1: 2536 return s.constInt8(t, 0) 2537 case 2: 2538 return s.constInt16(t, 0) 2539 case 4: 2540 return s.constInt32(t, 0) 2541 case 8: 2542 return s.constInt64(t, 0) 2543 default: 2544 s.Fatalf("bad sized integer type %v", t) 2545 } 2546 case t.IsFloat(): 2547 switch t.Size() { 2548 case 4: 2549 return s.constFloat32(t, 0) 2550 case 8: 2551 return s.constFloat64(t, 0) 2552 default: 2553 s.Fatalf("bad sized float type %v", t) 2554 } 2555 case t.IsComplex(): 2556 switch t.Size() { 2557 case 8: 2558 z := s.constFloat32(types.Types[TFLOAT32], 0) 2559 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2560 case 16: 2561 z := s.constFloat64(types.Types[TFLOAT64], 0) 2562 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2563 default: 2564 s.Fatalf("bad sized complex type %v", t) 2565 } 2566 2567 case t.IsString(): 2568 return s.constEmptyString(t) 2569 case t.IsPtrShaped(): 2570 return s.constNil(t) 2571 case t.IsBoolean(): 2572 return s.constBool(false) 2573 case t.IsInterface(): 2574 return s.constInterface(t) 2575 case t.IsSlice(): 2576 return s.constSlice(t) 2577 case t.IsStruct(): 2578 n := t.NumFields() 2579 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2580 for i := 0; i < n; i++ { 2581 v.AddArg(s.zeroVal(t.FieldType(i))) 2582 } 2583 return v 2584 case t.IsArray(): 2585 switch t.NumElem() { 2586 case 0: 2587 return s.entryNewValue0(ssa.OpArrayMake0, t) 2588 case 1: 2589 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) 2590 } 2591 } 2592 s.Fatalf("zero for type %v not implemented", t) 2593 return nil 2594 } 2595 2596 type callKind int8 2597 2598 const ( 2599 callNormal callKind = iota 2600 callDefer 2601 callGo 2602 ) 2603 2604 type sfRtCallDef struct { 2605 rtfn *obj.LSym 2606 rtype types.EType 2607 } 2608 2609 var softFloatOps map[ssa.Op]sfRtCallDef 2610 2611 func softfloatInit() { 2612 // Some of these operations get transformed by sfcall. 
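// For example, Sub32F/Sub64F are entered below as fadd32/fadd64 because sfcall rewrites a-b into fadd(a, -b); likewise Neq is feq followed by Not, and Less/Leq swap the operands of fgt/fge.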
2613 softFloatOps = map[ssa.Op]sfRtCallDef{ 2614 ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32}, 2615 ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64}, 2616 ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32}, 2617 ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64}, 2618 ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32}, 2619 ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64}, 2620 ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32}, 2621 ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64}, 2622 2623 ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), TBOOL}, 2624 ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), TBOOL}, 2625 ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), TBOOL}, 2626 ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), TBOOL}, 2627 ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL}, 2628 ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL}, 2629 ssa.OpGreater64F: sfRtCallDef{sysfunc("fgt64"), TBOOL}, 2630 ssa.OpGreater32F: sfRtCallDef{sysfunc("fgt32"), TBOOL}, 2631 ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL}, 2632 ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL}, 2633 ssa.OpGeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL}, 2634 ssa.OpGeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL}, 2635 2636 ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), TFLOAT32}, 2637 ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), TINT32}, 2638 ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), TFLOAT32}, 2639 ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), TINT64}, 2640 ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32}, 2641 ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64}, 2642 ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), TFLOAT64}, 2643 ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), TINT32}, 2644 ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), TFLOAT64}, 2645 ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), TINT64}, 2646 ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64}, 2647 ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64}, 2648 ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64}, 2649 ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32}, 2650 } 2651 } 2652 2653 // TODO: do not emit sfcall if operation can be optimized to constant in later 2654 // opt phase 2655 func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) { 2656 if callDef, ok := softFloatOps[op]; ok { 2657 switch op { 2658 case ssa.OpLess32F, 2659 ssa.OpLess64F, 2660 ssa.OpLeq32F, 2661 ssa.OpLeq64F: 2662 args[0], args[1] = args[1], args[0] 2663 case ssa.OpSub32F, 2664 ssa.OpSub64F: 2665 args[1] = s.newValue1(s.ssaOp(OMINUS, types.Types[callDef.rtype]), args[1].Type, args[1]) 2666 } 2667 2668 result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0] 2669 if op == ssa.OpNeq32F || op == ssa.OpNeq64F { 2670 result = s.newValue1(ssa.OpNot, result.Type, result) 2671 } 2672 return result, true 2673 } 2674 return nil, false 2675 } 2676 2677 var intrinsics map[intrinsicKey]intrinsicBuilder 2678 2679 // An intrinsicBuilder converts a call node n into an ssa value that 2680 // implements that call as an intrinsic. args is a list of arguments to the func. 
2681 type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value 2682 2683 type intrinsicKey struct { 2684 arch *sys.Arch 2685 pkg string 2686 fn string 2687 } 2688 2689 func init() { 2690 intrinsics = map[intrinsicKey]intrinsicBuilder{} 2691 2692 var all []*sys.Arch 2693 var p4 []*sys.Arch 2694 var p8 []*sys.Arch 2695 for _, a := range sys.Archs { 2696 all = append(all, a) 2697 if a.PtrSize == 4 { 2698 p4 = append(p4, a) 2699 } else { 2700 p8 = append(p8, a) 2701 } 2702 } 2703 2704 // add adds the intrinsic b for pkg.fn for the given list of architectures. 2705 add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) { 2706 for _, a := range archs { 2707 intrinsics[intrinsicKey{a, pkg, fn}] = b 2708 } 2709 } 2710 // addF does the same as add but operates on architecture families. 2711 addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) { 2712 m := 0 2713 for _, f := range archFamilies { 2714 if f >= 32 { 2715 panic("too many architecture families") 2716 } 2717 m |= 1 << uint(f) 2718 } 2719 for _, a := range all { 2720 if m>>uint(a.Family)&1 != 0 { 2721 intrinsics[intrinsicKey{a, pkg, fn}] = b 2722 } 2723 } 2724 } 2725 // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists. 2726 alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) { 2727 for _, a := range archs { 2728 if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok { 2729 intrinsics[intrinsicKey{a, pkg, fn}] = b 2730 } 2731 } 2732 } 2733 2734 /******** runtime ********/ 2735 if !instrumenting { 2736 add("runtime", "slicebytetostringtmp", 2737 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2738 // Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes 2739 // for the backend instead of slicebytetostringtmp calls 2740 // when not instrumenting. 2741 slice := args[0] 2742 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) 2743 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2744 return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) 2745 }, 2746 all...) 2747 } 2748 add("runtime", "KeepAlive", 2749 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2750 data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) 2751 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) 2752 return nil 2753 }, 2754 all...) 2755 add("runtime", "getclosureptr", 2756 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2757 return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr) 2758 }, 2759 all...) 2760 2761 addF("runtime", "getcallerpc", 2762 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2763 return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr) 2764 }, sys.AMD64, sys.I386) 2765 2766 add("runtime", "getcallersp", 2767 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2768 return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr) 2769 }, 2770 all...) 
2771 2772 /******** runtime/internal/sys ********/ 2773 addF("runtime/internal/sys", "Ctz32", 2774 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2775 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 2776 }, 2777 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2778 addF("runtime/internal/sys", "Ctz64", 2779 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2780 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 2781 }, 2782 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2783 addF("runtime/internal/sys", "Bswap32", 2784 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2785 return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0]) 2786 }, 2787 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2788 addF("runtime/internal/sys", "Bswap64", 2789 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2790 return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0]) 2791 }, 2792 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2793 2794 /******** runtime/internal/atomic ********/ 2795 addF("runtime/internal/atomic", "Load", 2796 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2797 v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem()) 2798 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2799 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2800 }, 2801 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2802 addF("runtime/internal/atomic", "Load64", 2803 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2804 v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) 2805 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2806 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2807 }, 2808 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2809 addF("runtime/internal/atomic", "Loadp", 2810 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2811 v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) 2812 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2813 return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) 2814 }, 2815 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2816 2817 addF("runtime/internal/atomic", "Store", 2818 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2819 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) 2820 return nil 2821 }, 2822 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2823 addF("runtime/internal/atomic", "Store64", 2824 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2825 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) 2826 return nil 2827 }, 2828 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2829 addF("runtime/internal/atomic", "StorepNoWB", 2830 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2831 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) 2832 return nil 2833 }, 2834 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64) 2835 2836 addF("runtime/internal/atomic", "Xchg", 2837 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2838 v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) 2839 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2840 
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2841 }, 2842 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2843 addF("runtime/internal/atomic", "Xchg64", 2844 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2845 v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) 2846 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2847 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2848 }, 2849 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2850 2851 addF("runtime/internal/atomic", "Xadd", 2852 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2853 v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) 2854 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2855 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2856 }, 2857 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2858 addF("runtime/internal/atomic", "Xadd64", 2859 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2860 v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) 2861 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2862 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2863 }, 2864 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2865 2866 addF("runtime/internal/atomic", "Cas", 2867 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2868 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) 2869 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2870 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2871 }, 2872 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2873 addF("runtime/internal/atomic", "Cas64", 2874 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2875 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) 2876 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2877 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2878 }, 2879 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2880 2881 addF("runtime/internal/atomic", "And8", 2882 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2883 s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) 2884 return nil 2885 }, 2886 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2887 addF("runtime/internal/atomic", "Or8", 2888 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2889 s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) 2890 return nil 2891 }, 2892 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2893 2894 alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...) 2895 alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...) 2896 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...) 2897 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...) 2898 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...) 2899 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...) 
2900 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...) 2901 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...) 2902 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...) 2903 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...) 2904 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...) 2905 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...) 2906 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...) 2907 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...) 2908 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...) 2909 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...) 2910 2911 /******** math ********/ 2912 addF("math", "Sqrt", 2913 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2914 return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0]) 2915 }, 2916 sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.S390X) 2917 addF("math", "Trunc", 2918 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2919 return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0]) 2920 }, 2921 sys.ARM64, sys.PPC64, sys.S390X) 2922 addF("math", "Ceil", 2923 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2924 return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0]) 2925 }, 2926 sys.ARM64, sys.PPC64, sys.S390X) 2927 addF("math", "Floor", 2928 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2929 return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0]) 2930 }, 2931 sys.ARM64, sys.PPC64, sys.S390X) 2932 addF("math", "Round", 2933 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2934 return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0]) 2935 }, 2936 sys.ARM64, sys.S390X) 2937 addF("math", "RoundToEven", 2938 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2939 return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0]) 2940 }, 2941 sys.S390X) 2942 addF("math", "Abs", 2943 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2944 return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0]) 2945 }, 2946 sys.PPC64) 2947 addF("math", "Copysign", 2948 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2949 return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1]) 2950 }, 2951 sys.PPC64) 2952 2953 makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2954 return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2955 aux := syslook("support_sse41").Sym.Linksym() 2956 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb) 2957 v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem()) 2958 b := s.endBlock() 2959 b.Kind = ssa.BlockIf 2960 b.SetControl(v) 2961 bTrue := s.f.NewBlock(ssa.BlockPlain) 2962 bFalse := s.f.NewBlock(ssa.BlockPlain) 2963 bEnd := s.f.NewBlock(ssa.BlockPlain) 2964 b.AddEdgeTo(bTrue) 2965 b.AddEdgeTo(bFalse) 2966 b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays 2967 2968 // We have the intrinsic - use it directly. 2969 s.startBlock(bTrue) 2970 s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0]) 2971 s.endBlock().AddEdgeTo(bEnd) 2972 2973 // Call the pure Go version. 
2974 s.startBlock(bFalse) 2975 a := s.call(n, callNormal) 2976 s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TFLOAT64], a, s.mem()) 2977 s.endBlock().AddEdgeTo(bEnd) 2978 2979 // Merge results. 2980 s.startBlock(bEnd) 2981 return s.variable(n, types.Types[TFLOAT64]) 2982 } 2983 } 2984 addF("math", "RoundToEven", 2985 makeRoundAMD64(ssa.OpRoundToEven), 2986 sys.AMD64) 2987 addF("math", "Floor", 2988 makeRoundAMD64(ssa.OpFloor), 2989 sys.AMD64) 2990 addF("math", "Ceil", 2991 makeRoundAMD64(ssa.OpCeil), 2992 sys.AMD64) 2993 addF("math", "Trunc", 2994 makeRoundAMD64(ssa.OpTrunc), 2995 sys.AMD64) 2996 2997 /******** math/bits ********/ 2998 addF("math/bits", "TrailingZeros64", 2999 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3000 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 3001 }, 3002 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3003 addF("math/bits", "TrailingZeros32", 3004 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3005 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 3006 }, 3007 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3008 addF("math/bits", "TrailingZeros16", 3009 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3010 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 3011 c := s.constInt32(types.Types[TUINT32], 1<<16) 3012 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 3013 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 3014 }, 3015 sys.ARM, sys.MIPS) 3016 addF("math/bits", "TrailingZeros16", 3017 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3018 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 3019 c := s.constInt64(types.Types[TUINT64], 1<<16) 3020 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 3021 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 3022 }, 3023 sys.AMD64, sys.ARM64, sys.S390X) 3024 addF("math/bits", "TrailingZeros8", 3025 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3026 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 3027 c := s.constInt32(types.Types[TUINT32], 1<<8) 3028 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 3029 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 3030 }, 3031 sys.ARM, sys.MIPS) 3032 addF("math/bits", "TrailingZeros8", 3033 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3034 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 3035 c := s.constInt64(types.Types[TUINT64], 1<<8) 3036 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 3037 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 3038 }, 3039 sys.AMD64, sys.ARM64, sys.S390X) 3040 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...) 3041 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...) 3042 // ReverseBytes inlines correctly, no need to intrinsify it. 3043 // ReverseBytes16 lowers to a rotate, no need for anything special here. 
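// The TrailingZeros16/8 builders above OR the zero-extended argument with 1<<16 or 1<<8 before the ctz op, so a zero input yields exactly 16 or 8.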
3044 addF("math/bits", "Len64", 3045 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3046 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 3047 }, 3048 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3049 addF("math/bits", "Len32", 3050 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3051 if s.config.PtrSize == 4 { 3052 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 3053 } 3054 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0]) 3055 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 3056 }, 3057 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3058 addF("math/bits", "Len16", 3059 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3060 if s.config.PtrSize == 4 { 3061 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 3062 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 3063 } 3064 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 3065 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 3066 }, 3067 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3068 // Note: disabled on AMD64 because the Go code is faster! 3069 addF("math/bits", "Len8", 3070 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3071 if s.config.PtrSize == 4 { 3072 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 3073 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 3074 } 3075 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 3076 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 3077 }, 3078 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3079 3080 addF("math/bits", "Len", 3081 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3082 if s.config.PtrSize == 4 { 3083 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 3084 } 3085 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 3086 }, 3087 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3088 // LeadingZeros is handled because it trivially calls Len. 
3089 addF("math/bits", "Reverse64", 3090 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3091 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 3092 }, 3093 sys.ARM64) 3094 addF("math/bits", "Reverse32", 3095 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3096 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 3097 }, 3098 sys.ARM64) 3099 addF("math/bits", "Reverse16", 3100 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3101 return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0]) 3102 }, 3103 sys.ARM64) 3104 addF("math/bits", "Reverse8", 3105 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3106 return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0]) 3107 }, 3108 sys.ARM64) 3109 addF("math/bits", "Reverse", 3110 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3111 if s.config.PtrSize == 4 { 3112 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 3113 } 3114 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 3115 }, 3116 sys.ARM64) 3117 makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3118 return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3119 aux := syslook("support_popcnt").Sym.Linksym() 3120 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb) 3121 v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem()) 3122 b := s.endBlock() 3123 b.Kind = ssa.BlockIf 3124 b.SetControl(v) 3125 bTrue := s.f.NewBlock(ssa.BlockPlain) 3126 bFalse := s.f.NewBlock(ssa.BlockPlain) 3127 bEnd := s.f.NewBlock(ssa.BlockPlain) 3128 b.AddEdgeTo(bTrue) 3129 b.AddEdgeTo(bFalse) 3130 b.Likely = ssa.BranchLikely // most machines have popcnt nowadays 3131 3132 // We have the intrinsic - use it directly. 3133 s.startBlock(bTrue) 3134 op := op64 3135 if s.config.PtrSize == 4 { 3136 op = op32 3137 } 3138 s.vars[n] = s.newValue1(op, types.Types[TINT], args[0]) 3139 s.endBlock().AddEdgeTo(bEnd) 3140 3141 // Call the pure Go version. 3142 s.startBlock(bFalse) 3143 a := s.call(n, callNormal) 3144 s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem()) 3145 s.endBlock().AddEdgeTo(bEnd) 3146 3147 // Merge results. 3148 s.startBlock(bEnd) 3149 return s.variable(n, types.Types[TINT]) 3150 } 3151 } 3152 addF("math/bits", "OnesCount64", 3153 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64), 3154 sys.AMD64) 3155 addF("math/bits", "OnesCount64", 3156 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3157 return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0]) 3158 }, 3159 sys.PPC64, sys.ARM64) 3160 addF("math/bits", "OnesCount32", 3161 makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32), 3162 sys.AMD64) 3163 addF("math/bits", "OnesCount32", 3164 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3165 return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0]) 3166 }, 3167 sys.PPC64, sys.ARM64) 3168 addF("math/bits", "OnesCount16", 3169 makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16), 3170 sys.AMD64) 3171 addF("math/bits", "OnesCount16", 3172 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3173 return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0]) 3174 }, 3175 sys.ARM64) 3176 // Note: no OnesCount8, the Go implementation is faster - just a table load. 
3177 addF("math/bits", "OnesCount", 3178 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32), 3179 sys.AMD64) 3180 3181 /******** sync/atomic ********/ 3182 3183 // Note: these are disabled by flag_race in findIntrinsic below. 3184 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...) 3185 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...) 3186 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...) 3187 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...) 3188 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...) 3189 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...) 3190 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...) 3191 3192 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...) 3193 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...) 3194 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. 3195 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...) 3196 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...) 3197 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...) 3198 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...) 3199 3200 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...) 3201 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...) 3202 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...) 3203 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...) 3204 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...) 3205 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...) 3206 3207 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...) 3208 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...) 3209 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...) 3210 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...) 3211 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...) 3212 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...) 3213 3214 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...) 3215 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...) 3216 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...) 3217 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...) 3218 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...) 3219 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...) 
3220 3221 /******** math/big ********/ 3222 add("math/big", "mulWW", 3223 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3224 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) 3225 }, 3226 sys.ArchAMD64) 3227 add("math/big", "divWW", 3228 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3229 return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) 3230 }, 3231 sys.ArchAMD64) 3232 } 3233 3234 // findIntrinsic returns a function which builds the SSA equivalent of the 3235 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. 3236 func findIntrinsic(sym *types.Sym) intrinsicBuilder { 3237 if ssa.IntrinsicsDisable { 3238 return nil 3239 } 3240 if sym == nil || sym.Pkg == nil { 3241 return nil 3242 } 3243 pkg := sym.Pkg.Path 3244 if sym.Pkg == localpkg { 3245 pkg = myimportpath 3246 } 3247 if flag_race && pkg == "sync/atomic" { 3248 // The race detector needs to be able to intercept these calls. 3249 // We can't intrinsify them. 3250 return nil 3251 } 3252 // Skip intrinsifying math functions (which may contain hard-float 3253 // instructions) when soft-float 3254 if thearch.SoftFloat && pkg == "math" { 3255 return nil 3256 } 3257 3258 fn := sym.Name 3259 return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] 3260 } 3261 3262 func isIntrinsicCall(n *Node) bool { 3263 if n == nil || n.Left == nil { 3264 return false 3265 } 3266 return findIntrinsic(n.Left.Sym) != nil 3267 } 3268 3269 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 3270 func (s *state) intrinsicCall(n *Node) *ssa.Value { 3271 v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) 3272 if ssa.IntrinsicsDebug > 0 { 3273 x := v 3274 if x == nil { 3275 x = s.mem() 3276 } 3277 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { 3278 x = x.Args[0] 3279 } 3280 Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) 3281 } 3282 return v 3283 } 3284 3285 type callArg struct { 3286 offset int64 3287 v *ssa.Value 3288 } 3289 type byOffset []callArg 3290 3291 func (x byOffset) Len() int { return len(x) } 3292 func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 3293 func (x byOffset) Less(i, j int) bool { 3294 return x[i].offset < x[j].offset 3295 } 3296 3297 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. 3298 func (s *state) intrinsicArgs(n *Node) []*ssa.Value { 3299 // This code is complicated because of how walk transforms calls. For a call node, 3300 // each entry in n.List is either an assignment to OINDREGSP which actually 3301 // stores an arg, or an assignment to a temporary which computes an arg 3302 // which is later assigned. 3303 // The args can also be out of order. 3304 // TODO: when walk goes away someday, this code can go away also. 3305 var args []callArg 3306 temps := map[*Node]*ssa.Value{} 3307 for _, a := range n.List.Slice() { 3308 if a.Op != OAS { 3309 s.Fatalf("non-assignment as a function argument %v", a.Op) 3310 } 3311 l, r := a.Left, a.Right 3312 switch l.Op { 3313 case ONAME: 3314 // Evaluate and store to "temporary". 3315 // Walk ensures these temporaries are dead outside of n. 3316 temps[l] = s.expr(r) 3317 case OINDREGSP: 3318 // Store a value to an argument slot. 3319 var v *ssa.Value 3320 if x, ok := temps[r]; ok { 3321 // This is a previously computed temporary. 
3322 v = x 3323 } else { 3324 // This is an explicit value; evaluate it. 3325 v = s.expr(r) 3326 } 3327 args = append(args, callArg{l.Xoffset, v}) 3328 default: 3329 s.Fatalf("function argument assignment target not allowed: %v", l.Op) 3330 } 3331 } 3332 sort.Sort(byOffset(args)) 3333 res := make([]*ssa.Value, len(args)) 3334 for i, a := range args { 3335 res[i] = a.v 3336 } 3337 return res 3338 } 3339 3340 // Calls the function n using the specified call type. 3341 // Returns the address of the return value (or nil if none). 3342 func (s *state) call(n *Node, k callKind) *ssa.Value { 3343 var sym *types.Sym // target symbol (if static) 3344 var closure *ssa.Value // ptr to closure to run (if dynamic) 3345 var codeptr *ssa.Value // ptr to target code (if dynamic) 3346 var rcvr *ssa.Value // receiver to set 3347 fn := n.Left 3348 switch n.Op { 3349 case OCALLFUNC: 3350 if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC { 3351 sym = fn.Sym 3352 break 3353 } 3354 closure = s.expr(fn) 3355 case OCALLMETH: 3356 if fn.Op != ODOTMETH { 3357 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 3358 } 3359 if k == callNormal { 3360 sym = fn.Sym 3361 break 3362 } 3363 // Make a name n2 for the function. 3364 // fn.Sym might be sync.(*Mutex).Unlock. 3365 // Make a PFUNC node out of that, then evaluate it. 3366 // We get back an SSA value representing &sync.(*Mutex).Unlock·f. 3367 // We can then pass that to defer or go. 3368 n2 := newnamel(fn.Pos, fn.Sym) 3369 n2.Name.Curfn = s.curfn 3370 n2.SetClass(PFUNC) 3371 n2.Pos = fn.Pos 3372 n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. 3373 closure = s.expr(n2) 3374 // Note: receiver is already assigned in n.List, so we don't 3375 // want to set it here. 3376 case OCALLINTER: 3377 if fn.Op != ODOTINTER { 3378 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 3379 } 3380 i := s.expr(fn.Left) 3381 itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i) 3382 s.nilCheck(itab) 3383 itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab 3384 itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) 3385 if k == callNormal { 3386 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem()) 3387 } else { 3388 closure = itab 3389 } 3390 rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i) 3391 } 3392 dowidth(fn.Type) 3393 stksize := fn.Type.ArgWidth() // includes receiver 3394 3395 // Run all argument assignments. The arg slots have already 3396 // been offset by the appropriate amount (+2*widthptr for go/defer, 3397 // +widthptr for interface calls). 3398 // For OCALLMETH, the receiver is set in these statements. 3399 s.stmtList(n.List) 3400 3401 // Set receiver (for interface calls) 3402 if rcvr != nil { 3403 argStart := Ctxt.FixedFrameSize() 3404 if k != callNormal { 3405 argStart += int64(2 * Widthptr) 3406 } 3407 addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) 3408 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem()) 3409 } 3410 3411 // Defer/go args 3412 if k != callNormal { 3413 // Write argsize and closure (args to Newproc/Deferproc). 
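		// Illustrative frame layout for "defer f(a, b)" on a 64-bit target (a
		// sketch; offsets are relative to the fixed frame size and match the two
		// stores below plus the +2*Widthptr shift walk applied to f's own args):
		//	+0   siz (32-bit)   f's argument bytes, first argument to deferproc/newproc
		//	+8   fn  *funcval   closure for f
		//	+16  a, b ...       f's own arguments, stored by the assignments above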
3414 argStart := Ctxt.FixedFrameSize() 3415 argsize := s.constInt32(types.Types[TUINT32], int32(stksize)) 3416 addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) 3417 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINT32], addr, argsize, s.mem()) 3418 addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr)) 3419 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem()) 3420 stksize += 2 * int64(Widthptr) 3421 } 3422 3423 // call target 3424 var call *ssa.Value 3425 switch { 3426 case k == callDefer: 3427 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Deferproc, s.mem()) 3428 case k == callGo: 3429 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Newproc, s.mem()) 3430 case closure != nil: 3431 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem()) 3432 call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem()) 3433 case codeptr != nil: 3434 call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem()) 3435 case sym != nil: 3436 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem()) 3437 default: 3438 Fatalf("bad call type %v %v", n.Op, n) 3439 } 3440 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 3441 s.vars[&memVar] = call 3442 3443 // Finish block for defers 3444 if k == callDefer { 3445 b := s.endBlock() 3446 b.Kind = ssa.BlockDefer 3447 b.SetControl(call) 3448 bNext := s.f.NewBlock(ssa.BlockPlain) 3449 b.AddEdgeTo(bNext) 3450 // Add recover edge to exit code. 3451 r := s.f.NewBlock(ssa.BlockPlain) 3452 s.startBlock(r) 3453 s.exit() 3454 b.AddEdgeTo(r) 3455 b.Likely = ssa.BranchLikely 3456 s.startBlock(bNext) 3457 } 3458 3459 res := n.Left.Type.Results() 3460 if res.NumFields() == 0 || k != callNormal { 3461 // call has no return value. Continue with the next statement. 3462 return nil 3463 } 3464 fp := res.Field(0) 3465 return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()) 3466 } 3467 3468 // etypesign returns the signed-ness of e, for integer/pointer etypes. 3469 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 3470 func etypesign(e types.EType) int8 { 3471 switch e { 3472 case TINT8, TINT16, TINT32, TINT64, TINT: 3473 return -1 3474 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 3475 return +1 3476 } 3477 return 0 3478 } 3479 3480 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 3481 // The value that the returned Value represents is guaranteed to be non-nil. 3482 // If bounded is true then this address does not require a nil check for its operand 3483 // even if that would otherwise be implied. 3484 func (s *state) addr(n *Node, bounded bool) *ssa.Value { 3485 t := types.NewPtr(n.Type) 3486 switch n.Op { 3487 case ONAME: 3488 switch n.Class() { 3489 case PEXTERN: 3490 // global variable 3491 v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb) 3492 // TODO: Make OpAddr use AuxInt as well as Aux. 3493 if n.Xoffset != 0 { 3494 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 3495 } 3496 return v 3497 case PPARAM: 3498 // parameter slot 3499 v := s.decladdrs[n] 3500 if v != nil { 3501 return v 3502 } 3503 if n == nodfp { 3504 // Special arg that points to the frame pointer (Used by ORECOVER). 3505 return s.entryNewValue1A(ssa.OpAddr, t, n, s.sp) 3506 } 3507 s.Fatalf("addr of undeclared ONAME %v. 
declared: %v", n, s.decladdrs) 3508 return nil 3509 case PAUTO: 3510 return s.newValue1A(ssa.OpAddr, t, n, s.sp) 3511 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 3512 // ensure that we reuse symbols for out parameters so 3513 // that cse works on their addresses 3514 return s.newValue1A(ssa.OpAddr, t, n, s.sp) 3515 default: 3516 s.Fatalf("variable address class %v not implemented", n.Class()) 3517 return nil 3518 } 3519 case OINDREGSP: 3520 // indirect off REGSP 3521 // used for storing/loading arguments/returns to/from callees 3522 return s.constOffPtrSP(t, n.Xoffset) 3523 case OINDEX: 3524 if n.Left.Type.IsSlice() { 3525 a := s.expr(n.Left) 3526 i := s.expr(n.Right) 3527 i = s.extendIndex(i, panicindex) 3528 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a) 3529 if !n.Bounded() { 3530 s.boundsCheck(i, len) 3531 } 3532 p := s.newValue1(ssa.OpSlicePtr, t, a) 3533 return s.newValue2(ssa.OpPtrIndex, t, p, i) 3534 } else { // array 3535 a := s.addr(n.Left, bounded) 3536 i := s.expr(n.Right) 3537 i = s.extendIndex(i, panicindex) 3538 len := s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 3539 if !n.Bounded() { 3540 s.boundsCheck(i, len) 3541 } 3542 return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i) 3543 } 3544 case OIND: 3545 return s.exprPtr(n.Left, bounded, n.Pos) 3546 case ODOT: 3547 p := s.addr(n.Left, bounded) 3548 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3549 case ODOTPTR: 3550 p := s.exprPtr(n.Left, bounded, n.Pos) 3551 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3552 case OCLOSUREVAR: 3553 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 3554 s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) 3555 case OCONVNOP: 3556 addr := s.addr(n.Left, bounded) 3557 return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type 3558 case OCALLFUNC, OCALLINTER, OCALLMETH: 3559 return s.call(n, callNormal) 3560 case ODOTTYPE: 3561 v, _ := s.dottype(n, false) 3562 if v.Op != ssa.OpLoad { 3563 s.Fatalf("dottype of non-load") 3564 } 3565 if v.Args[1] != s.mem() { 3566 s.Fatalf("memory no longer live from dottype load") 3567 } 3568 return v.Args[0] 3569 default: 3570 s.Fatalf("unhandled addr %v", n.Op) 3571 return nil 3572 } 3573 } 3574 3575 // canSSA reports whether n is SSA-able. 3576 // n must be an ONAME (or an ODOT sequence with an ONAME base). 3577 func (s *state) canSSA(n *Node) bool { 3578 if Debug['N'] != 0 { 3579 return false 3580 } 3581 for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) { 3582 n = n.Left 3583 } 3584 if n.Op != ONAME { 3585 return false 3586 } 3587 if n.Addrtaken() { 3588 return false 3589 } 3590 if n.isParamHeapCopy() { 3591 return false 3592 } 3593 if n.Class() == PAUTOHEAP { 3594 Fatalf("canSSA of PAUTOHEAP %v", n) 3595 } 3596 switch n.Class() { 3597 case PEXTERN: 3598 return false 3599 case PPARAMOUT: 3600 if s.hasdefer { 3601 // TODO: handle this case? Named return values must be 3602 // in memory so that the deferred function can see them. 3603 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 3604 // Or maybe not, see issue 18860. Even unnamed return values 3605 // must be written back so if a defer recovers, the caller can see them. 3606 return false 3607 } 3608 if s.cgoUnsafeArgs { 3609 // Cgo effectively takes the address of all result args, 3610 // but the compiler can't see that. 
3611 return false 3612 } 3613 } 3614 if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" { 3615 // wrappers generated by genwrapper need to update 3616 // the .this pointer in place. 3617 // TODO: treat as a PPARMOUT? 3618 return false 3619 } 3620 return canSSAType(n.Type) 3621 // TODO: try to make more variables SSAable? 3622 } 3623 3624 // canSSA reports whether variables of type t are SSA-able. 3625 func canSSAType(t *types.Type) bool { 3626 dowidth(t) 3627 if t.Width > int64(4*Widthptr) { 3628 // 4*Widthptr is an arbitrary constant. We want it 3629 // to be at least 3*Widthptr so slices can be registerized. 3630 // Too big and we'll introduce too much register pressure. 3631 return false 3632 } 3633 switch t.Etype { 3634 case TARRAY: 3635 // We can't do larger arrays because dynamic indexing is 3636 // not supported on SSA variables. 3637 // TODO: allow if all indexes are constant. 3638 if t.NumElem() <= 1 { 3639 return canSSAType(t.Elem()) 3640 } 3641 return false 3642 case TSTRUCT: 3643 if t.NumFields() > ssa.MaxStruct { 3644 return false 3645 } 3646 for _, t1 := range t.Fields().Slice() { 3647 if !canSSAType(t1.Type) { 3648 return false 3649 } 3650 } 3651 return true 3652 default: 3653 return true 3654 } 3655 } 3656 3657 // exprPtr evaluates n to a pointer and nil-checks it. 3658 func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value { 3659 p := s.expr(n) 3660 if bounded || n.NonNil() { 3661 if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 { 3662 s.f.Warnl(lineno, "removed nil check") 3663 } 3664 return p 3665 } 3666 s.nilCheck(p) 3667 return p 3668 } 3669 3670 // nilCheck generates nil pointer checking code. 3671 // Used only for automatically inserted nil checks, 3672 // not for user code like 'x != nil'. 3673 func (s *state) nilCheck(ptr *ssa.Value) { 3674 if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() { 3675 return 3676 } 3677 s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem()) 3678 } 3679 3680 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. 3681 // Starts a new block on return. 3682 // idx is already converted to full int width. 3683 func (s *state) boundsCheck(idx, len *ssa.Value) { 3684 if Debug['B'] != 0 { 3685 return 3686 } 3687 3688 // bounds check 3689 cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len) 3690 s.check(cmp, panicindex) 3691 } 3692 3693 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not. 3694 // Starts a new block on return. 3695 // idx and len are already converted to full int width. 3696 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) { 3697 if Debug['B'] != 0 { 3698 return 3699 } 3700 3701 // bounds check 3702 cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len) 3703 s.check(cmp, panicslice) 3704 } 3705 3706 // If cmp (a bool) is false, panic using the given function. 
3707 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { 3708 b := s.endBlock() 3709 b.Kind = ssa.BlockIf 3710 b.SetControl(cmp) 3711 b.Likely = ssa.BranchLikely 3712 bNext := s.f.NewBlock(ssa.BlockPlain) 3713 line := s.peekPos() 3714 pos := Ctxt.PosTable.Pos(line) 3715 fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()} 3716 bPanic := s.panics[fl] 3717 if bPanic == nil { 3718 bPanic = s.f.NewBlock(ssa.BlockPlain) 3719 s.panics[fl] = bPanic 3720 s.startBlock(bPanic) 3721 // The panic call takes/returns memory to ensure that the right 3722 // memory state is observed if the panic happens. 3723 s.rtcall(fn, false, nil) 3724 } 3725 b.AddEdgeTo(bNext) 3726 b.AddEdgeTo(bPanic) 3727 s.startBlock(bNext) 3728 } 3729 3730 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { 3731 needcheck := true 3732 switch b.Op { 3733 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: 3734 if b.AuxInt != 0 { 3735 needcheck = false 3736 } 3737 } 3738 if needcheck { 3739 // do a size-appropriate check for zero 3740 cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type)) 3741 s.check(cmp, panicdivide) 3742 } 3743 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 3744 } 3745 3746 // rtcall issues a call to the given runtime function fn with the listed args. 3747 // Returns a slice of results of the given result types. 3748 // The call is added to the end of the current block. 3749 // If returns is false, the block is marked as an exit block. 3750 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value { 3751 // Write args to the stack 3752 off := Ctxt.FixedFrameSize() 3753 for _, arg := range args { 3754 t := arg.Type 3755 off = Rnd(off, t.Alignment()) 3756 ptr := s.constOffPtrSP(t.PtrTo(), off) 3757 size := t.Size() 3758 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, ptr, arg, s.mem()) 3759 off += size 3760 } 3761 off = Rnd(off, int64(Widthreg)) 3762 3763 // Issue call 3764 call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem()) 3765 s.vars[&memVar] = call 3766 3767 if !returns { 3768 // Finish block 3769 b := s.endBlock() 3770 b.Kind = ssa.BlockExit 3771 b.SetControl(call) 3772 call.AuxInt = off - Ctxt.FixedFrameSize() 3773 if len(results) > 0 { 3774 Fatalf("panic call can't have results") 3775 } 3776 return nil 3777 } 3778 3779 // Load results 3780 res := make([]*ssa.Value, len(results)) 3781 for i, t := range results { 3782 off = Rnd(off, t.Alignment()) 3783 ptr := s.constOffPtrSP(types.NewPtr(t), off) 3784 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) 3785 off += t.Size() 3786 } 3787 off = Rnd(off, int64(Widthptr)) 3788 3789 // Remember how much callee stack space we needed. 3790 call.AuxInt = off 3791 3792 return res 3793 } 3794 3795 // do *left = right for type t. 3796 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) { 3797 if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) { 3798 // Known to not have write barrier. Store the whole type. 3799 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem()) 3800 return 3801 } 3802 3803 // store scalar fields first, so write barrier stores for 3804 // pointer fields can be grouped together, and scalar values 3805 // don't need to be live across the write barrier call. 3806 // TODO: if the writebarrier pass knows how to reorder stores, 3807 // we can do a single store here as long as skip==0. 
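	// For example, storing a string value lowers (in sketch form) to a scalar
	// store of the length followed by the pointer store that may later get a
	// write barrier, matching storeTypeScalars and storeTypePtrs below:
	//	*(left+PtrSize) = right.len // scalar field: never needs a write barrier
	//	*(left+0)       = right.ptr // pointer field: write barrier pass may rewrite this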
3808 s.storeTypeScalars(t, left, right, skip) 3809 if skip&skipPtr == 0 && types.Haspointers(t) { 3810 s.storeTypePtrs(t, left, right) 3811 } 3812 } 3813 3814 // do *left = right for all scalar (non-pointer) parts of t. 3815 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) { 3816 switch { 3817 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): 3818 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem()) 3819 case t.IsPtrShaped(): 3820 // no scalar fields. 3821 case t.IsString(): 3822 if skip&skipLen != 0 { 3823 return 3824 } 3825 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right) 3826 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 3827 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem()) 3828 case t.IsSlice(): 3829 if skip&skipLen == 0 { 3830 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right) 3831 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 3832 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem()) 3833 } 3834 if skip&skipCap == 0 { 3835 cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right) 3836 capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left) 3837 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capAddr, cap, s.mem()) 3838 } 3839 case t.IsInterface(): 3840 // itab field doesn't need a write barrier (even though it is a pointer). 3841 itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right) 3842 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], left, itab, s.mem()) 3843 case t.IsStruct(): 3844 n := t.NumFields() 3845 for i := 0; i < n; i++ { 3846 ft := t.FieldType(i) 3847 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3848 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3849 s.storeTypeScalars(ft, addr, val, 0) 3850 } 3851 case t.IsArray() && t.NumElem() == 0: 3852 // nothing 3853 case t.IsArray() && t.NumElem() == 1: 3854 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0) 3855 default: 3856 s.Fatalf("bad write barrier type %v", t) 3857 } 3858 } 3859 3860 // do *left = right for all pointer parts of t. 3861 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { 3862 switch { 3863 case t.IsPtrShaped(): 3864 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem()) 3865 case t.IsString(): 3866 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right) 3867 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem()) 3868 case t.IsSlice(): 3869 elType := types.NewPtr(t.Elem()) 3870 ptr := s.newValue1(ssa.OpSlicePtr, elType, right) 3871 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, elType, left, ptr, s.mem()) 3872 case t.IsInterface(): 3873 // itab field is treated as a scalar. 
3874 idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right) 3875 idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left) 3876 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem()) 3877 case t.IsStruct(): 3878 n := t.NumFields() 3879 for i := 0; i < n; i++ { 3880 ft := t.FieldType(i) 3881 if !types.Haspointers(ft) { 3882 continue 3883 } 3884 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3885 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3886 s.storeTypePtrs(ft, addr, val) 3887 } 3888 case t.IsArray() && t.NumElem() == 0: 3889 // nothing 3890 case t.IsArray() && t.NumElem() == 1: 3891 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3892 default: 3893 s.Fatalf("bad write barrier type %v", t) 3894 } 3895 } 3896 3897 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result. 3898 // i,j,k may be nil, in which case they are set to their default value. 3899 // t is a slice, ptr to array, or string type. 3900 func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) { 3901 var elemtype *types.Type 3902 var ptrtype *types.Type 3903 var ptr *ssa.Value 3904 var len *ssa.Value 3905 var cap *ssa.Value 3906 zero := s.constInt(types.Types[TINT], 0) 3907 switch { 3908 case t.IsSlice(): 3909 elemtype = t.Elem() 3910 ptrtype = types.NewPtr(elemtype) 3911 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v) 3912 len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v) 3913 cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v) 3914 case t.IsString(): 3915 elemtype = types.Types[TUINT8] 3916 ptrtype = types.NewPtr(elemtype) 3917 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v) 3918 len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v) 3919 cap = len 3920 case t.IsPtr(): 3921 if !t.Elem().IsArray() { 3922 s.Fatalf("bad ptr to array in slice %v\n", t) 3923 } 3924 elemtype = t.Elem().Elem() 3925 ptrtype = types.NewPtr(elemtype) 3926 s.nilCheck(v) 3927 ptr = v 3928 len = s.constInt(types.Types[TINT], t.Elem().NumElem()) 3929 cap = len 3930 default: 3931 s.Fatalf("bad type in slice %v\n", t) 3932 } 3933 3934 // Set default values 3935 if i == nil { 3936 i = zero 3937 } 3938 if j == nil { 3939 j = len 3940 } 3941 if k == nil { 3942 k = cap 3943 } 3944 3945 // Panic if slice indices are not in bounds. 3946 s.sliceBoundsCheck(i, j) 3947 if j != k { 3948 s.sliceBoundsCheck(j, k) 3949 } 3950 if k != cap { 3951 s.sliceBoundsCheck(k, cap) 3952 } 3953 3954 // Generate the following code assuming that indexes are in bounds. 3955 // The masking is to make sure that we don't generate a slice 3956 // that points to the next object in memory. 3957 // rlen = j - i 3958 // rcap = k - i 3959 // delta = i * elemsize 3960 // rptr = p + delta&mask(rcap) 3961 // result = (SliceMake rptr rlen rcap) 3962 // where mask(x) is 0 if x==0 and -1 if x>0. 3963 subOp := s.ssaOp(OSUB, types.Types[TINT]) 3964 mulOp := s.ssaOp(OMUL, types.Types[TINT]) 3965 andOp := s.ssaOp(OAND, types.Types[TINT]) 3966 rlen := s.newValue2(subOp, types.Types[TINT], j, i) 3967 var rcap *ssa.Value 3968 switch { 3969 case t.IsString(): 3970 // Capacity of the result is unimportant. However, we use 3971 // rcap to test if we've generated a zero-length slice. 3972 // Use length of strings for that. 
3973 rcap = rlen 3974 case j == k: 3975 rcap = rlen 3976 default: 3977 rcap = s.newValue2(subOp, types.Types[TINT], k, i) 3978 } 3979 3980 var rptr *ssa.Value 3981 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 { 3982 // No pointer arithmetic necessary. 3983 rptr = ptr 3984 } else { 3985 // delta = # of bytes to offset pointer by. 3986 delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width)) 3987 // If we're slicing to the point where the capacity is zero, 3988 // zero out the delta. 3989 mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap) 3990 delta = s.newValue2(andOp, types.Types[TINT], delta, mask) 3991 // Compute rptr = ptr + delta 3992 rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta) 3993 } 3994 3995 return rptr, rlen, rcap 3996 } 3997 3998 type u642fcvtTab struct { 3999 geq, cvt2F, and, rsh, or, add ssa.Op 4000 one func(*state, *types.Type, int64) *ssa.Value 4001 } 4002 4003 var u64_f64 = u642fcvtTab{ 4004 geq: ssa.OpGeq64, 4005 cvt2F: ssa.OpCvt64to64F, 4006 and: ssa.OpAnd64, 4007 rsh: ssa.OpRsh64Ux64, 4008 or: ssa.OpOr64, 4009 add: ssa.OpAdd64F, 4010 one: (*state).constInt64, 4011 } 4012 4013 var u64_f32 = u642fcvtTab{ 4014 geq: ssa.OpGeq64, 4015 cvt2F: ssa.OpCvt64to32F, 4016 and: ssa.OpAnd64, 4017 rsh: ssa.OpRsh64Ux64, 4018 or: ssa.OpOr64, 4019 add: ssa.OpAdd32F, 4020 one: (*state).constInt64, 4021 } 4022 4023 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4024 return s.uint64Tofloat(&u64_f64, n, x, ft, tt) 4025 } 4026 4027 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4028 return s.uint64Tofloat(&u64_f32, n, x, ft, tt) 4029 } 4030 4031 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4032 // if x >= 0 { 4033 // result = (floatY) x 4034 // } else { 4035 // y = uintX(x) ; y = x & 1 4036 // z = uintX(x) ; z = z >> 1 4037 // z = z >> 1 4038 // z = z | y 4039 // result = floatY(z) 4040 // result = result + result 4041 // } 4042 // 4043 // Code borrowed from old code generator. 4044 // What's going on: large 64-bit "unsigned" looks like 4045 // negative number to hardware's integer-to-float 4046 // conversion. However, because the mantissa is only 4047 // 63 bits, we don't need the LSB, so instead we do an 4048 // unsigned right shift (divide by two), convert, and 4049 // double. However, before we do that, we need to be 4050 // sure that we do not lose a "1" if that made the 4051 // difference in the resulting rounding. Therefore, we 4052 // preserve it, and OR (not ADD) it back in. The case 4053 // that matters is when the eleven discarded bits are 4054 // equal to 10000000001; that rounds up, and the 1 cannot 4055 // be lost else it would round down if the LSB of the 4056 // candidate mantissa is 0. 
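	// The else branch below is, written as ordinary Go (a sketch of the same steps):
	//	y := x & 1          // remember the bit that halving would drop
	//	z := x>>1 | y       // halve so the value fits in 63 bits; OR the bit back in
	//	f := float64(z) * 2 // convert the now-nonnegative value, then double
	// ORing (rather than adding) the low bit keeps it as a sticky bit, so the
	// doubled result rounds the same way a direct conversion would.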
4057 cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft)) 4058 b := s.endBlock() 4059 b.Kind = ssa.BlockIf 4060 b.SetControl(cmp) 4061 b.Likely = ssa.BranchLikely 4062 4063 bThen := s.f.NewBlock(ssa.BlockPlain) 4064 bElse := s.f.NewBlock(ssa.BlockPlain) 4065 bAfter := s.f.NewBlock(ssa.BlockPlain) 4066 4067 b.AddEdgeTo(bThen) 4068 s.startBlock(bThen) 4069 a0 := s.newValue1(cvttab.cvt2F, tt, x) 4070 s.vars[n] = a0 4071 s.endBlock() 4072 bThen.AddEdgeTo(bAfter) 4073 4074 b.AddEdgeTo(bElse) 4075 s.startBlock(bElse) 4076 one := cvttab.one(s, ft, 1) 4077 y := s.newValue2(cvttab.and, ft, x, one) 4078 z := s.newValue2(cvttab.rsh, ft, x, one) 4079 z = s.newValue2(cvttab.or, ft, z, y) 4080 a := s.newValue1(cvttab.cvt2F, tt, z) 4081 a1 := s.newValue2(cvttab.add, tt, a, a) 4082 s.vars[n] = a1 4083 s.endBlock() 4084 bElse.AddEdgeTo(bAfter) 4085 4086 s.startBlock(bAfter) 4087 return s.variable(n, n.Type) 4088 } 4089 4090 type u322fcvtTab struct { 4091 cvtI2F, cvtF2F ssa.Op 4092 } 4093 4094 var u32_f64 = u322fcvtTab{ 4095 cvtI2F: ssa.OpCvt32to64F, 4096 cvtF2F: ssa.OpCopy, 4097 } 4098 4099 var u32_f32 = u322fcvtTab{ 4100 cvtI2F: ssa.OpCvt32to32F, 4101 cvtF2F: ssa.OpCvt64Fto32F, 4102 } 4103 4104 func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4105 return s.uint32Tofloat(&u32_f64, n, x, ft, tt) 4106 } 4107 4108 func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4109 return s.uint32Tofloat(&u32_f32, n, x, ft, tt) 4110 } 4111 4112 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4113 // if x >= 0 { 4114 // result = floatY(x) 4115 // } else { 4116 // result = floatY(float64(x) + (1<<32)) 4117 // } 4118 cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft)) 4119 b := s.endBlock() 4120 b.Kind = ssa.BlockIf 4121 b.SetControl(cmp) 4122 b.Likely = ssa.BranchLikely 4123 4124 bThen := s.f.NewBlock(ssa.BlockPlain) 4125 bElse := s.f.NewBlock(ssa.BlockPlain) 4126 bAfter := s.f.NewBlock(ssa.BlockPlain) 4127 4128 b.AddEdgeTo(bThen) 4129 s.startBlock(bThen) 4130 a0 := s.newValue1(cvttab.cvtI2F, tt, x) 4131 s.vars[n] = a0 4132 s.endBlock() 4133 bThen.AddEdgeTo(bAfter) 4134 4135 b.AddEdgeTo(bElse) 4136 s.startBlock(bElse) 4137 a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x) 4138 twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32)) 4139 a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32) 4140 a3 := s.newValue1(cvttab.cvtF2F, tt, a2) 4141 4142 s.vars[n] = a3 4143 s.endBlock() 4144 bElse.AddEdgeTo(bAfter) 4145 4146 s.startBlock(bAfter) 4147 return s.variable(n, n.Type) 4148 } 4149 4150 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
4151 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { 4152 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { 4153 s.Fatalf("node must be a map or a channel") 4154 } 4155 // if n == nil { 4156 // return 0 4157 // } else { 4158 // // len 4159 // return *((*int)n) 4160 // // cap 4161 // return *(((*int)n)+1) 4162 // } 4163 lenType := n.Type 4164 nilValue := s.constNil(types.Types[TUINTPTR]) 4165 cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue) 4166 b := s.endBlock() 4167 b.Kind = ssa.BlockIf 4168 b.SetControl(cmp) 4169 b.Likely = ssa.BranchUnlikely 4170 4171 bThen := s.f.NewBlock(ssa.BlockPlain) 4172 bElse := s.f.NewBlock(ssa.BlockPlain) 4173 bAfter := s.f.NewBlock(ssa.BlockPlain) 4174 4175 // length/capacity of a nil map/chan is zero 4176 b.AddEdgeTo(bThen) 4177 s.startBlock(bThen) 4178 s.vars[n] = s.zeroVal(lenType) 4179 s.endBlock() 4180 bThen.AddEdgeTo(bAfter) 4181 4182 b.AddEdgeTo(bElse) 4183 s.startBlock(bElse) 4184 switch n.Op { 4185 case OLEN: 4186 // length is stored in the first word for map/chan 4187 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem()) 4188 case OCAP: 4189 // capacity is stored in the second word for chan 4190 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) 4191 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem()) 4192 default: 4193 s.Fatalf("op must be OLEN or OCAP") 4194 } 4195 s.endBlock() 4196 bElse.AddEdgeTo(bAfter) 4197 4198 s.startBlock(bAfter) 4199 return s.variable(n, lenType) 4200 } 4201 4202 type f2uCvtTab struct { 4203 ltf, cvt2U, subf, or ssa.Op 4204 floatValue func(*state, *types.Type, float64) *ssa.Value 4205 intValue func(*state, *types.Type, int64) *ssa.Value 4206 cutoff uint64 4207 } 4208 4209 var f32_u64 = f2uCvtTab{ 4210 ltf: ssa.OpLess32F, 4211 cvt2U: ssa.OpCvt32Fto64, 4212 subf: ssa.OpSub32F, 4213 or: ssa.OpOr64, 4214 floatValue: (*state).constFloat32, 4215 intValue: (*state).constInt64, 4216 cutoff: 9223372036854775808, 4217 } 4218 4219 var f64_u64 = f2uCvtTab{ 4220 ltf: ssa.OpLess64F, 4221 cvt2U: ssa.OpCvt64Fto64, 4222 subf: ssa.OpSub64F, 4223 or: ssa.OpOr64, 4224 floatValue: (*state).constFloat64, 4225 intValue: (*state).constInt64, 4226 cutoff: 9223372036854775808, 4227 } 4228 4229 var f32_u32 = f2uCvtTab{ 4230 ltf: ssa.OpLess32F, 4231 cvt2U: ssa.OpCvt32Fto32, 4232 subf: ssa.OpSub32F, 4233 or: ssa.OpOr32, 4234 floatValue: (*state).constFloat32, 4235 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 4236 cutoff: 2147483648, 4237 } 4238 4239 var f64_u32 = f2uCvtTab{ 4240 ltf: ssa.OpLess64F, 4241 cvt2U: ssa.OpCvt64Fto32, 4242 subf: ssa.OpSub64F, 4243 or: ssa.OpOr32, 4244 floatValue: (*state).constFloat64, 4245 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 4246 cutoff: 2147483648, 4247 } 4248 4249 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4250 return s.floatToUint(&f32_u64, n, x, ft, tt) 4251 } 4252 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4253 return s.floatToUint(&f64_u64, n, x, ft, tt) 4254 } 4255 4256 func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4257 return s.floatToUint(&f32_u32, n, x, ft, tt) 4258 } 4259 4260 func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4261 return s.floatToUint(&f64_u32, n, x, ft, tt) 4262 } 4263 4264 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt 
*types.Type) *ssa.Value { 4265 // cutoff:=1<<(intY_Size-1) 4266 // if x < floatX(cutoff) { 4267 // result = uintY(x) 4268 // } else { 4269 // y = x - floatX(cutoff) 4270 // z = uintY(y) 4271 // result = z | -(cutoff) 4272 // } 4273 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) 4274 cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff) 4275 b := s.endBlock() 4276 b.Kind = ssa.BlockIf 4277 b.SetControl(cmp) 4278 b.Likely = ssa.BranchLikely 4279 4280 bThen := s.f.NewBlock(ssa.BlockPlain) 4281 bElse := s.f.NewBlock(ssa.BlockPlain) 4282 bAfter := s.f.NewBlock(ssa.BlockPlain) 4283 4284 b.AddEdgeTo(bThen) 4285 s.startBlock(bThen) 4286 a0 := s.newValue1(cvttab.cvt2U, tt, x) 4287 s.vars[n] = a0 4288 s.endBlock() 4289 bThen.AddEdgeTo(bAfter) 4290 4291 b.AddEdgeTo(bElse) 4292 s.startBlock(bElse) 4293 y := s.newValue2(cvttab.subf, ft, x, cutoff) 4294 y = s.newValue1(cvttab.cvt2U, tt, y) 4295 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) 4296 a1 := s.newValue2(cvttab.or, tt, y, z) 4297 s.vars[n] = a1 4298 s.endBlock() 4299 bElse.AddEdgeTo(bAfter) 4300 4301 s.startBlock(bAfter) 4302 return s.variable(n, n.Type) 4303 } 4304 4305 // dottype generates SSA for a type assertion node. 4306 // commaok indicates whether to panic or return a bool. 4307 // If commaok is false, resok will be nil. 4308 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { 4309 iface := s.expr(n.Left) // input interface 4310 target := s.expr(n.Right) // target type 4311 byteptr := s.f.Config.Types.BytePtr 4312 4313 if n.Type.IsInterface() { 4314 if n.Type.IsEmptyInterface() { 4315 // Converting to an empty interface. 4316 // Input could be an empty or nonempty interface. 4317 if Debug_typeassert > 0 { 4318 Warnl(n.Pos, "type assertion inlined") 4319 } 4320 4321 // Get itab/type field from input. 4322 itab := s.newValue1(ssa.OpITab, byteptr, iface) 4323 // Conversion succeeds iff that field is not nil. 4324 cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr)) 4325 4326 if n.Left.Type.IsEmptyInterface() && commaok { 4327 // Converting empty interface to empty interface with ,ok is just a nil check. 4328 return iface, cond 4329 } 4330 4331 // Branch on nilness. 4332 b := s.endBlock() 4333 b.Kind = ssa.BlockIf 4334 b.SetControl(cond) 4335 b.Likely = ssa.BranchLikely 4336 bOk := s.f.NewBlock(ssa.BlockPlain) 4337 bFail := s.f.NewBlock(ssa.BlockPlain) 4338 b.AddEdgeTo(bOk) 4339 b.AddEdgeTo(bFail) 4340 4341 if !commaok { 4342 // On failure, panic by calling panicnildottype. 4343 s.startBlock(bFail) 4344 s.rtcall(panicnildottype, false, nil, target) 4345 4346 // On success, return (perhaps modified) input interface. 4347 s.startBlock(bOk) 4348 if n.Left.Type.IsEmptyInterface() { 4349 res = iface // Use input interface unchanged. 4350 return 4351 } 4352 // Load type out of itab, build interface with existing idata. 4353 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4354 typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4355 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4356 res = s.newValue2(ssa.OpIMake, n.Type, typ, idata) 4357 return 4358 } 4359 4360 s.startBlock(bOk) 4361 // nonempty -> empty 4362 // Need to load type from itab 4363 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4364 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4365 s.endBlock() 4366 4367 // itab is nil, might as well use that as the nil result. 
4368 s.startBlock(bFail) 4369 s.vars[&typVar] = itab 4370 s.endBlock() 4371 4372 // Merge point. 4373 bEnd := s.f.NewBlock(ssa.BlockPlain) 4374 bOk.AddEdgeTo(bEnd) 4375 bFail.AddEdgeTo(bEnd) 4376 s.startBlock(bEnd) 4377 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4378 res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata) 4379 resok = cond 4380 delete(s.vars, &typVar) 4381 return 4382 } 4383 // converting to a nonempty interface needs a runtime call. 4384 if Debug_typeassert > 0 { 4385 Warnl(n.Pos, "type assertion not inlined") 4386 } 4387 if n.Left.Type.IsEmptyInterface() { 4388 if commaok { 4389 call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) 4390 return call[0], call[1] 4391 } 4392 return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil 4393 } 4394 if commaok { 4395 call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) 4396 return call[0], call[1] 4397 } 4398 return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil 4399 } 4400 4401 if Debug_typeassert > 0 { 4402 Warnl(n.Pos, "type assertion inlined") 4403 } 4404 4405 // Converting to a concrete type. 4406 direct := isdirectiface(n.Type) 4407 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface 4408 if Debug_typeassert > 0 { 4409 Warnl(n.Pos, "type assertion inlined") 4410 } 4411 var targetITab *ssa.Value 4412 if n.Left.Type.IsEmptyInterface() { 4413 // Looking for pointer to target type. 4414 targetITab = target 4415 } else { 4416 // Looking for pointer to itab for target type and source interface. 4417 targetITab = s.expr(n.List.First()) 4418 } 4419 4420 var tmp *Node // temporary for use with large types 4421 var addr *ssa.Value // address of tmp 4422 if commaok && !canSSAType(n.Type) { 4423 // unSSAable type, use temporary. 4424 // TODO: get rid of some of these temporaries. 4425 tmp = tempAt(n.Pos, s.curfn, n.Type) 4426 addr = s.addr(tmp, false) 4427 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) 4428 } 4429 4430 cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab) 4431 b := s.endBlock() 4432 b.Kind = ssa.BlockIf 4433 b.SetControl(cond) 4434 b.Likely = ssa.BranchLikely 4435 4436 bOk := s.f.NewBlock(ssa.BlockPlain) 4437 bFail := s.f.NewBlock(ssa.BlockPlain) 4438 b.AddEdgeTo(bOk) 4439 b.AddEdgeTo(bFail) 4440 4441 if !commaok { 4442 // on failure, panic by calling panicdottype 4443 s.startBlock(bFail) 4444 taddr := s.expr(n.Right.Right) 4445 if n.Left.Type.IsEmptyInterface() { 4446 s.rtcall(panicdottypeE, false, nil, itab, target, taddr) 4447 } else { 4448 s.rtcall(panicdottypeI, false, nil, itab, target, taddr) 4449 } 4450 4451 // on success, return data from interface 4452 s.startBlock(bOk) 4453 if direct { 4454 return s.newValue1(ssa.OpIData, n.Type, iface), nil 4455 } 4456 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4457 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil 4458 } 4459 4460 // commaok is the more complicated case because we have 4461 // a control flow merge point. 4462 bEnd := s.f.NewBlock(ssa.BlockPlain) 4463 // Note that we need a new valVar each time (unlike okVar where we can 4464 // reuse the variable) because it might have a different type every time. 
4465 valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}} 4466 4467 // type assertion succeeded 4468 s.startBlock(bOk) 4469 if tmp == nil { 4470 if direct { 4471 s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface) 4472 } else { 4473 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4474 s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 4475 } 4476 } else { 4477 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4478 store := s.newValue3I(ssa.OpMove, types.TypeMem, n.Type.Size(), addr, p, s.mem()) 4479 store.Aux = n.Type 4480 s.vars[&memVar] = store 4481 } 4482 s.vars[&okVar] = s.constBool(true) 4483 s.endBlock() 4484 bOk.AddEdgeTo(bEnd) 4485 4486 // type assertion failed 4487 s.startBlock(bFail) 4488 if tmp == nil { 4489 s.vars[valVar] = s.zeroVal(n.Type) 4490 } else { 4491 store := s.newValue2I(ssa.OpZero, types.TypeMem, n.Type.Size(), addr, s.mem()) 4492 store.Aux = n.Type 4493 s.vars[&memVar] = store 4494 } 4495 s.vars[&okVar] = s.constBool(false) 4496 s.endBlock() 4497 bFail.AddEdgeTo(bEnd) 4498 4499 // merge point 4500 s.startBlock(bEnd) 4501 if tmp == nil { 4502 res = s.variable(valVar, n.Type) 4503 delete(s.vars, valVar) 4504 } else { 4505 res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 4506 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem()) 4507 } 4508 resok = s.variable(&okVar, types.Types[TBOOL]) 4509 delete(s.vars, &okVar) 4510 return res, resok 4511 } 4512 4513 // variable returns the value of a variable at the current location. 4514 func (s *state) variable(name *Node, t *types.Type) *ssa.Value { 4515 v := s.vars[name] 4516 if v != nil { 4517 return v 4518 } 4519 v = s.fwdVars[name] 4520 if v != nil { 4521 return v 4522 } 4523 4524 if s.curBlock == s.f.Entry { 4525 // No variable should be live at entry. 4526 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v) 4527 } 4528 // Make a FwdRef, which records a value that's live on block input. 4529 // We'll find the matching definition as part of insertPhis. 4530 v = s.newValue0A(ssa.OpFwdRef, t, name) 4531 s.fwdVars[name] = v 4532 s.addNamedValue(name, v) 4533 return v 4534 } 4535 4536 func (s *state) mem() *ssa.Value { 4537 return s.variable(&memVar, types.TypeMem) 4538 } 4539 4540 func (s *state) addNamedValue(n *Node, v *ssa.Value) { 4541 if n.Class() == Pxxx { 4542 // Don't track our dummy nodes (&memVar etc.). 4543 return 4544 } 4545 if n.IsAutoTmp() { 4546 // Don't track temporary variables. 4547 return 4548 } 4549 if n.Class() == PPARAMOUT { 4550 // Don't track named output values. This prevents return values 4551 // from being assigned too early. See #14591 and #14762. TODO: allow this. 4552 return 4553 } 4554 if n.Class() == PAUTO && n.Xoffset != 0 { 4555 s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) 4556 } 4557 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} 4558 values, ok := s.f.NamedValues[loc] 4559 if !ok { 4560 s.f.Names = append(s.f.Names, loc) 4561 } 4562 s.f.NamedValues[loc] = append(values, v) 4563 } 4564 4565 // Branch is an unresolved branch. 4566 type Branch struct { 4567 P *obj.Prog // branch instruction 4568 B *ssa.Block // target 4569 } 4570 4571 // SSAGenState contains state needed during Prog generation. 4572 type SSAGenState struct { 4573 pp *Progs 4574 4575 // Branches remembers all the branch instructions we've seen 4576 // and where they would like to go. 
4577 Branches []Branch 4578 4579 // bstart remembers where each block starts (indexed by block ID) 4580 bstart []*obj.Prog 4581 4582 // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 4583 SSEto387 map[int16]int16 4584 // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. 4585 ScratchFpMem *Node 4586 4587 maxarg int64 // largest frame size for arguments to calls made by the function 4588 4589 // Map from GC safe points to stack map index, generated by 4590 // liveness analysis. 4591 stackMapIndex map[*ssa.Value]int 4592 } 4593 4594 // Prog appends a new Prog. 4595 func (s *SSAGenState) Prog(as obj.As) *obj.Prog { 4596 return s.pp.Prog(as) 4597 } 4598 4599 // Pc returns the current Prog. 4600 func (s *SSAGenState) Pc() *obj.Prog { 4601 return s.pp.next 4602 } 4603 4604 // SetPos sets the current source position. 4605 func (s *SSAGenState) SetPos(pos src.XPos) { 4606 s.pp.pos = pos 4607 } 4608 4609 // DebugFriendlySetPos sets the position subject to heuristics 4610 // that reduce "jumpy" line number churn when debugging. 4611 // Spill/fill/copy instructions from the register allocator, 4612 // phi functions, and instructions with a no-pos position 4613 // are examples of instructions that can cause churn. 4614 func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { 4615 // The two choices here are either to leave lineno unchanged, 4616 // or to explicitly set it to src.NoXPos. Leaving it unchanged 4617 // (reusing the preceding line number) produces slightly better- 4618 // looking assembly language output from the compiler, and is 4619 // expected by some already-existing tests. 4620 // The debug information appears to be the same in either case 4621 switch v.Op { 4622 case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg: 4623 // leave the position unchanged from beginning of block 4624 // or previous line number. 4625 default: 4626 if v.Pos != src.NoXPos { 4627 s.SetPos(v.Pos) 4628 } 4629 } 4630 } 4631 4632 // genssa appends entries to pp for each instruction in f. 4633 func genssa(f *ssa.Func, pp *Progs) { 4634 var s SSAGenState 4635 4636 e := f.Frontend().(*ssafn) 4637 4638 s.stackMapIndex = liveness(e, f) 4639 4640 // Remember where each block starts. 4641 s.bstart = make([]*obj.Prog, f.NumBlocks()) 4642 s.pp = pp 4643 var progToValue map[*obj.Prog]*ssa.Value 4644 var progToBlock map[*obj.Prog]*ssa.Block 4645 var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point. 
4646 var logProgs = e.log 4647 if logProgs { 4648 progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues()) 4649 progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) 4650 f.Logf("genssa %s\n", f.Name) 4651 progToBlock[s.pp.next] = f.Blocks[0] 4652 } 4653 4654 if thearch.Use387 { 4655 s.SSEto387 = map[int16]int16{} 4656 } 4657 4658 s.ScratchFpMem = e.scratchFpMem 4659 4660 if Ctxt.Flag_locationlists { 4661 if cap(f.Cache.ValueToProgAfter) < f.NumValues() { 4662 f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues()) 4663 } 4664 valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()] 4665 for i := range valueToProgAfter { 4666 valueToProgAfter[i] = nil 4667 } 4668 } 4669 4670 // Emit basic blocks 4671 for i, b := range f.Blocks { 4672 s.bstart[b.ID] = s.pp.next 4673 4674 // Emit values in block 4675 thearch.SSAMarkMoves(&s, b) 4676 for _, v := range b.Values { 4677 x := s.pp.next 4678 s.DebugFriendlySetPosFrom(v) 4679 switch v.Op { 4680 case ssa.OpInitMem: 4681 // memory arg needs no code 4682 case ssa.OpArg: 4683 // input args need no code 4684 case ssa.OpSP, ssa.OpSB: 4685 // nothing to do 4686 case ssa.OpSelect0, ssa.OpSelect1: 4687 // nothing to do 4688 case ssa.OpGetG: 4689 // nothing to do when there's a g register, 4690 // and checkLower complains if there's not 4691 case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive: 4692 // nothing to do; already used by liveness 4693 case ssa.OpVarKill: 4694 // Zero variable if it is ambiguously live. 4695 // After the VARKILL anything this variable references 4696 // might be collected. If it were to become live again later, 4697 // the GC will see references to already-collected objects. 4698 // See issue 20029. 4699 n := v.Aux.(*Node) 4700 if n.Name.Needzero() { 4701 if n.Class() != PAUTO { 4702 v.Fatalf("zero of variable which isn't PAUTO %v", n) 4703 } 4704 if n.Type.Size()%int64(Widthptr) != 0 { 4705 v.Fatalf("zero of variable not a multiple of ptr size %v", n) 4706 } 4707 thearch.ZeroAuto(s.pp, n) 4708 } 4709 case ssa.OpPhi: 4710 CheckLoweredPhi(v) 4711 default: 4712 // let the backend handle it 4713 thearch.SSAGenValue(&s, v) 4714 } 4715 4716 if Ctxt.Flag_locationlists { 4717 valueToProgAfter[v.ID] = s.pp.next 4718 } 4719 4720 if logProgs { 4721 for ; x != s.pp.next; x = x.Link { 4722 progToValue[x] = v 4723 } 4724 } 4725 } 4726 4727 // Emit control flow instructions for block 4728 var next *ssa.Block 4729 if i < len(f.Blocks)-1 && Debug['N'] == 0 { 4730 // If -N, leave next==nil so every block with successors 4731 // ends in a JMP (except call blocks - plive doesn't like 4732 // select{send,recv} followed by a JMP call). Helps keep 4733 // line numbers for otherwise empty blocks. 4734 next = f.Blocks[i+1] 4735 } 4736 x := s.pp.next 4737 s.SetPos(b.Pos) 4738 thearch.SSAGenBlock(&s, b, next) 4739 if logProgs { 4740 for ; x != s.pp.next; x = x.Link { 4741 progToBlock[x] = b 4742 } 4743 } 4744 } 4745 4746 if Ctxt.Flag_locationlists { 4747 e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset) 4748 bstart := s.bstart 4749 // Note that at this moment, Prog.Pc is a sequence number; it's 4750 // not a real PC until after assembly, so this mapping has to 4751 // be done later. 
4752 e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 { 4753 switch v { 4754 case ssa.BlockStart.ID: 4755 return int64(bstart[b].Pc) 4756 case ssa.BlockEnd.ID: 4757 return int64(e.curfn.Func.lsym.Size) 4758 default: 4759 return int64(valueToProgAfter[v].Pc) 4760 } 4761 } 4762 } 4763 4764 // Resolve branches 4765 for _, br := range s.Branches { 4766 br.P.To.Val = s.bstart[br.B.ID] 4767 } 4768 4769 if logProgs { 4770 filename := "" 4771 for p := pp.Text; p != nil; p = p.Link { 4772 if p.Pos.IsKnown() && p.InnermostFilename() != filename { 4773 filename = p.InnermostFilename() 4774 f.Logf("# %s\n", filename) 4775 } 4776 4777 var s string 4778 if v, ok := progToValue[p]; ok { 4779 s = v.String() 4780 } else if b, ok := progToBlock[p]; ok { 4781 s = b.String() 4782 } else { 4783 s = " " // most value and branch strings are 2-3 characters long 4784 } 4785 f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString()) 4786 } 4787 if f.HTMLWriter != nil { 4788 // LineHist is defunct now - this code won't do 4789 // anything. 4790 // TODO: fix this (ideally without a global variable) 4791 // saved := pp.Text.Ctxt.LineHist.PrintFilenameOnly 4792 // pp.Text.Ctxt.LineHist.PrintFilenameOnly = true 4793 var buf bytes.Buffer 4794 buf.WriteString("<code>") 4795 buf.WriteString("<dl class=\"ssa-gen\">") 4796 filename := "" 4797 for p := pp.Text; p != nil; p = p.Link { 4798 // Don't spam every line with the file name, which is often huge. 4799 // Only print changes, and "unknown" is not a change. 4800 if p.Pos.IsKnown() && p.InnermostFilename() != filename { 4801 filename = p.InnermostFilename() 4802 buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">") 4803 buf.WriteString(html.EscapeString("# " + filename)) 4804 buf.WriteString("</dd>") 4805 } 4806 4807 buf.WriteString("<dt class=\"ssa-prog-src\">") 4808 if v, ok := progToValue[p]; ok { 4809 buf.WriteString(v.HTML()) 4810 } else if b, ok := progToBlock[p]; ok { 4811 buf.WriteString("<b>" + b.HTML() + "</b>") 4812 } 4813 buf.WriteString("</dt>") 4814 buf.WriteString("<dd class=\"ssa-prog\">") 4815 buf.WriteString(fmt.Sprintf("%.5d <span class=\"line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), html.EscapeString(p.InstructionString()))) 4816 buf.WriteString("</dd>") 4817 } 4818 buf.WriteString("</dl>") 4819 buf.WriteString("</code>") 4820 f.HTMLWriter.WriteColumn("genssa", "ssa-prog", buf.String()) 4821 // pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved 4822 } 4823 } 4824 4825 defframe(&s, e) 4826 if Debug['f'] != 0 { 4827 frame(0) 4828 } 4829 4830 f.HTMLWriter.Close() 4831 f.HTMLWriter = nil 4832 } 4833 4834 func defframe(s *SSAGenState, e *ssafn) { 4835 pp := s.pp 4836 4837 frame := Rnd(s.maxarg+e.stksize, int64(Widthreg)) 4838 if thearch.PadFrame != nil { 4839 frame = thearch.PadFrame(frame) 4840 } 4841 4842 // Fill in argument and frame size. 4843 pp.Text.To.Type = obj.TYPE_TEXTSIZE 4844 pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg))) 4845 pp.Text.To.Offset = frame 4846 4847 // Insert code to zero ambiguously live variables so that the 4848 // garbage collector only sees initialized values when it 4849 // looks for pointers. 4850 p := pp.Text 4851 var lo, hi int64 4852 4853 // Opaque state for backend to use. Current backends use it to 4854 // keep track of which helper registers have been zeroed. 4855 var state uint32 4856 4857 // Iterate through declarations. They are sorted in decreasing Xoffset order. 
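	// Illustrative merge (a sketch; 64-bit target, Widthreg = 8): two needzero
	// autos at frame offsets 40 and 24, each 8 bytes, arrive in decreasing
	// offset order. Because 24+8 >= 40-16, the pending range is extended rather
	// than flushed, and one ZeroRange covering [24,48) is emitted, zeroing the
	// small gap instead of paying for two separate zeroing sequences.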
	for _, n := range e.curfn.Func.Dcl {
		if !n.Name.Needzero() {
			continue
		}
		if n.Class() != PAUTO {
			Fatalf("needzero class %d", n.Class())
		}
		if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
			Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
		}

		if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
			// Merge with range we already have.
			lo = n.Xoffset
			continue
		}

		// Zero old range
		p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)

		// Set new range.
		lo = n.Xoffset
		hi = lo + n.Type.Size()
	}

	// Zero final range.
	thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}

type FloatingEQNEJump struct {
	Jump  obj.As
	Index int
}

func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) {
	p := s.Prog(jumps.Jump)
	p.To.Type = obj.TYPE_BRANCH
	p.Pos = b.Pos
	to := jumps.Index
	s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
}

func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
	switch next {
	case b.Succs[0].Block():
		s.oneFPJump(b, &jumps[0][0])
		s.oneFPJump(b, &jumps[0][1])
	case b.Succs[1].Block():
		s.oneFPJump(b, &jumps[1][0])
		s.oneFPJump(b, &jumps[1][1])
	default:
		s.oneFPJump(b, &jumps[1][0])
		s.oneFPJump(b, &jumps[1][1])
		q := s.Prog(obj.AJMP)
		q.Pos = b.Pos
		q.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
	}
}

func AuxOffset(v *ssa.Value) (offset int64) {
	if v.Aux == nil {
		return 0
	}
	n, ok := v.Aux.(*Node)
	if !ok {
		v.Fatalf("bad aux type in %s\n", v.LongString())
	}
	if n.Class() == PAUTO {
		return n.Xoffset
	}
	return 0
}

// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
	AddAux2(a, v, v.AuxInt)
}
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
	if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
		v.Fatalf("bad AddAux addr %v", a)
	}
	// add integer offset
	a.Offset += offset

	// If no additional symbol offset, we're done.
	if v.Aux == nil {
		return
	}
	// Add symbol's offset from its base register.
	switch n := v.Aux.(type) {
	case *obj.LSym:
		a.Name = obj.NAME_EXTERN
		a.Sym = n
	case *Node:
		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
			a.Name = obj.NAME_PARAM
			a.Sym = n.Orig.Sym.Linksym()
			a.Offset += n.Xoffset
			break
		}
		a.Name = obj.NAME_AUTO
		a.Sym = n.Sym.Linksym()
		a.Offset += n.Xoffset
	default:
		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
	}
}

// extendIndex extends v to a full int width.
// panic using the given function if v does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value {
	size := v.Type.Size()
	if size == s.config.PtrSize {
		return v
	}
	if size > s.config.PtrSize {
		// truncate 64-bit indexes on 32-bit pointer archs. Test the
		// high word and branch to out-of-bounds failure if it is not 0.
		if Debug['B'] == 0 {
			hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v)
			cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
			s.check(cmp, panicfn)
		}
		return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v)
	}

	// Extend value to the required size
	var op ssa.Op
	if v.Type.IsSigned() {
		switch 10*size + s.config.PtrSize {
		case 14:
			op = ssa.OpSignExt8to32
		case 18:
			op = ssa.OpSignExt8to64
		case 24:
			op = ssa.OpSignExt16to32
		case 28:
			op = ssa.OpSignExt16to64
		case 48:
			op = ssa.OpSignExt32to64
		default:
			s.Fatalf("bad signed index extension %s", v.Type)
		}
	} else {
		switch 10*size + s.config.PtrSize {
		case 14:
			op = ssa.OpZeroExt8to32
		case 18:
			op = ssa.OpZeroExt8to64
		case 24:
			op = ssa.OpZeroExt16to32
		case 28:
			op = ssa.OpZeroExt16to64
		case 48:
			op = ssa.OpZeroExt32to64
		default:
			s.Fatalf("bad unsigned index extension %s", v.Type)
		}
	}
	return s.newValue1(op, types.Types[TINT], v)
}

// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
// Called during ssaGenValue.
func CheckLoweredPhi(v *ssa.Value) {
	if v.Op != ssa.OpPhi {
		v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
	}
	if v.Type.IsMemory() {
		return
	}
	f := v.Block.Func
	loc := f.RegAlloc[v.ID]
	for _, a := range v.Args {
		if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
			v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
		}
	}
}

// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
// That register contains the closure pointer on closure entry.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
	entry := v.Block.Func.Entry
	if entry != v.Block || entry.Values[0] != v {
		Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
	}
}

// AutoVar returns a *Node and int64 representing the auto variable and offset within it
// where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
	if v.Type.Size() > loc.Type.Size() {
		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
	}
	return loc.N.(*Node), loc.Off
}

func AddrAuto(a *obj.Addr, v *ssa.Value) {
	n, off := AutoVar(v)
	a.Type = obj.TYPE_MEM
	a.Sym = n.Sym.Linksym()
	a.Reg = int16(thearch.REGSP)
	a.Offset = n.Xoffset + off
	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
		a.Name = obj.NAME_PARAM
	} else {
		a.Name = obj.NAME_AUTO
	}
}

func (s *SSAGenState) AddrScratch(a *obj.Addr) {
	if s.ScratchFpMem == nil {
		panic("no scratch memory available; forgot to declare usesScratch for Op?")
	}
	a.Type = obj.TYPE_MEM
	a.Name = obj.NAME_AUTO
	a.Sym = s.ScratchFpMem.Sym.Linksym()
	a.Reg = int16(thearch.REGSP)
	a.Offset = s.ScratchFpMem.Xoffset
}

func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
	idx, ok := s.stackMapIndex[v]
	if !ok {
		Fatalf("missing stack map index for %v", v.LongString())
	}
	p := s.Prog(obj.APCDATA)
	Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
	Addrconst(&p.To, int64(idx))

	if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
		// Deferred calls will appear to be returning to
		// the CALL deferreturn(SB) that we are about to emit.
		// However, the stack trace code will show the line
		// of the instruction byte before the return PC.
		// To avoid that being an unrelated instruction,
		// insert an actual hardware NOP that will have the right line number.
		// This is different from obj.ANOP, which is a virtual no-op
		// that doesn't make it into the instruction stream.
		thearch.Ginsnop(s.pp)
	}

	p = s.Prog(obj.ACALL)
	if sym, ok := v.Aux.(*obj.LSym); ok {
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = sym

		// Record call graph information for nowritebarrierrec
		// analysis.
		if nowritebarrierrecCheck != nil {
			nowritebarrierrecCheck.recordCall(s.pp.curfn, sym, v.Pos)
		}
	} else {
		// TODO(mdempsky): Can these differences be eliminated?
		switch thearch.LinkArch.Family {
		case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
			p.To.Type = obj.TYPE_REG
		case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
			p.To.Type = obj.TYPE_MEM
		default:
			Fatalf("unknown indirect call family")
		}
		p.To.Reg = v.Args[0].Reg()
	}
	if s.maxarg < v.AuxInt {
		s.maxarg = v.AuxInt
	}
	return p
}

// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *Node) int {
	t := n.Left.Type
	f := n.Sym
	if !t.IsStruct() {
		panic("ODOT's LHS is not a struct")
	}

	var i int
	for _, t1 := range t.Fields().Slice() {
		if t1.Sym != f {
			i++
			continue
		}
		if t1.Offset != n.Xoffset {
			panic("field offset doesn't match")
		}
		return i
	}
	panic(fmt.Sprintf("can't find field in expr %v\n", n))

	// TODO: keep the result of this function somewhere in the ODOT Node
	// so we don't have to recompute it each time we need it.
}

// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
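// A *ssafn is the value the frontend passes to ssa.NewFunc, and the methods
// below (StringData, Auto, the Split* helpers, Syslook, and so on) make up
// the ssa.Frontend interface that the ssa package calls back into.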
type ssafn struct {
	curfn        *Node
	strings      map[string]interface{} // map from constant string to data symbols
	scratchFpMem *Node                  // temp for floating point register / memory moves on some architectures
	stksize      int64                  // stack size for current frame
	stkptrsize   int64                  // prefix of stack containing pointers
	log          bool
}

// StringData returns a symbol (a *types.Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) interface{} {
	if aux, ok := e.strings[s]; ok {
		return aux
	}
	if e.strings == nil {
		e.strings = make(map[string]interface{})
	}
	data := stringsym(e.curfn.Pos, s)
	e.strings[s] = data
	return data
}

func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
	n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
	return n
}

func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := types.NewPtr(types.Types[TUINT8])
	lenType := types.Types[TINT]
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this string up into two separate variables.
		p := e.splitSlot(&name, ".ptr", 0, ptrType)
		l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
		return p, l
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}

func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	t := types.NewPtr(types.Types[TUINT8])
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this interface up into two separate variables.
		f := ".itab"
		if n.Type.IsEmptyInterface() {
			f = ".type"
		}
		c := e.splitSlot(&name, f, 0, t)
		d := e.splitSlot(&name, ".data", t.Size(), t)
		return c, d
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}

func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := types.NewPtr(name.Type.ElemType())
	lenType := types.Types[TINT]
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this slice up into three separate variables.
		p := e.splitSlot(&name, ".ptr", 0, ptrType)
		l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
		c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
		return p, l, c
	}
	// Return the three parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}

func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	s := name.Type.Size() / 2
	var t *types.Type
	if s == 8 {
		t = types.Types[TFLOAT64]
	} else {
		t = types.Types[TFLOAT32]
	}
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this complex up into two separate variables.
		r := e.splitSlot(&name, ".real", 0, t)
		i := e.splitSlot(&name, ".imag", t.Size(), t)
		return r, i
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}

func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	var t *types.Type
	if name.Type.IsSigned() {
		t = types.Types[TINT32]
	} else {
		t = types.Types[TUINT32]
	}
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this int64 up into two separate variables.
		if thearch.LinkArch.ByteOrder == binary.BigEndian {
			return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
		}
		return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
	}
	// Return the two parts of the larger variable.
	if thearch.LinkArch.ByteOrder == binary.BigEndian {
		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
	}
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
}

func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
	n := name.N.(*Node)
	st := name.Type
	ft := st.FieldType(i)
	var offset int64
	for f := 0; f < i; f++ {
		offset += st.FieldType(f).Size()
	}
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Note: the _ field may appear several times. But
		// have no fear, identically-named but distinct Autos are
		// ok, albeit maybe confusing for a debugger.
		return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
	}
	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}

func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
	n := name.N.(*Node)
	at := name.Type
	if at.NumElem() != 1 {
		Fatalf("bad array size")
	}
	et := at.ElemType()
	if n.Class() == PAUTO && !n.Addrtaken() {
		return e.splitSlot(&name, "[0]", 0, et)
	}
	return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
}

func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
	return itabsym(it, offset)
}

// splitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
	s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}

	n := &Node{
		Name: new(Name),
		Op:   ONAME,
		Pos:  parent.N.(*Node).Pos,
	}
	n.Orig = n

	s.Def = asTypesNode(n)
	asNode(s.Def).Name.SetUsed(true)
	n.Sym = s
	n.Type = t
	n.SetClass(PAUTO)
	n.SetAddable(true)
	n.Esc = EscNever
	n.Name.Curfn = e.curfn
	e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
	dowidth(t)
	return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}

func (e *ssafn) CanSSA(t *types.Type) bool {
	return canSSAType(t)
}

func (e *ssafn) Line(pos src.XPos) string {
	return linestr(pos)
}

// Log logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
	if e.log {
		fmt.Printf(msg, args...)
	}
}

func (e *ssafn) Log() bool {
	return e.log
}

// Fatal reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
	lineno = pos
	Fatalf(msg, args...)
}

// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
	Warnl(pos, fmt_, args...)
}

func (e *ssafn) Debug_checknil() bool {
	return Debug_checknil != 0
}

func (e *ssafn) UseWriteBarrier() bool {
	return use_writebarrier
}

func (e *ssafn) Syslook(name string) *obj.LSym {
	switch name {
	case "goschedguarded":
		return goschedguarded
	case "writeBarrier":
		return writeBarrier
	case "gcWriteBarrier":
		return gcWriteBarrier
	case "typedmemmove":
		return typedmemmove
	case "typedmemclr":
		return typedmemclr
	}
	Fatalf("unknown Syslook func %v", name)
	return nil
}

func (e *ssafn) SetWBPos(pos src.XPos) {
	e.curfn.Func.setWBPos(pos)
}

func (n *Node) Typ() *types.Type {
	return n.Type
}
func (n *Node) StorageClass() ssa.StorageClass {
	switch n.Class() {
	case PPARAM:
		return ssa.ClassParam
	case PPARAMOUT:
		return ssa.ClassParamOut
	case PAUTO:
		return ssa.ClassAuto
	default:
		Fatalf("untranslateable storage class for %v: %s", n, n.Class())
		return 0
	}
}