github.com/epfl-dcsl/gotee@v0.0.0-20200909122901-014b35f5e5e9/src/cmd/compile/internal/gc/ssa.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

func initssaconfig() {
	types_ := ssa.Types{
		Bool:       types.Types[TBOOL],
		Int8:       types.Types[TINT8],
		Int16:      types.Types[TINT16],
		Int32:      types.Types[TINT32],
		Int64:      types.Types[TINT64],
		UInt8:      types.Types[TUINT8],
		UInt16:     types.Types[TUINT16],
		UInt32:     types.Types[TUINT32],
		UInt64:     types.Types[TUINT64],
		Float32:    types.Types[TFLOAT32],
		Float64:    types.Types[TFLOAT64],
		Int:        types.Types[TINT],
		UInt:       types.Types[TUINT],
		Uintptr:    types.Types[TUINTPTR],
		String:     types.Types[TSTRING],
		BytePtr:    types.NewPtr(types.Types[TUINT8]),
		Int32Ptr:   types.NewPtr(types.Types[TINT32]),
		UInt32Ptr:  types.NewPtr(types.Types[TUINT32]),
		IntPtr:     types.NewPtr(types.Types[TINT]),
		UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
		Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
		Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
		BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
	}

	if thearch.SoftFloat {
		softfloatInit()
	}

	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
	_ = types.NewPtr(types.NewPtr(types.Idealstring))                 // **string
	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewSlice(types.Idealstring))               // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[TINT16])                             // *int16
	_ = types.NewPtr(types.Types[TINT64])                             // *int64
	_ = types.NewPtr(types.Errortype)                                 // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
	if thearch.LinkArch.Name == "386" {
		ssaConfig.Set387(thearch.Use387)
	}
	ssaConfig.SoftFloat = thearch.SoftFloat
	ssaCaches = make([]ssa.Cache, nBackendWorkers)

	// Set up some runtime functions we'll need to call.
	Newproc = sysfunc("newproc")
	Deferproc = sysfunc("deferproc")
	Gosecload = Gosecpkg.Lookup("Gosecload").Linksym()
	Deferreturn = sysfunc("deferreturn")
	Duffcopy = sysfunc("duffcopy")
	Duffzero = sysfunc("duffzero")
	panicindex = sysfunc("panicindex")
	panicslice = sysfunc("panicslice")
	panicdivide = sysfunc("panicdivide")
	growslice = sysfunc("growslice")
	panicdottypeE = sysfunc("panicdottypeE")
	panicdottypeI = sysfunc("panicdottypeI")
	panicnildottype = sysfunc("panicnildottype")
	assertE2I = sysfunc("assertE2I")
	assertE2I2 = sysfunc("assertE2I2")
	assertI2I = sysfunc("assertI2I")
	assertI2I2 = sysfunc("assertI2I2")
	goschedguarded = sysfunc("goschedguarded")
	writeBarrier = sysfunc("writeBarrier")
	writebarrierptr = sysfunc("writebarrierptr")
	gcWriteBarrier = sysfunc("gcWriteBarrier")
	typedmemmove = sysfunc("typedmemmove")
	typedmemclr = sysfunc("typedmemclr")
	Udiv = sysfunc("udiv")

	// GO386=387 runtime functions
	ControlWord64trunc = sysfunc("controlWord64trunc")
	ControlWord32 = sysfunc("controlWord32")
}
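
// The symbols initialized above are link-time references to runtime
// functions that the SSA backend emits calls to. As an illustrative sketch
// (a hypothetical helper, mirroring what the Gosecload line spells out
// explicitly; not something defined in this file), resolving such a symbol
// amounts to:
//
//	func lookupRuntimeFunc(name string) *obj.LSym {
//		return Runtimepkg.Lookup(name).Linksym()
//	}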

// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
	name := fn.funcname()
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	s.hasdefer = fn.Func.HasDefer()
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Config = ssaConfig
	s.f.Cache = &ssaCaches[worker]
	s.f.Cache.Reset()
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
	s.f.Name = name
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}
	s.softFloat = s.config.SoftFloat

	if name == os.Getenv("GOSSAFUNC") {
		s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class() {
		case PPARAM, PPARAMOUT:
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), n, s.sp)
			if n.Class() == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %v unimplemented", n.Class())
		}
	}

	// Populate SSAable arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class() == PPARAM && s.canSSA(n) {
			s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	for _, b := range s.f.Blocks {
		if b.Pos != src.NoXPos {
			s.updateUnsetPredPos(b)
		}
	}

	s.insertPhis()

	// Don't carry a reference to this around longer than necessary.
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)
	return s.f
}

// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurs on that
// predecessor if its position is updated. B should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
	if b.Pos == src.NoXPos {
		s.Fatalf("Block %s should have a position", b)
	}
	bestPos := src.NoXPos
	for _, e := range b.Preds {
		p := e.Block()
		if !p.LackingPos() {
			continue
		}
		if bestPos == src.NoXPos {
			bestPos = b.Pos
			for _, v := range b.Values {
				if v.LackingPos() {
					continue
				}
				if v.Pos != src.NoXPos {
					// Assume values are still in roughly textual order;
					// TODO: could also seek minimum position?
					bestPos = v.Pos
					break
				}
			}
		}
		p.Pos = bestPos
		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
	}
	return
}
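
// Illustrative note (a sketch of the protocol used below, not new API): the
// state's block machinery is always driven in the same bracketed pattern,
//
//	b := s.f.NewBlock(ssa.BlockPlain)
//	s.startBlock(b)      // b becomes current; s.vars is reset
//	// ... emit values with s.newValueN ...
//	done := s.endBlock() // snapshots s.vars into s.defvars[b.ID]
//
// which is how stmt and expr below keep per-block variable state consistent.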

type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *Node

	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []src.XPos
	// the last line number processed; it may have been popped
	lastPos src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	cgoUnsafeArgs bool
	hasdefer      bool // whether the function contains a defer statement
	softFloat     bool
}

type funcLine struct {
	f    *obj.LSym
	base *src.PosBase
	line uint
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
	okVar     = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	if b.LackingPos() {
		// Empty plain blocks get the line of their successor (handled after all blocks created),
		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
		// and for blocks ending in GOTO/BREAK/CONTINUE.
		b.Pos = src.NoXPos
	} else {
		b.Pos = s.lastPos
	}
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit nodes with a missing line number;
		// use the parent line number in this case.
		line = s.peekPos()
		if Debug['K'] != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	} else {
		s.lastPos = line
	}

	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}
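
// Illustrative note (sketch, summarizing how callers below use this stack):
// pushLine is paired with a deferred popLine so every value created while
// visiting a node inherits that node's position through peekPos:
//
//	s.pushLine(n.Pos)
//	defer s.popLine()
//	// ... s.newValueN(...) now stamps values with n.Pos ...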

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.f.Entry.NewValue0(src.NoXPos, op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
	return s.f.ConstSlice(s.peekPos(), t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
	return s.f.ConstInterface(s.peekPos(), t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
	return s.f.ConstEmptyString(s.peekPos(), t)
}
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}
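
// Illustrative use of the width-dispatching helper above (a sketch; the
// variable name "one" is hypothetical):
//
//	one := s.constInt(types.Types[TINT], 1)
//	// emits a 64-bit constant when s.config.PtrSize == 8, else a 32-bit
//	// constant; values that don't fit in 32 bits are a fatal error there.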

// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg); ok {
			return c
		}
	}
	return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg0, arg1); ok {
			return c
		}
	}
	return s.newValue2(op, t, arg0, arg1)
}
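
// Illustrative note (sketch): on a soft-float target a float operation such as
//
//	v := s.newValueOrSfCall2(ssa.OpAdd32F, types.Types[TFLOAT32], a, b)
//
// is rewritten by sfcall into a call to a soft-float runtime routine; on
// hard-float targets the wrapper reduces to a plain s.newValue2.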

// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	if !(n.Op == OVARKILL || n.Op == OVARLIVE) {
		// OVARKILL and OVARLIVE are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op != OLABEL {
		return
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OGOSECURE:
		// TODO(aghosn): for the moment, treat it like a go routine.
		s.call(n.Left, callGosecure)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		deref := false
		if !canSSAType(n.Rlist.First().Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, deref, 0)
		s.assign(n.List.Second(), resok, false, 0)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		s.assign(n.List.First(), v1, false, 0)
		s.assign(n.List.Second(), v2, false, 0)
		return

	case ODCL:
		if n.Left.Class() == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym
		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.labeledControl(); ctl != nil {
			s.labeledNodes[ctl] = lab
		}

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.Pos = s.lastPos // Do this even if b is an empty block.
		b.AddEdgeTo(lab.target)

	case OAS:
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//	VARDEF x
			//	COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// Check whether we're writing the result of an append back to the same slice.
				// If so, we handle it specially to avoid write barriers on the fast
				// (non-growth) path.
				if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 {
					break
				}
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if s.canSSA(n.Left) {
					if Debug_append > 0 { // replicating old diagnostic message
						Warnl(n.Pos, "append: len-only update (in local slice)")
					}
					break
				}
				if Debug_append > 0 {
					Warnl(n.Pos, "append: len-only update")
				}
				s.append(rhs, true)
				return
			}
		}

		if isblank(n.Left) {
			// _ = rhs
			// Just evaluate rhs for side-effects.
			if rhs != nil {
				s.expr(rhs)
			}
			return
		}

		var t *types.Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		var r *ssa.Value
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//	tmp = len(*p)
			//	(*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, deref, skip)

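	// Illustrative note (sketch) on the skip computation above: for a
	// statement like s = s[:n], the bounds are i == nil, j == n, k == nil,
	// so skip becomes skipPtr|skipCap and only the len field is stored back.
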
	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		var likely int8
		if n.Likely() {
			likely = 1
		}
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		b := s.exit()
		b.Pos = s.lastPos

	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Sym.Linksym()

	case OCONTINUE, OBREAK:
		var to *ssa.Block
		if n.Left == nil {
			// plain break/continue
			switch n.Op {
			case OCONTINUE:
				to = s.continueTo
			case OBREAK:
				to = s.breakTo
			}
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
		}

		b := s.endBlock()
		b.Pos = s.lastPos // Do this even if b is an empty block.
		b.AddEdgeTo(to)

	case OFOR, OFORUNTIL:
		// OFOR: for Ninit; Left; Right { Nbody }
		// For      = cond; body; incr
		// Foruntil = body; incr; cond
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test (OFOR) or body (OFORUNTIL)
		b := s.endBlock()
		if n.Op == OFOR {
			b.AddEdgeTo(bCond)
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}

		} else {
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
			// It can happen that bIncr ends in a block containing only VARKILL,
			// and that muddles the debugging experience.
			if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
				b.Pos = bCond.Pos
			}
		}

		if n.Op == OFORUNTIL {
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}
		}

		s.startBlock(bEnd)

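	// Illustrative CFG (sketch) built by the OFOR lowering above, for
	// "for cond; body; incr":
	//
	//	entry -> bCond
	//	bCond -> bBody (cond true), bEnd (cond false)
	//	bBody -> bIncr -> bCond
	//
	// OFORUNTIL instead enters at bBody and tests cond only after incr.
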
	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
		// If we still have a current block here, then mark it unreachable.
		if s.curBlock != nil {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken() {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		switch n.Left.Class() {
		case PAUTO, PPARAM, PPARAMOUT:
		default:
			s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if s.hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, n.Type, addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}
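
// Illustrative note (sketch): the opToSSA table below is consulted through
// ssaOp after concreteEtype canonicalizes TINT/TUINT/TUINTPTR, so on a
// 64-bit target one would get, e.g.:
//
//	s.ssaOp(OADD, types.Types[TINT])   // ssa.OpAdd64
//	s.ssaOp(ODIV, types.Types[TUINT8]) // ssa.OpDiv8u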

type opAndType struct {
	op    Op
	etype types.EType
}

var opToSSA = map[opAndType]ssa.Op{
	{OADD, TINT8}:    ssa.OpAdd8,
	{OADD, TUINT8}:   ssa.OpAdd8,
	{OADD, TINT16}:   ssa.OpAdd16,
	{OADD, TUINT16}:  ssa.OpAdd16,
	{OADD, TINT32}:   ssa.OpAdd32,
	{OADD, TUINT32}:  ssa.OpAdd32,
	{OADD, TPTR32}:   ssa.OpAdd32,
	{OADD, TINT64}:   ssa.OpAdd64,
	{OADD, TUINT64}:  ssa.OpAdd64,
	{OADD, TPTR64}:   ssa.OpAdd64,
	{OADD, TFLOAT32}: ssa.OpAdd32F,
	{OADD, TFLOAT64}: ssa.OpAdd64F,

	{OSUB, TINT8}:    ssa.OpSub8,
	{OSUB, TUINT8}:   ssa.OpSub8,
	{OSUB, TINT16}:   ssa.OpSub16,
	{OSUB, TUINT16}:  ssa.OpSub16,
	{OSUB, TINT32}:   ssa.OpSub32,
	{OSUB, TUINT32}:  ssa.OpSub32,
	{OSUB, TINT64}:   ssa.OpSub64,
	{OSUB, TUINT64}:  ssa.OpSub64,
	{OSUB, TFLOAT32}: ssa.OpSub32F,
	{OSUB, TFLOAT64}: ssa.OpSub64F,

	{ONOT, TBOOL}: ssa.OpNot,

	{OMINUS, TINT8}:    ssa.OpNeg8,
	{OMINUS, TUINT8}:   ssa.OpNeg8,
	{OMINUS, TINT16}:   ssa.OpNeg16,
	{OMINUS, TUINT16}:  ssa.OpNeg16,
	{OMINUS, TINT32}:   ssa.OpNeg32,
	{OMINUS, TUINT32}:  ssa.OpNeg32,
	{OMINUS, TINT64}:   ssa.OpNeg64,
	{OMINUS, TUINT64}:  ssa.OpNeg64,
	{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	{OCOM, TINT8}:   ssa.OpCom8,
	{OCOM, TUINT8}:  ssa.OpCom8,
	{OCOM, TINT16}:  ssa.OpCom16,
	{OCOM, TUINT16}: ssa.OpCom16,
	{OCOM, TINT32}:  ssa.OpCom32,
	{OCOM, TUINT32}: ssa.OpCom32,
	{OCOM, TINT64}:  ssa.OpCom64,
	{OCOM, TUINT64}: ssa.OpCom64,

	{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
	{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
	{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	{OMUL, TINT8}:    ssa.OpMul8,
	{OMUL, TUINT8}:   ssa.OpMul8,
	{OMUL, TINT16}:   ssa.OpMul16,
	{OMUL, TUINT16}:  ssa.OpMul16,
	{OMUL, TINT32}:   ssa.OpMul32,
	{OMUL, TUINT32}:  ssa.OpMul32,
	{OMUL, TINT64}:   ssa.OpMul64,
	{OMUL, TUINT64}:  ssa.OpMul64,
	{OMUL, TFLOAT32}: ssa.OpMul32F,
	{OMUL, TFLOAT64}: ssa.OpMul64F,

	{ODIV, TFLOAT32}: ssa.OpDiv32F,
	{ODIV, TFLOAT64}: ssa.OpDiv64F,

	{ODIV, TINT8}:   ssa.OpDiv8,
	{ODIV, TUINT8}:  ssa.OpDiv8u,
	{ODIV, TINT16}:  ssa.OpDiv16,
	{ODIV, TUINT16}: ssa.OpDiv16u,
	{ODIV, TINT32}:  ssa.OpDiv32,
	{ODIV, TUINT32}: ssa.OpDiv32u,
	{ODIV, TINT64}:  ssa.OpDiv64,
	{ODIV, TUINT64}: ssa.OpDiv64u,

	{OMOD, TINT8}:   ssa.OpMod8,
	{OMOD, TUINT8}:  ssa.OpMod8u,
	{OMOD, TINT16}:  ssa.OpMod16,
	{OMOD, TUINT16}: ssa.OpMod16u,
	{OMOD, TINT32}:  ssa.OpMod32,
	{OMOD, TUINT32}: ssa.OpMod32u,
	{OMOD, TINT64}:  ssa.OpMod64,
	{OMOD, TUINT64}: ssa.OpMod64u,

	{OAND, TINT8}:   ssa.OpAnd8,
	{OAND, TUINT8}:  ssa.OpAnd8,
	{OAND, TINT16}:  ssa.OpAnd16,
	{OAND, TUINT16}: ssa.OpAnd16,
	{OAND, TINT32}:  ssa.OpAnd32,
	{OAND, TUINT32}: ssa.OpAnd32,
	{OAND, TINT64}:  ssa.OpAnd64,
	{OAND, TUINT64}: ssa.OpAnd64,

	{OOR, TINT8}:   ssa.OpOr8,
	{OOR, TUINT8}:  ssa.OpOr8,
	{OOR, TINT16}:  ssa.OpOr16,
	{OOR, TUINT16}: ssa.OpOr16,
	{OOR, TINT32}:  ssa.OpOr32,
	{OOR, TUINT32}: ssa.OpOr32,
	{OOR, TINT64}:  ssa.OpOr64,
	{OOR, TUINT64}: ssa.OpOr64,

	{OXOR, TINT8}:   ssa.OpXor8,
	{OXOR, TUINT8}:  ssa.OpXor8,
	{OXOR, TINT16}:  ssa.OpXor16,
	{OXOR, TUINT16}: ssa.OpXor16,
	{OXOR, TINT32}:  ssa.OpXor32,
	{OXOR, TUINT32}: ssa.OpXor32,
	{OXOR, TINT64}:  ssa.OpXor64,
	{OXOR, TUINT64}: ssa.OpXor64,

	{OEQ, TBOOL}:      ssa.OpEqB,
	{OEQ, TINT8}:      ssa.OpEq8,
	{OEQ, TUINT8}:     ssa.OpEq8,
	{OEQ, TINT16}:     ssa.OpEq16,
	{OEQ, TUINT16}:    ssa.OpEq16,
	{OEQ, TINT32}:     ssa.OpEq32,
	{OEQ, TUINT32}:    ssa.OpEq32,
	{OEQ, TINT64}:     ssa.OpEq64,
	{OEQ, TUINT64}:    ssa.OpEq64,
	{OEQ, TINTER}:     ssa.OpEqInter,
	{OEQ, TSLICE}:     ssa.OpEqSlice,
	{OEQ, TFUNC}:      ssa.OpEqPtr,
	{OEQ, TMAP}:       ssa.OpEqPtr,
	{OEQ, TCHAN}:      ssa.OpEqPtr,
	{OEQ, TPTR32}:     ssa.OpEqPtr,
	{OEQ, TPTR64}:     ssa.OpEqPtr,
	{OEQ, TUINTPTR}:   ssa.OpEqPtr,
	{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	{OEQ, TFLOAT64}:   ssa.OpEq64F,
	{OEQ, TFLOAT32}:   ssa.OpEq32F,

	{ONE, TBOOL}:      ssa.OpNeqB,
	{ONE, TINT8}:      ssa.OpNeq8,
	{ONE, TUINT8}:     ssa.OpNeq8,
	{ONE, TINT16}:     ssa.OpNeq16,
	{ONE, TUINT16}:    ssa.OpNeq16,
	{ONE, TINT32}:     ssa.OpNeq32,
	{ONE, TUINT32}:    ssa.OpNeq32,
	{ONE, TINT64}:     ssa.OpNeq64,
	{ONE, TUINT64}:    ssa.OpNeq64,
	{ONE, TINTER}:     ssa.OpNeqInter,
	{ONE, TSLICE}:     ssa.OpNeqSlice,
	{ONE, TFUNC}:      ssa.OpNeqPtr,
	{ONE, TMAP}:       ssa.OpNeqPtr,
	{ONE, TCHAN}:      ssa.OpNeqPtr,
	{ONE, TPTR32}:     ssa.OpNeqPtr,
	{ONE, TPTR64}:     ssa.OpNeqPtr,
	{ONE, TUINTPTR}:   ssa.OpNeqPtr,
	{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	{ONE, TFLOAT64}:   ssa.OpNeq64F,
	{ONE, TFLOAT32}:   ssa.OpNeq32F,

	{OLT, TINT8}:    ssa.OpLess8,
	{OLT, TUINT8}:   ssa.OpLess8U,
	{OLT, TINT16}:   ssa.OpLess16,
	{OLT, TUINT16}:  ssa.OpLess16U,
	{OLT, TINT32}:   ssa.OpLess32,
	{OLT, TUINT32}:  ssa.OpLess32U,
	{OLT, TINT64}:   ssa.OpLess64,
	{OLT, TUINT64}:  ssa.OpLess64U,
	{OLT, TFLOAT64}: ssa.OpLess64F,
	{OLT, TFLOAT32}: ssa.OpLess32F,

	{OGT, TINT8}:    ssa.OpGreater8,
	{OGT, TUINT8}:   ssa.OpGreater8U,
	{OGT, TINT16}:   ssa.OpGreater16,
	{OGT, TUINT16}:  ssa.OpGreater16U,
	{OGT, TINT32}:   ssa.OpGreater32,
	{OGT, TUINT32}:  ssa.OpGreater32U,
	{OGT, TINT64}:   ssa.OpGreater64,
	{OGT, TUINT64}:  ssa.OpGreater64U,
	{OGT, TFLOAT64}: ssa.OpGreater64F,
	{OGT, TFLOAT32}: ssa.OpGreater32F,

	{OLE, TINT8}:    ssa.OpLeq8,
	{OLE, TUINT8}:   ssa.OpLeq8U,
	{OLE, TINT16}:   ssa.OpLeq16,
	{OLE, TUINT16}:  ssa.OpLeq16U,
	{OLE, TINT32}:   ssa.OpLeq32,
	{OLE, TUINT32}:  ssa.OpLeq32U,
	{OLE, TINT64}:   ssa.OpLeq64,
	{OLE, TUINT64}:  ssa.OpLeq64U,
	{OLE, TFLOAT64}: ssa.OpLeq64F,
	{OLE, TFLOAT32}: ssa.OpLeq32F,

	{OGE, TINT8}:    ssa.OpGeq8,
	{OGE, TUINT8}:   ssa.OpGeq8U,
	{OGE, TINT16}:   ssa.OpGeq16,
	{OGE, TUINT16}:  ssa.OpGeq16U,
	{OGE, TINT32}:   ssa.OpGeq32,
	{OGE, TUINT32}:  ssa.OpGeq32U,
	{OGE, TINT64}:   ssa.OpGeq64,
	{OGE, TUINT64}:  ssa.OpGeq64U,
	{OGE, TFLOAT64}: ssa.OpGeq64F,
	{OGE, TFLOAT32}: ssa.OpGeq32F,
}

func (s *state) concreteEtype(t *types.Type) types.EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.PtrSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}

func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}

func floatForComplex(t *types.Type) *types.Type {
	if t.Size() == 8 {
		return types.Types[TFLOAT32]
	} else {
		return types.Types[TFLOAT64]
	}
}

type opAndTwoTypes struct {
	op     Op
	etype1 types.EType
	etype2 types.EType
}

type twoTypes struct {
	etype1 types.EType
	etype2 types.EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType types.EType
}

var fpConvOpToSSA = map[twoTypes]twoOpsAndType{

	{TINT8, TFLOAT32}:  {ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	{TINT16, TFLOAT32}: {ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	{TINT32, TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	{TINT64, TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	{TINT8, TFLOAT64}:  {ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	{TINT16, TFLOAT64}: {ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	{TINT32, TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	{TINT64, TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	{TFLOAT32, TINT8}:  {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	{TFLOAT32, TINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	{TFLOAT32, TINT32}: {ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	{TFLOAT32, TINT64}: {ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	{TFLOAT64, TINT8}:  {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	{TFLOAT64, TINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	{TFLOAT64, TINT32}: {ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	{TFLOAT64, TINT64}: {ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	{TUINT8, TFLOAT32}:  {ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	{TUINT16, TFLOAT32}: {ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	{TUINT32, TFLOAT32}: {ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	{TUINT64, TFLOAT32}: {ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	{TUINT8, TFLOAT64}:  {ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	{TUINT16, TFLOAT64}: {ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	{TUINT32, TFLOAT64}: {ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	{TUINT64, TFLOAT64}: {ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	{TFLOAT32, TUINT8}:  {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	{TFLOAT32, TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	{TFLOAT32, TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	{TFLOAT32, TUINT64}: {ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	{TFLOAT64, TUINT8}:  {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	{TFLOAT64, TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	{TFLOAT64, TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	{TFLOAT64, TUINT64}: {ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead

	// float
	{TFLOAT64, TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	{TFLOAT64, TFLOAT64}: {ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
	{TFLOAT32, TFLOAT32}: {ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
	{TFLOAT32, TFLOAT64}: {ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}

// this map is used only for 32-bit archs, and only includes the difference:
// on 32-bit archs, don't use int64<->float conversion for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	{TUINT32, TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	{TUINT32, TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	{TFLOAT32, TUINT32}: {ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	{TFLOAT64, TUINT32}: {ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	{TUINT64, TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	{TUINT64, TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	{TFLOAT32, TUINT64}: {ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	{TFLOAT64, TUINT64}: {ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
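
// Illustrative note (sketch): each table entry above is a two-step recipe
// through an intermediate type. For example, int16 -> float64 uses
//
//	{TINT16, TFLOAT64}: {ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32}
//
// i.e. sign-extend to int32 first, then convert int32 to float64.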

var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
	{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
	{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
	{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
	{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
	{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
	{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
	{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
	{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
	{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
	{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
	{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
	{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
	{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
	{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
	{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
	{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
	{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
	{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
	{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
	{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
	{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
	{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
	{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
	{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
	{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
	{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
	{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
	{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
	{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
	{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
	{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
	{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
	{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
	{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
	{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
	{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
	{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
	{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
	{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
	{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}

func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}
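
// Illustrative note (sketch): shift ops are keyed on both operand types, so
//
//	s.ssaShiftOp(OLSH, types.Types[TINT64], types.Types[TUINT8])  // ssa.OpLsh64x8
//	s.ssaShiftOp(ORSH, types.Types[TUINT8], types.Types[TUINT64]) // ssa.OpRsh8Ux64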

// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OARRAYBYTESTRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
		len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTRARRAYBYTETMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := n.Left.Sym.Linksym()
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class() == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym).Linksym()
			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := types.Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := types.Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}

		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		// map <--> *hmap
		if to.Etype == TMAP && from.IsPtr() &&
			to.MapType().Hmap == from.Elem() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsBoolean() && tt.IsKind(TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
			return s.newValue1(ssa.OpCopy, n.Type, x)
		}
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}

		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if thearch.LinkArch.Family == sys.ARM64 || s.softFloat {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}

			if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
					// tt is float32 or float64, and ft is also unsigned
					if tt.Size() == 4 {
						return s.uint32Tofloat32(n, x, ft, tt)
					}
					if tt.Size() == 8 {
						return s.uint32Tofloat64(n, x, ft, tt)
					}
				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
					// ft is float32 or float64, and tt is unsigned integer
					if ft.Size() == 4 {
						return s.float32ToUint32(n, x, ft, tt)
					}
					if ft.Size() == 8 {
						return s.float64ToUint32(n, x, ft, tt)
					}
				}
			}

			if !ok {
				s.Fatalf("weird float conversion %v -> %v", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValueOrSfCall1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValueOrSfCall1(op1, n.Type, x)
				}
				return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
			if ft.IsInteger() {
				// tt is float32 or float64, and ft is also unsigned
				if tt.Size() == 4 {
					return s.uint64Tofloat32(n, x, ft, tt)
				}
				if tt.Size() == 8 {
					return s.uint64Tofloat64(n, x, ft, tt)
				}
				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
			}
			// ft is float32 or float64, and tt is unsigned integer
			if ft.Size() == 4 {
				return s.float32ToUint64(n, x, ft, tt)
			}
			if ft.Size() == 8 {
				return s.float64ToUint64(n, x, ft, tt)
			}
			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
			return nil
		}

		if ft.IsComplex() && tt.IsComplex() {
			var op ssa.Op
			if ft.Size() == tt.Size() {
				switch ft.Size() {
				case 8:
					op = ssa.OpRound32F
				case 16:
					op = ssa.OpRound64F
				default:
					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
				}
			} else if ft.Size() == 8 && tt.Size() == 16 {
				op = ssa.OpCvt32Fto64F
			} else if ft.Size() == 16 && tt.Size() == 8 {
				op = ssa.OpCvt64Fto32F
			} else {
				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
			}
			ftp := floatForComplex(ft)
			ttp := floatForComplex(tt)
			return s.newValue2(ssa.OpComplexMake, tt,
				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
		}

		s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
		return nil

	case ODOTTYPE:
		res, _ := s.dottype(n, false)
		return res

	// binary ops
	case OLT, OEQ, ONE, OLE, OGE, OGT:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Left.Type.IsComplex() {
			pt := floatForComplex(n.Left.Type)
			op := s.ssaOp(OEQ, pt)
			r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
			i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
			c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
types.Types[TBOOL], r, i) 1837 switch n.Op { 1838 case OEQ: 1839 return c 1840 case ONE: 1841 return s.newValue1(ssa.OpNot, types.Types[TBOOL], c) 1842 default: 1843 s.Fatalf("ordered complex compare %v", n.Op) 1844 } 1845 } 1846 if n.Left.Type.IsFloat() { 1847 return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b) 1848 } 1849 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b) 1850 case OMUL: 1851 a := s.expr(n.Left) 1852 b := s.expr(n.Right) 1853 if n.Type.IsComplex() { 1854 mulop := ssa.OpMul64F 1855 addop := ssa.OpAdd64F 1856 subop := ssa.OpSub64F 1857 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1858 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1859 1860 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1861 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1862 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1863 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1864 1865 if pt != wt { // Widen for calculation 1866 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal) 1867 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal) 1868 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag) 1869 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag) 1870 } 1871 1872 xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag)) 1873 ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal)) 1874 1875 if pt != wt { // Narrow to store back 1876 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal) 1877 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) 1878 } 1879 1880 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1881 } 1882 1883 if n.Type.IsFloat() { 1884 return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1885 } 1886 1887 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1888 1889 case ODIV: 1890 a := s.expr(n.Left) 1891 b := s.expr(n.Right) 1892 if n.Type.IsComplex() { 1893 // TODO this is not executed because the front-end substitutes a runtime call. 1894 // That probably ought to change; with modest optimization the widen/narrow 1895 // conversions could all be elided in larger expression trees. 
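			// The quotient below is computed with the textbook formula
			// (no scaling of the operands):
			//	(a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c*c + d*d)
			// denom, xreal, and ximag compute c*c+d*d, ac+bd, and bc-ad
			// respectively, widened to float64.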
1896 mulop := ssa.OpMul64F 1897 addop := ssa.OpAdd64F 1898 subop := ssa.OpSub64F 1899 divop := ssa.OpDiv64F 1900 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1901 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1902 1903 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1904 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1905 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1906 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1907 1908 if pt != wt { // Widen for calculation 1909 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal) 1910 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal) 1911 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag) 1912 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag) 1913 } 1914 1915 denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag)) 1916 xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag)) 1917 ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag)) 1918 1919 // TODO not sure if this is best done in wide precision or narrow 1920 // Double-rounding might be an issue. 1921 // Note that the pre-SSA implementation does the entire calculation 1922 // in wide format, so wide is compatible. 1923 xreal = s.newValueOrSfCall2(divop, wt, xreal, denom) 1924 ximag = s.newValueOrSfCall2(divop, wt, ximag, denom) 1925 1926 if pt != wt { // Narrow to store back 1927 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal) 1928 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) 1929 } 1930 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1931 } 1932 if n.Type.IsFloat() { 1933 return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1934 } 1935 return s.intDivide(n, a, b) 1936 case OMOD: 1937 a := s.expr(n.Left) 1938 b := s.expr(n.Right) 1939 return s.intDivide(n, a, b) 1940 case OADD, OSUB: 1941 a := s.expr(n.Left) 1942 b := s.expr(n.Right) 1943 if n.Type.IsComplex() { 1944 pt := floatForComplex(n.Type) 1945 op := s.ssaOp(n.Op, pt) 1946 return s.newValue2(ssa.OpComplexMake, n.Type, 1947 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1948 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1949 } 1950 if n.Type.IsFloat() { 1951 return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1952 } 1953 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1954 case OAND, OOR, OXOR: 1955 a := s.expr(n.Left) 1956 b := s.expr(n.Right) 1957 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1958 case OLSH, ORSH: 1959 a := s.expr(n.Left) 1960 b := s.expr(n.Right) 1961 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1962 case OANDAND, OOROR: 1963 // To implement OANDAND (and OOROR), we introduce a 1964 // new temporary variable to hold the result. The 1965 // variable is associated with the OANDAND node in the 1966 // s.vars table (normally variables are only 1967 // associated with ONAME nodes). We convert 1968 // A && B 1969 // to 1970 // var = A 1971 // if var { 1972 // var = B 1973 // } 1974 // Using var in the subsequent block introduces the 1975 // necessary phi variable. 
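		// For example, a && b is lowered to (sketch):
		//
		//	b:       v = a; If v -> bRight, bResult
		//	bRight:  v = b; Plain -> bResult
		//	bResult: the read of v resolves to a phi of its definitions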
1976 		el := s.expr(n.Left)
1977 		s.vars[n] = el
1978 
1979 		b := s.endBlock()
1980 		b.Kind = ssa.BlockIf
1981 		b.SetControl(el)
1982 		// In theory, we should set b.Likely here based on context.
1983 		// However, gc only gives us likeliness hints
1984 		// in a single place, for plain OIF statements,
1985 		// and passing around context is finicky, so don't bother for now.
1986 
1987 		bRight := s.f.NewBlock(ssa.BlockPlain)
1988 		bResult := s.f.NewBlock(ssa.BlockPlain)
1989 		if n.Op == OANDAND {
1990 			b.AddEdgeTo(bRight)
1991 			b.AddEdgeTo(bResult)
1992 		} else if n.Op == OOROR {
1993 			b.AddEdgeTo(bResult)
1994 			b.AddEdgeTo(bRight)
1995 		}
1996 
1997 		s.startBlock(bRight)
1998 		er := s.expr(n.Right)
1999 		s.vars[n] = er
2000 
2001 		b = s.endBlock()
2002 		b.AddEdgeTo(bResult)
2003 
2004 		s.startBlock(bResult)
2005 		return s.variable(n, types.Types[TBOOL])
2006 	case OCOMPLEX:
2007 		r := s.expr(n.Left)
2008 		i := s.expr(n.Right)
2009 		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
2010 
2011 	// unary ops
2012 	case OMINUS:
2013 		a := s.expr(n.Left)
2014 		if n.Type.IsComplex() {
2015 			tp := floatForComplex(n.Type)
2016 			negop := s.ssaOp(n.Op, tp)
2017 			return s.newValue2(ssa.OpComplexMake, n.Type,
2018 				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
2019 				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
2020 		}
2021 		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
2022 	case ONOT, OCOM:
2023 		a := s.expr(n.Left)
2024 		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
2025 	case OIMAG, OREAL:
2026 		a := s.expr(n.Left)
2027 		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
2028 	case OPLUS:
2029 		return s.expr(n.Left)
2030 
2031 	case OADDR:
2032 		return s.addr(n.Left, n.Bounded())
2033 
2034 	case OINDREGSP:
2035 		addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
2036 		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
2037 
2038 	case OIND:
2039 		p := s.exprPtr(n.Left, false, n.Pos)
2040 		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
2041 
2042 	case ODOT:
2043 		t := n.Left.Type
2044 		if canSSAType(t) {
2045 			v := s.expr(n.Left)
2046 			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
2047 		}
2048 		if n.Left.Op == OSTRUCTLIT {
2049 			// All literals with nonzero fields have already been
2050 			// rewritten during walk. Any that remain are just T{}
2051 			// or equivalents. Use the zero value.
2052 			if !iszero(n.Left) {
2053 				Fatalf("literal with nonzero value in SSA: %v", n.Left)
2054 			}
2055 			return s.zeroVal(n.Type)
2056 		}
2057 		p := s.addr(n, false)
2058 		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
2059 
2060 	case ODOTPTR:
2061 		p := s.exprPtr(n.Left, false, n.Pos)
2062 		p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
2063 		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
2064 
2065 	case OINDEX:
2066 		switch {
2067 		case n.Left.Type.IsString():
2068 			if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
2069 				// Replace "abc"[1] with 'b'.
2070 				// Delayed until now because "abc"[1] is not an ideal constant.
2071 				// See test/fixedbugs/issue11370.go.
2072 return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()]))) 2073 } 2074 a := s.expr(n.Left) 2075 i := s.expr(n.Right) 2076 i = s.extendIndex(i, panicindex) 2077 if !n.Bounded() { 2078 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a) 2079 s.boundsCheck(i, len) 2080 } 2081 ptrtyp := s.f.Config.Types.BytePtr 2082 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 2083 if Isconst(n.Right, CTINT) { 2084 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 2085 } else { 2086 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 2087 } 2088 return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem()) 2089 case n.Left.Type.IsSlice(): 2090 p := s.addr(n, false) 2091 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2092 case n.Left.Type.IsArray(): 2093 if bound := n.Left.Type.NumElem(); bound <= 1 { 2094 // SSA can handle arrays of length at most 1. 2095 a := s.expr(n.Left) 2096 i := s.expr(n.Right) 2097 if bound == 0 { 2098 // Bounds check will never succeed. Might as well 2099 // use constants for the bounds check. 2100 z := s.constInt(types.Types[TINT], 0) 2101 s.boundsCheck(z, z) 2102 // The return value won't be live, return junk. 2103 return s.newValue0(ssa.OpUnknown, n.Type) 2104 } 2105 i = s.extendIndex(i, panicindex) 2106 if !n.Bounded() { 2107 s.boundsCheck(i, s.constInt(types.Types[TINT], bound)) 2108 } 2109 return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) 2110 } 2111 p := s.addr(n, false) 2112 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2113 default: 2114 s.Fatalf("bad type for index %v", n.Left.Type) 2115 return nil 2116 } 2117 2118 case OLEN, OCAP: 2119 switch { 2120 case n.Left.Type.IsSlice(): 2121 op := ssa.OpSliceLen 2122 if n.Op == OCAP { 2123 op = ssa.OpSliceCap 2124 } 2125 return s.newValue1(op, types.Types[TINT], s.expr(n.Left)) 2126 case n.Left.Type.IsString(): // string; not reachable for OCAP 2127 return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left)) 2128 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 2129 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 2130 default: // array 2131 return s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 2132 } 2133 2134 case OSPTR: 2135 a := s.expr(n.Left) 2136 if n.Left.Type.IsSlice() { 2137 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 2138 } else { 2139 return s.newValue1(ssa.OpStringPtr, n.Type, a) 2140 } 2141 2142 case OITAB: 2143 a := s.expr(n.Left) 2144 return s.newValue1(ssa.OpITab, n.Type, a) 2145 2146 case OIDATA: 2147 a := s.expr(n.Left) 2148 return s.newValue1(ssa.OpIData, n.Type, a) 2149 2150 case OEFACE: 2151 tab := s.expr(n.Left) 2152 data := s.expr(n.Right) 2153 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2154 2155 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2156 v := s.expr(n.Left) 2157 var i, j, k *ssa.Value 2158 low, high, max := n.SliceBounds() 2159 if low != nil { 2160 i = s.extendIndex(s.expr(low), panicslice) 2161 } 2162 if high != nil { 2163 j = s.extendIndex(s.expr(high), panicslice) 2164 } 2165 if max != nil { 2166 k = s.extendIndex(s.expr(max), panicslice) 2167 } 2168 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2169 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2170 2171 case OSLICESTR: 2172 v := s.expr(n.Left) 2173 var i, j *ssa.Value 2174 low, high, _ := n.SliceBounds() 2175 if low != nil { 2176 i = s.extendIndex(s.expr(low), panicslice) 2177 } 2178 if high != nil { 2179 j = s.extendIndex(s.expr(high), panicslice) 2180 } 2181 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 
2182 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2183 2184 case OCALLFUNC: 2185 if isIntrinsicCall(n) { 2186 return s.intrinsicCall(n) 2187 } 2188 fallthrough 2189 2190 case OCALLINTER, OCALLMETH: 2191 a := s.call(n, callNormal) 2192 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2193 2194 case OGETG: 2195 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2196 2197 case OAPPEND: 2198 return s.append(n, false) 2199 2200 case OSTRUCTLIT, OARRAYLIT: 2201 // All literals with nonzero fields have already been 2202 // rewritten during walk. Any that remain are just T{} 2203 // or equivalents. Use the zero value. 2204 if !iszero(n) { 2205 Fatalf("literal with nonzero value in SSA: %v", n) 2206 } 2207 return s.zeroVal(n.Type) 2208 2209 default: 2210 s.Fatalf("unhandled expr %v", n.Op) 2211 return nil 2212 } 2213 } 2214 2215 // append converts an OAPPEND node to SSA. 2216 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2217 // adds it to s, and returns the Value. 2218 // If inplace is true, it writes the result of the OAPPEND expression n 2219 // back to the slice being appended to, and returns nil. 2220 // inplace MUST be set to false if the slice can be SSA'd. 2221 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2222 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2223 // 2224 // ptr, len, cap := s 2225 // newlen := len + 3 2226 // if newlen > cap { 2227 // ptr, len, cap = growslice(s, newlen) 2228 // newlen = len + 3 // recalculate to avoid a spill 2229 // } 2230 // // with write barriers, if needed: 2231 // *(ptr+len) = e1 2232 // *(ptr+len+1) = e2 2233 // *(ptr+len+2) = e3 2234 // return makeslice(ptr, newlen, cap) 2235 // 2236 // 2237 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2238 // 2239 // a := &s 2240 // ptr, len, cap := s 2241 // newlen := len + 3 2242 // if newlen > cap { 2243 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2244 // vardef(a) // if necessary, advise liveness we are writing a new a 2245 // *a.cap = newcap // write before ptr to avoid a spill 2246 // *a.ptr = newptr // with write barrier 2247 // } 2248 // newlen = len + 3 // recalculate to avoid a spill 2249 // *a.len = newlen 2250 // // with write barriers, if needed: 2251 // *(ptr+len) = e1 2252 // *(ptr+len+1) = e2 2253 // *(ptr+len+2) = e3 2254 2255 et := n.Type.Elem() 2256 pt := types.NewPtr(et) 2257 2258 // Evaluate slice 2259 sn := n.List.First() // the slice node is the first in the list 2260 2261 var slice, addr *ssa.Value 2262 if inplace { 2263 addr = s.addr(sn, false) 2264 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2265 } else { 2266 slice = s.expr(sn) 2267 } 2268 2269 // Allocate new blocks 2270 grow := s.f.NewBlock(ssa.BlockPlain) 2271 assign := s.f.NewBlock(ssa.BlockPlain) 2272 2273 // Decide if we need to grow 2274 nargs := int64(n.List.Len() - 1) 2275 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2276 l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2277 c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice) 2278 nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2279 2280 cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c) 2281 s.vars[&ptrVar] = p 2282 2283 if !inplace { 2284 s.vars[&newlenVar] = nl 2285 s.vars[&capVar] = c 2286 } else { 2287 s.vars[&lenVar] = l 2288 } 2289 2290 b := s.endBlock() 2291 b.Kind = ssa.BlockIf 2292 b.Likely = ssa.BranchUnlikely 2293 b.SetControl(cmp) 2294 
b.AddEdgeTo(grow) 2295 b.AddEdgeTo(assign) 2296 2297 // Call growslice 2298 s.startBlock(grow) 2299 taddr := s.expr(n.Left) 2300 r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl) 2301 2302 if inplace { 2303 if sn.Op == ONAME && sn.Class() != PEXTERN { 2304 // Tell liveness we're about to build a new slice 2305 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) 2306 } 2307 capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr) 2308 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capaddr, r[2], s.mem()) 2309 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, pt, addr, r[0], s.mem()) 2310 // load the value we just stored to avoid having to spill it 2311 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2312 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2313 } else { 2314 s.vars[&ptrVar] = r[0] 2315 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs)) 2316 s.vars[&capVar] = r[2] 2317 } 2318 2319 b = s.endBlock() 2320 b.AddEdgeTo(assign) 2321 2322 // assign new elements to slots 2323 s.startBlock(assign) 2324 2325 if inplace { 2326 l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len 2327 nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2328 lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr) 2329 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenaddr, nl, s.mem()) 2330 } 2331 2332 // Evaluate args 2333 type argRec struct { 2334 // if store is true, we're appending the value v. If false, we're appending the 2335 // value at *v. 2336 v *ssa.Value 2337 store bool 2338 } 2339 args := make([]argRec, 0, nargs) 2340 for _, n := range n.List.Slice()[1:] { 2341 if canSSAType(n.Type) { 2342 args = append(args, argRec{v: s.expr(n), store: true}) 2343 } else { 2344 v := s.addr(n, false) 2345 args = append(args, argRec{v: v}) 2346 } 2347 } 2348 2349 p = s.variable(&ptrVar, pt) // generates phi for ptr 2350 if !inplace { 2351 nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl 2352 c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap 2353 } 2354 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2355 for i, arg := range args { 2356 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i))) 2357 if arg.store { 2358 s.storeType(et, addr, arg.v, 0) 2359 } else { 2360 store := s.newValue3I(ssa.OpMove, types.TypeMem, et.Size(), addr, arg.v, s.mem()) 2361 store.Aux = et 2362 s.vars[&memVar] = store 2363 } 2364 } 2365 2366 delete(s.vars, &ptrVar) 2367 if inplace { 2368 delete(s.vars, &lenVar) 2369 return nil 2370 } 2371 delete(s.vars, &newlenVar) 2372 delete(s.vars, &capVar) 2373 // make result 2374 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2375 } 2376 2377 // condBranch evaluates the boolean expression cond and branches to yes 2378 // if cond is true and no if cond is false. 2379 // This function is intended to handle && and || better than just calling 2380 // s.expr(cond) and branching on the result. 
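// For example, condBranch(a && b, yes, no, likely) first branches on a to
// (mid, no) and then, in mid, branches on b to (yes, no); b is evaluated
// only when a is true, and no boolean result is ever materialized.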
2381 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
2382 	switch cond.Op {
2383 	case OANDAND:
2384 		mid := s.f.NewBlock(ssa.BlockPlain)
2385 		s.stmtList(cond.Ninit)
2386 		s.condBranch(cond.Left, mid, no, max8(likely, 0))
2387 		s.startBlock(mid)
2388 		s.condBranch(cond.Right, yes, no, likely)
2389 		return
2390 		// Note: if likely==1, then both recursive calls pass 1.
2391 		// If likely==-1, then we don't have enough information to decide
2392 		// whether the first branch is likely or not. So we pass 0 for
2393 		// the likeliness of the first branch.
2394 		// TODO: have the frontend give us branch prediction hints for
2395 		// OANDAND and OOROR nodes (if it ever has such info).
2396 	case OOROR:
2397 		mid := s.f.NewBlock(ssa.BlockPlain)
2398 		s.stmtList(cond.Ninit)
2399 		s.condBranch(cond.Left, yes, mid, min8(likely, 0))
2400 		s.startBlock(mid)
2401 		s.condBranch(cond.Right, yes, no, likely)
2402 		return
2403 		// Note: if likely==-1, then both recursive calls pass -1.
2404 		// If likely==1, then we don't have enough info to decide
2405 		// the likelihood of the first branch.
2406 	case ONOT:
2407 		s.stmtList(cond.Ninit)
2408 		s.condBranch(cond.Left, no, yes, -likely)
2409 		return
2410 	}
2411 	c := s.expr(cond)
2412 	b := s.endBlock()
2413 	b.Kind = ssa.BlockIf
2414 	b.SetControl(c)
2415 	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
2416 	b.AddEdgeTo(yes)
2417 	b.AddEdgeTo(no)
2418 }
2419 
2420 type skipMask uint8
2421 
2422 const (
2423 	skipPtr skipMask = 1 << iota
2424 	skipLen
2425 	skipCap
2426 )
2427 
2428 // assign does left = right.
2429 // Right has already been evaluated to ssa, left has not.
2430 // If deref is true, then we do left = *right instead (and right has already been nil-checked).
2431 // If deref is true and right == nil, just do left = 0.
2432 // skip indicates assignments (at the top level) that can be avoided.
2433 func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
2434 	if left.Op == ONAME && isblank(left) {
2435 		return
2436 	}
2437 	t := left.Type
2438 	dowidth(t)
2439 	if s.canSSA(left) {
2440 		if deref {
2441 			s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
2442 		}
2443 		if left.Op == ODOT {
2444 			// We're assigning to a field of an ssa-able value.
2445 			// We need to build a new structure with the new value for the
2446 			// field we're assigning and the old values for the other fields.
2447 			// For instance:
2448 			//   type T struct {a, b, c int}
2449 			//   var x T
2450 			//   x.b = 5
2451 			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
2452 
2453 			// Grab information about the structure type.
2454 			t := left.Left.Type
2455 			nf := t.NumFields()
2456 			idx := fieldIdx(left)
2457 
2458 			// Grab old value of structure.
2459 			old := s.expr(left.Left)
2460 
2461 			// Make new structure.
2462 			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
2463 
2464 			// Add fields as args.
2465 			for i := 0; i < nf; i++ {
2466 				if i == idx {
2467 					new.AddArg(right)
2468 				} else {
2469 					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
2470 				}
2471 			}
2472 
2473 			// Recursively assign the new value we've made to the base of the dot op.
2474 			s.assign(left.Left, new, false, 0)
2475 			// TODO: do we need to update named values here?
2476 			return
2477 		}
2478 		if left.Op == OINDEX && left.Left.Type.IsArray() {
2479 			// We're assigning to an element of an ssa-able array.
2480 // a[i] = v 2481 t := left.Left.Type 2482 n := t.NumElem() 2483 2484 i := s.expr(left.Right) // index 2485 if n == 0 { 2486 // The bounds check must fail. Might as well 2487 // ignore the actual index and just use zeros. 2488 z := s.constInt(types.Types[TINT], 0) 2489 s.boundsCheck(z, z) 2490 return 2491 } 2492 if n != 1 { 2493 s.Fatalf("assigning to non-1-length array") 2494 } 2495 // Rewrite to a = [1]{v} 2496 i = s.extendIndex(i, panicindex) 2497 s.boundsCheck(i, s.constInt(types.Types[TINT], 1)) 2498 v := s.newValue1(ssa.OpArrayMake1, t, right) 2499 s.assign(left.Left, v, false, 0) 2500 return 2501 } 2502 // Update variable assignment. 2503 s.vars[left] = right 2504 s.addNamedValue(left, right) 2505 return 2506 } 2507 // Left is not ssa-able. Compute its address. 2508 addr := s.addr(left, false) 2509 if left.Op == ONAME && left.Class() != PEXTERN && skip == 0 { 2510 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem()) 2511 } 2512 if isReflectHeaderDataField(left) { 2513 // Package unsafe's documentation says storing pointers into 2514 // reflect.SliceHeader and reflect.StringHeader's Data fields 2515 // is valid, even though they have type uintptr (#19168). 2516 // Mark it pointer type to signal the writebarrier pass to 2517 // insert a write barrier. 2518 t = types.Types[TUNSAFEPTR] 2519 } 2520 if deref { 2521 // Treat as a mem->mem move. 2522 var store *ssa.Value 2523 if right == nil { 2524 store = s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), addr, s.mem()) 2525 } else { 2526 store = s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), addr, right, s.mem()) 2527 } 2528 store.Aux = t 2529 s.vars[&memVar] = store 2530 return 2531 } 2532 // Treat as a store. 2533 s.storeType(t, addr, right, skip) 2534 } 2535 2536 // zeroVal returns the zero value for type t. 
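// Composite zero values are built recursively; for example, the zero value
// of [1]complex64 is ArrayMake1(ComplexMake(z, z)) where z is a float32
// constant 0.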
2537 func (s *state) zeroVal(t *types.Type) *ssa.Value { 2538 switch { 2539 case t.IsInteger(): 2540 switch t.Size() { 2541 case 1: 2542 return s.constInt8(t, 0) 2543 case 2: 2544 return s.constInt16(t, 0) 2545 case 4: 2546 return s.constInt32(t, 0) 2547 case 8: 2548 return s.constInt64(t, 0) 2549 default: 2550 s.Fatalf("bad sized integer type %v", t) 2551 } 2552 case t.IsFloat(): 2553 switch t.Size() { 2554 case 4: 2555 return s.constFloat32(t, 0) 2556 case 8: 2557 return s.constFloat64(t, 0) 2558 default: 2559 s.Fatalf("bad sized float type %v", t) 2560 } 2561 case t.IsComplex(): 2562 switch t.Size() { 2563 case 8: 2564 z := s.constFloat32(types.Types[TFLOAT32], 0) 2565 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2566 case 16: 2567 z := s.constFloat64(types.Types[TFLOAT64], 0) 2568 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2569 default: 2570 s.Fatalf("bad sized complex type %v", t) 2571 } 2572 2573 case t.IsString(): 2574 return s.constEmptyString(t) 2575 case t.IsPtrShaped(): 2576 return s.constNil(t) 2577 case t.IsBoolean(): 2578 return s.constBool(false) 2579 case t.IsInterface(): 2580 return s.constInterface(t) 2581 case t.IsSlice(): 2582 return s.constSlice(t) 2583 case t.IsStruct(): 2584 n := t.NumFields() 2585 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2586 for i := 0; i < n; i++ { 2587 v.AddArg(s.zeroVal(t.FieldType(i))) 2588 } 2589 return v 2590 case t.IsArray(): 2591 switch t.NumElem() { 2592 case 0: 2593 return s.entryNewValue0(ssa.OpArrayMake0, t) 2594 case 1: 2595 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) 2596 } 2597 } 2598 s.Fatalf("zero for type %v not implemented", t) 2599 return nil 2600 } 2601 2602 type callKind int8 2603 2604 const ( 2605 callNormal callKind = iota 2606 callDefer 2607 callGo 2608 callGosecure 2609 ) 2610 2611 type sfRtCallDef struct { 2612 rtfn *obj.LSym 2613 rtype types.EType 2614 } 2615 2616 var softFloatOps map[ssa.Op]sfRtCallDef 2617 2618 func softfloatInit() { 2619 // Some of these operations get transformed by sfcall. 
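	// In particular (see sfcall below): Sub32F/Sub64F are emitted as
	// fadd(a, -b), Less*/Leq* swap their operands to reuse the fgt*/fge*
	// kernels, and Neq* is computed as !feq*.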
2620 softFloatOps = map[ssa.Op]sfRtCallDef{ 2621 ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32}, 2622 ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64}, 2623 ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32}, 2624 ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64}, 2625 ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32}, 2626 ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64}, 2627 ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32}, 2628 ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64}, 2629 2630 ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), TBOOL}, 2631 ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), TBOOL}, 2632 ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), TBOOL}, 2633 ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), TBOOL}, 2634 ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL}, 2635 ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL}, 2636 ssa.OpGreater64F: sfRtCallDef{sysfunc("fgt64"), TBOOL}, 2637 ssa.OpGreater32F: sfRtCallDef{sysfunc("fgt32"), TBOOL}, 2638 ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL}, 2639 ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL}, 2640 ssa.OpGeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL}, 2641 ssa.OpGeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL}, 2642 2643 ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), TFLOAT32}, 2644 ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), TINT32}, 2645 ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), TFLOAT32}, 2646 ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), TINT64}, 2647 ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32}, 2648 ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64}, 2649 ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), TFLOAT64}, 2650 ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), TINT32}, 2651 ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), TFLOAT64}, 2652 ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), TINT64}, 2653 ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64}, 2654 ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64}, 2655 ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64}, 2656 ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32}, 2657 } 2658 } 2659 2660 // TODO: do not emit sfcall if operation can be optimized to constant in later 2661 // opt phase 2662 func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) { 2663 if callDef, ok := softFloatOps[op]; ok { 2664 switch op { 2665 case ssa.OpLess32F, 2666 ssa.OpLess64F, 2667 ssa.OpLeq32F, 2668 ssa.OpLeq64F: 2669 args[0], args[1] = args[1], args[0] 2670 case ssa.OpSub32F, 2671 ssa.OpSub64F: 2672 args[1] = s.newValue1(s.ssaOp(OMINUS, types.Types[callDef.rtype]), args[1].Type, args[1]) 2673 } 2674 2675 result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0] 2676 if op == ssa.OpNeq32F || op == ssa.OpNeq64F { 2677 result = s.newValue1(ssa.OpNot, result.Type, result) 2678 } 2679 return result, true 2680 } 2681 return nil, false 2682 } 2683 2684 var intrinsics map[intrinsicKey]intrinsicBuilder 2685 2686 // An intrinsicBuilder converts a call node n into an ssa value that 2687 // implements that call as an intrinsic. args is a list of arguments to the func. 
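// A builder may also return nil when the intrinsic is used only for its
// effect on memory; see the runtime.KeepAlive builder below.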
2688 type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value 2689 2690 type intrinsicKey struct { 2691 arch *sys.Arch 2692 pkg string 2693 fn string 2694 } 2695 2696 func init() { 2697 intrinsics = map[intrinsicKey]intrinsicBuilder{} 2698 2699 var all []*sys.Arch 2700 var p4 []*sys.Arch 2701 var p8 []*sys.Arch 2702 for _, a := range sys.Archs { 2703 all = append(all, a) 2704 if a.PtrSize == 4 { 2705 p4 = append(p4, a) 2706 } else { 2707 p8 = append(p8, a) 2708 } 2709 } 2710 2711 // add adds the intrinsic b for pkg.fn for the given list of architectures. 2712 add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) { 2713 for _, a := range archs { 2714 intrinsics[intrinsicKey{a, pkg, fn}] = b 2715 } 2716 } 2717 // addF does the same as add but operates on architecture families. 2718 addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) { 2719 m := 0 2720 for _, f := range archFamilies { 2721 if f >= 32 { 2722 panic("too many architecture families") 2723 } 2724 m |= 1 << uint(f) 2725 } 2726 for _, a := range all { 2727 if m>>uint(a.Family)&1 != 0 { 2728 intrinsics[intrinsicKey{a, pkg, fn}] = b 2729 } 2730 } 2731 } 2732 // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists. 2733 alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) { 2734 for _, a := range archs { 2735 if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok { 2736 intrinsics[intrinsicKey{a, pkg, fn}] = b 2737 } 2738 } 2739 } 2740 2741 /******** runtime ********/ 2742 if !instrumenting { 2743 add("runtime", "slicebytetostringtmp", 2744 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2745 // Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes 2746 // for the backend instead of slicebytetostringtmp calls 2747 // when not instrumenting. 2748 slice := args[0] 2749 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) 2750 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2751 return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) 2752 }, 2753 all...) 2754 } 2755 add("runtime", "KeepAlive", 2756 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2757 data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) 2758 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) 2759 return nil 2760 }, 2761 all...) 2762 add("runtime", "getclosureptr", 2763 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2764 return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr) 2765 }, 2766 all...) 2767 2768 addF("runtime", "getcallerpc", 2769 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2770 return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr) 2771 }, sys.AMD64, sys.I386) 2772 2773 add("runtime", "getcallersp", 2774 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2775 return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr) 2776 }, 2777 all...) 
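	// For illustration only: a hypothetical identity intrinsic for a
	// package "pkg" and function "Ident" (neither exists) would be
	// registered on all architectures as
	//
	//	add("pkg", "Ident",
	//		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
	//			return s.newValue1(ssa.OpCopy, n.Type, args[0])
	//		},
	//		all...)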
2778 2779 /******** runtime/internal/sys ********/ 2780 addF("runtime/internal/sys", "Ctz32", 2781 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2782 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 2783 }, 2784 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2785 addF("runtime/internal/sys", "Ctz64", 2786 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2787 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 2788 }, 2789 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2790 addF("runtime/internal/sys", "Bswap32", 2791 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2792 return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0]) 2793 }, 2794 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2795 addF("runtime/internal/sys", "Bswap64", 2796 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2797 return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0]) 2798 }, 2799 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2800 2801 /******** runtime/internal/atomic ********/ 2802 addF("runtime/internal/atomic", "Load", 2803 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2804 v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem()) 2805 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2806 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2807 }, 2808 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2809 addF("runtime/internal/atomic", "Load64", 2810 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2811 v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) 2812 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2813 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2814 }, 2815 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2816 addF("runtime/internal/atomic", "Loadp", 2817 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2818 v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) 2819 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2820 return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) 2821 }, 2822 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2823 2824 addF("runtime/internal/atomic", "Store", 2825 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2826 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) 2827 return nil 2828 }, 2829 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2830 addF("runtime/internal/atomic", "Store64", 2831 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2832 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) 2833 return nil 2834 }, 2835 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2836 addF("runtime/internal/atomic", "StorepNoWB", 2837 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2838 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) 2839 return nil 2840 }, 2841 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64) 2842 2843 addF("runtime/internal/atomic", "Xchg", 2844 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2845 v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) 2846 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2847 
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2848 }, 2849 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2850 addF("runtime/internal/atomic", "Xchg64", 2851 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2852 v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) 2853 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2854 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2855 }, 2856 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2857 2858 addF("runtime/internal/atomic", "Xadd", 2859 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2860 v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) 2861 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2862 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2863 }, 2864 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2865 addF("runtime/internal/atomic", "Xadd64", 2866 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2867 v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) 2868 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2869 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2870 }, 2871 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2872 2873 addF("runtime/internal/atomic", "Cas", 2874 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2875 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) 2876 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2877 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2878 }, 2879 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) 2880 addF("runtime/internal/atomic", "Cas64", 2881 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2882 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) 2883 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2884 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2885 }, 2886 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) 2887 2888 addF("runtime/internal/atomic", "And8", 2889 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2890 s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) 2891 return nil 2892 }, 2893 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2894 addF("runtime/internal/atomic", "Or8", 2895 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2896 s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) 2897 return nil 2898 }, 2899 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2900 2901 alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...) 2902 alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...) 2903 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...) 2904 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...) 2905 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...) 2906 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...) 
2907 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...) 2908 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...) 2909 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...) 2910 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...) 2911 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...) 2912 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...) 2913 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...) 2914 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...) 2915 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...) 2916 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...) 2917 2918 /******** math ********/ 2919 addF("math", "Sqrt", 2920 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2921 return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0]) 2922 }, 2923 sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) 2924 addF("math", "Trunc", 2925 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2926 return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0]) 2927 }, 2928 sys.PPC64, sys.S390X) 2929 addF("math", "Ceil", 2930 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2931 return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0]) 2932 }, 2933 sys.PPC64, sys.S390X) 2934 addF("math", "Floor", 2935 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2936 return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0]) 2937 }, 2938 sys.PPC64, sys.S390X) 2939 addF("math", "Round", 2940 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2941 return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0]) 2942 }, 2943 sys.S390X) 2944 addF("math", "RoundToEven", 2945 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2946 return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0]) 2947 }, 2948 sys.S390X) 2949 addF("math", "Abs", 2950 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2951 return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0]) 2952 }, 2953 sys.PPC64) 2954 addF("math", "Copysign", 2955 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2956 return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1]) 2957 }, 2958 sys.PPC64) 2959 2960 makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2961 return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2962 aux := syslook("support_sse41").Sym.Linksym() 2963 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb) 2964 v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem()) 2965 b := s.endBlock() 2966 b.Kind = ssa.BlockIf 2967 b.SetControl(v) 2968 bTrue := s.f.NewBlock(ssa.BlockPlain) 2969 bFalse := s.f.NewBlock(ssa.BlockPlain) 2970 bEnd := s.f.NewBlock(ssa.BlockPlain) 2971 b.AddEdgeTo(bTrue) 2972 b.AddEdgeTo(bFalse) 2973 b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays 2974 2975 // We have the intrinsic - use it directly. 2976 s.startBlock(bTrue) 2977 s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0]) 2978 s.endBlock().AddEdgeTo(bEnd) 2979 2980 // Call the pure Go version. 
2981 s.startBlock(bFalse) 2982 a := s.call(n, callNormal) 2983 s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TFLOAT64], a, s.mem()) 2984 s.endBlock().AddEdgeTo(bEnd) 2985 2986 // Merge results. 2987 s.startBlock(bEnd) 2988 return s.variable(n, types.Types[TFLOAT64]) 2989 } 2990 } 2991 addF("math", "RoundToEven", 2992 makeRoundAMD64(ssa.OpRoundToEven), 2993 sys.AMD64) 2994 addF("math", "Floor", 2995 makeRoundAMD64(ssa.OpFloor), 2996 sys.AMD64) 2997 addF("math", "Ceil", 2998 makeRoundAMD64(ssa.OpCeil), 2999 sys.AMD64) 3000 addF("math", "Trunc", 3001 makeRoundAMD64(ssa.OpTrunc), 3002 sys.AMD64) 3003 3004 /******** math/bits ********/ 3005 addF("math/bits", "TrailingZeros64", 3006 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3007 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 3008 }, 3009 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3010 addF("math/bits", "TrailingZeros32", 3011 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3012 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 3013 }, 3014 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3015 addF("math/bits", "TrailingZeros16", 3016 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3017 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 3018 c := s.constInt32(types.Types[TUINT32], 1<<16) 3019 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 3020 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 3021 }, 3022 sys.ARM, sys.MIPS) 3023 addF("math/bits", "TrailingZeros16", 3024 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3025 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 3026 c := s.constInt64(types.Types[TUINT64], 1<<16) 3027 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 3028 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 3029 }, 3030 sys.AMD64, sys.ARM64, sys.S390X) 3031 addF("math/bits", "TrailingZeros8", 3032 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3033 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 3034 c := s.constInt32(types.Types[TUINT32], 1<<8) 3035 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 3036 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 3037 }, 3038 sys.ARM, sys.MIPS) 3039 addF("math/bits", "TrailingZeros8", 3040 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3041 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 3042 c := s.constInt64(types.Types[TUINT64], 1<<8) 3043 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 3044 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 3045 }, 3046 sys.AMD64, sys.ARM64, sys.S390X) 3047 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...) 3048 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...) 3049 // ReverseBytes inlines correctly, no need to intrinsify it. 3050 // ReverseBytes16 lowers to a rotate, no need for anything special here. 
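	// In the TrailingZeros16/TrailingZeros8 builders above, OR-ing in a
	// constant 1<<16 (resp. 1<<8) plants a sentinel bit just past the
	// argument's width, so Ctz on the wider register returns 16 (resp. 8)
	// for a zero input rather than the width of the wider type.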
3051 addF("math/bits", "Len64", 3052 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3053 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 3054 }, 3055 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3056 addF("math/bits", "Len32", 3057 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3058 if s.config.PtrSize == 4 { 3059 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 3060 } 3061 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0]) 3062 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 3063 }, 3064 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3065 addF("math/bits", "Len16", 3066 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3067 if s.config.PtrSize == 4 { 3068 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 3069 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 3070 } 3071 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 3072 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 3073 }, 3074 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3075 // Note: disabled on AMD64 because the Go code is faster! 3076 addF("math/bits", "Len8", 3077 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3078 if s.config.PtrSize == 4 { 3079 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 3080 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 3081 } 3082 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 3083 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 3084 }, 3085 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3086 3087 addF("math/bits", "Len", 3088 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3089 if s.config.PtrSize == 4 { 3090 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 3091 } 3092 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 3093 }, 3094 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 3095 // LeadingZeros is handled because it trivially calls Len. 
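	// (math/bits defines LeadingZeros64(x) as 64 - Len64(x), and similarly
	// for the narrower widths, so intrinsifying Len is enough.)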
3096 addF("math/bits", "Reverse64", 3097 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3098 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 3099 }, 3100 sys.ARM64) 3101 addF("math/bits", "Reverse32", 3102 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3103 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 3104 }, 3105 sys.ARM64) 3106 addF("math/bits", "Reverse16", 3107 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3108 return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0]) 3109 }, 3110 sys.ARM64) 3111 addF("math/bits", "Reverse8", 3112 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3113 return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0]) 3114 }, 3115 sys.ARM64) 3116 addF("math/bits", "Reverse", 3117 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3118 if s.config.PtrSize == 4 { 3119 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 3120 } 3121 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 3122 }, 3123 sys.ARM64) 3124 makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3125 return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3126 aux := syslook("support_popcnt").Sym.Linksym() 3127 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb) 3128 v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem()) 3129 b := s.endBlock() 3130 b.Kind = ssa.BlockIf 3131 b.SetControl(v) 3132 bTrue := s.f.NewBlock(ssa.BlockPlain) 3133 bFalse := s.f.NewBlock(ssa.BlockPlain) 3134 bEnd := s.f.NewBlock(ssa.BlockPlain) 3135 b.AddEdgeTo(bTrue) 3136 b.AddEdgeTo(bFalse) 3137 b.Likely = ssa.BranchLikely // most machines have popcnt nowadays 3138 3139 // We have the intrinsic - use it directly. 3140 s.startBlock(bTrue) 3141 op := op64 3142 if s.config.PtrSize == 4 { 3143 op = op32 3144 } 3145 s.vars[n] = s.newValue1(op, types.Types[TINT], args[0]) 3146 s.endBlock().AddEdgeTo(bEnd) 3147 3148 // Call the pure Go version. 3149 s.startBlock(bFalse) 3150 a := s.call(n, callNormal) 3151 s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem()) 3152 s.endBlock().AddEdgeTo(bEnd) 3153 3154 // Merge results. 3155 s.startBlock(bEnd) 3156 return s.variable(n, types.Types[TINT]) 3157 } 3158 } 3159 addF("math/bits", "OnesCount64", 3160 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64), 3161 sys.AMD64) 3162 addF("math/bits", "OnesCount64", 3163 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3164 return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0]) 3165 }, 3166 sys.PPC64) 3167 addF("math/bits", "OnesCount32", 3168 makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32), 3169 sys.AMD64) 3170 addF("math/bits", "OnesCount32", 3171 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3172 return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0]) 3173 }, 3174 sys.PPC64) 3175 addF("math/bits", "OnesCount16", 3176 makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16), 3177 sys.AMD64) 3178 // Note: no OnesCount8, the Go implementation is faster - just a table load. 3179 addF("math/bits", "OnesCount", 3180 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32), 3181 sys.AMD64) 3182 3183 /******** sync/atomic ********/ 3184 3185 // Note: these are disabled by flag_race in findIntrinsic below. 3186 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...) 3187 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...) 
3188 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...) 3189 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...) 3190 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...) 3191 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...) 3192 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...) 3193 3194 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...) 3195 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...) 3196 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. 3197 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...) 3198 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...) 3199 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...) 3200 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...) 3201 3202 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...) 3203 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...) 3204 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...) 3205 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...) 3206 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...) 3207 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...) 3208 3209 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...) 3210 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...) 3211 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...) 3212 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...) 3213 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...) 3214 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...) 3215 3216 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...) 3217 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...) 3218 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...) 3219 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...) 3220 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...) 3221 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...) 3222 3223 /******** math/big ********/ 3224 add("math/big", "mulWW", 3225 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3226 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) 3227 }, 3228 sys.ArchAMD64) 3229 add("math/big", "divWW", 3230 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 3231 return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) 3232 }, 3233 sys.ArchAMD64) 3234 } 3235 3236 // findIntrinsic returns a function which builds the SSA equivalent of the 3237 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. 
3238 func findIntrinsic(sym *types.Sym) intrinsicBuilder { 3239 if ssa.IntrinsicsDisable { 3240 return nil 3241 } 3242 if sym == nil || sym.Pkg == nil { 3243 return nil 3244 } 3245 pkg := sym.Pkg.Path 3246 if sym.Pkg == localpkg { 3247 pkg = myimportpath 3248 } 3249 if flag_race && pkg == "sync/atomic" { 3250 // The race detector needs to be able to intercept these calls. 3251 // We can't intrinsify them. 3252 return nil 3253 } 3254 // Skip intrinsifying math functions (which may contain hard-float 3255 // instructions) when soft-float 3256 if thearch.SoftFloat && pkg == "math" { 3257 return nil 3258 } 3259 3260 fn := sym.Name 3261 return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] 3262 } 3263 3264 func isIntrinsicCall(n *Node) bool { 3265 if n == nil || n.Left == nil { 3266 return false 3267 } 3268 return findIntrinsic(n.Left.Sym) != nil 3269 } 3270 3271 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 3272 func (s *state) intrinsicCall(n *Node) *ssa.Value { 3273 v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) 3274 if ssa.IntrinsicsDebug > 0 { 3275 x := v 3276 if x == nil { 3277 x = s.mem() 3278 } 3279 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { 3280 x = x.Args[0] 3281 } 3282 Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) 3283 } 3284 return v 3285 } 3286 3287 type callArg struct { 3288 offset int64 3289 v *ssa.Value 3290 } 3291 type byOffset []callArg 3292 3293 func (x byOffset) Len() int { return len(x) } 3294 func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 3295 func (x byOffset) Less(i, j int) bool { 3296 return x[i].offset < x[j].offset 3297 } 3298 3299 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. 3300 func (s *state) intrinsicArgs(n *Node) []*ssa.Value { 3301 // This code is complicated because of how walk transforms calls. For a call node, 3302 // each entry in n.List is either an assignment to OINDREGSP which actually 3303 // stores an arg, or an assignment to a temporary which computes an arg 3304 // which is later assigned. 3305 // The args can also be out of order. 3306 // TODO: when walk goes away someday, this code can go away also. 3307 var args []callArg 3308 temps := map[*Node]*ssa.Value{} 3309 for _, a := range n.List.Slice() { 3310 if a.Op != OAS { 3311 s.Fatalf("non-assignment as a function argument %v", a.Op) 3312 } 3313 l, r := a.Left, a.Right 3314 switch l.Op { 3315 case ONAME: 3316 // Evaluate and store to "temporary". 3317 // Walk ensures these temporaries are dead outside of n. 3318 temps[l] = s.expr(r) 3319 case OINDREGSP: 3320 // Store a value to an argument slot. 3321 var v *ssa.Value 3322 if x, ok := temps[r]; ok { 3323 // This is a previously computed temporary. 3324 v = x 3325 } else { 3326 // This is an explicit value; evaluate it. 3327 v = s.expr(r) 3328 } 3329 args = append(args, callArg{l.Xoffset, v}) 3330 default: 3331 s.Fatalf("function argument assignment target not allowed: %v", l.Op) 3332 } 3333 } 3334 sort.Sort(byOffset(args)) 3335 res := make([]*ssa.Value, len(args)) 3336 for i, a := range args { 3337 res[i] = a.v 3338 } 3339 return res 3340 } 3341 3342 // Calls the function n using the specified call type. 3343 // Returns the address of the return value (or nil if none). 
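// The kind selects the callee: callDefer routes through Deferproc, callGo
// through Newproc, callGosecure through Gosecload, and callNormal becomes
// a static, closure, or interface call.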
3344 func (s *state) call(n *Node, k callKind) *ssa.Value { 3345 var sym *types.Sym // target symbol (if static) 3346 var closure *ssa.Value // ptr to closure to run (if dynamic) 3347 var codeptr *ssa.Value // ptr to target code (if dynamic) 3348 var rcvr *ssa.Value // receiver to set 3349 fn := n.Left 3350 switch n.Op { 3351 case OCALLFUNC: 3352 if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC { 3353 sym = fn.Sym 3354 break 3355 } 3356 closure = s.expr(fn) 3357 case OCALLMETH: 3358 if fn.Op != ODOTMETH { 3359 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 3360 } 3361 if k == callNormal { 3362 sym = fn.Sym 3363 break 3364 } 3365 // Make a name n2 for the function. 3366 // fn.Sym might be sync.(*Mutex).Unlock. 3367 // Make a PFUNC node out of that, then evaluate it. 3368 // We get back an SSA value representing &sync.(*Mutex).Unlock·f. 3369 // We can then pass that to defer or go. 3370 n2 := newnamel(fn.Pos, fn.Sym) 3371 n2.Name.Curfn = s.curfn 3372 n2.SetClass(PFUNC) 3373 n2.Pos = fn.Pos 3374 n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. 3375 closure = s.expr(n2) 3376 // Note: receiver is already assigned in n.List, so we don't 3377 // want to set it here. 3378 case OCALLINTER: 3379 if fn.Op != ODOTINTER { 3380 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 3381 } 3382 i := s.expr(fn.Left) 3383 itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i) 3384 s.nilCheck(itab) 3385 itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab 3386 itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) 3387 if k == callNormal { 3388 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem()) 3389 } else { 3390 closure = itab 3391 } 3392 rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i) 3393 } 3394 dowidth(fn.Type) 3395 stksize := fn.Type.ArgWidth() // includes receiver 3396 3397 // Run all argument assignments. The arg slots have already 3398 // been offset by the appropriate amount (+2*widthptr for go/defer, 3399 // +widthptr for interface calls). 3400 // For OCALLMETH, the receiver is set in these statements. 3401 s.stmtList(n.List) 3402 3403 // Set receiver (for interface calls) 3404 if rcvr != nil { 3405 argStart := Ctxt.FixedFrameSize() 3406 if k != callNormal { 3407 argStart += int64(2 * Widthptr) 3408 } 3409 addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) 3410 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem()) 3411 } 3412 3413 // Defer/go/gosecure args 3414 if k != callNormal { 3415 // Write argsize and closure (args to Newproc/Deferproc). 3416 argStart := Ctxt.FixedFrameSize() 3417 argsize := s.constInt32(types.Types[TUINT32], int32(stksize)) 3418 addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) 3419 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINT32], addr, argsize, s.mem()) 3420 addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr)) 3421 // TODO(aghosn): this currently passes the raw closure pointer through to gosecure. Write an ID instead?
3422 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem()) 3423 stksize += 2 * int64(Widthptr) 3424 } 3425 3426 // call target 3427 var call *ssa.Value 3428 switch { 3429 case k == callDefer: 3430 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Deferproc, s.mem()) 3431 case k == callGo: 3432 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Newproc, s.mem()) 3433 case k == callGosecure: 3434 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Gosecload, s.mem()) 3435 //call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Newproc, s.mem()) 3436 case closure != nil: 3437 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem()) 3438 call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem()) 3439 case codeptr != nil: 3440 call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem()) 3441 case sym != nil: 3442 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem()) 3443 default: 3444 Fatalf("bad call type %v %v", n.Op, n) 3445 } 3446 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 3447 s.vars[&memVar] = call 3448 3449 // Finish block for defers 3450 if k == callDefer { 3451 b := s.endBlock() 3452 b.Kind = ssa.BlockDefer 3453 b.SetControl(call) 3454 bNext := s.f.NewBlock(ssa.BlockPlain) 3455 b.AddEdgeTo(bNext) 3456 // Add recover edge to exit code. 3457 r := s.f.NewBlock(ssa.BlockPlain) 3458 s.startBlock(r) 3459 s.exit() 3460 b.AddEdgeTo(r) 3461 b.Likely = ssa.BranchLikely 3462 s.startBlock(bNext) 3463 } 3464 3465 res := n.Left.Type.Results() 3466 if res.NumFields() == 0 || k != callNormal { 3467 // call has no return value. Continue with the next statement. 3468 return nil 3469 } 3470 fp := res.Field(0) 3471 return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()) 3472 } 3473 3474 // etypesign returns the signed-ness of e, for integer/pointer etypes. 3475 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 3476 func etypesign(e types.EType) int8 { 3477 switch e { 3478 case TINT8, TINT16, TINT32, TINT64, TINT: 3479 return -1 3480 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 3481 return +1 3482 } 3483 return 0 3484 } 3485 3486 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 3487 // The value that the returned Value represents is guaranteed to be non-nil. 3488 // If bounded is true then this address does not require a nil check for its operand 3489 // even if that would otherwise be implied. 3490 func (s *state) addr(n *Node, bounded bool) *ssa.Value { 3491 t := types.NewPtr(n.Type) 3492 switch n.Op { 3493 case ONAME: 3494 switch n.Class() { 3495 case PEXTERN: 3496 // global variable 3497 v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb) 3498 // TODO: Make OpAddr use AuxInt as well as Aux. 3499 if n.Xoffset != 0 { 3500 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 3501 } 3502 return v 3503 case PPARAM: 3504 // parameter slot 3505 v := s.decladdrs[n] 3506 if v != nil { 3507 return v 3508 } 3509 if n == nodfp { 3510 // Special arg that points to the frame pointer (Used by ORECOVER). 3511 return s.entryNewValue1A(ssa.OpAddr, t, n, s.sp) 3512 } 3513 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) 3514 return nil 3515 case PAUTO: 3516 return s.newValue1A(ssa.OpAddr, t, n, s.sp) 3517 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 
3518 // ensure that we reuse symbols for out parameters so 3519 // that cse works on their addresses 3520 return s.newValue1A(ssa.OpAddr, t, n, s.sp) 3521 default: 3522 s.Fatalf("variable address class %v not implemented", n.Class()) 3523 return nil 3524 } 3525 case OINDREGSP: 3526 // indirect off REGSP 3527 // used for storing/loading arguments/returns to/from callees 3528 return s.constOffPtrSP(t, n.Xoffset) 3529 case OINDEX: 3530 if n.Left.Type.IsSlice() { 3531 a := s.expr(n.Left) 3532 i := s.expr(n.Right) 3533 i = s.extendIndex(i, panicindex) 3534 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a) 3535 if !n.Bounded() { 3536 s.boundsCheck(i, len) 3537 } 3538 p := s.newValue1(ssa.OpSlicePtr, t, a) 3539 return s.newValue2(ssa.OpPtrIndex, t, p, i) 3540 } else { // array 3541 a := s.addr(n.Left, bounded) 3542 i := s.expr(n.Right) 3543 i = s.extendIndex(i, panicindex) 3544 len := s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 3545 if !n.Bounded() { 3546 s.boundsCheck(i, len) 3547 } 3548 return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i) 3549 } 3550 case OIND: 3551 return s.exprPtr(n.Left, bounded, n.Pos) 3552 case ODOT: 3553 p := s.addr(n.Left, bounded) 3554 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3555 case ODOTPTR: 3556 p := s.exprPtr(n.Left, bounded, n.Pos) 3557 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3558 case OCLOSUREVAR: 3559 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 3560 s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) 3561 case OCONVNOP: 3562 addr := s.addr(n.Left, bounded) 3563 return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type 3564 case OCALLFUNC, OCALLINTER, OCALLMETH: 3565 return s.call(n, callNormal) 3566 case ODOTTYPE: 3567 v, _ := s.dottype(n, false) 3568 if v.Op != ssa.OpLoad { 3569 s.Fatalf("dottype of non-load") 3570 } 3571 if v.Args[1] != s.mem() { 3572 s.Fatalf("memory no longer live from dottype load") 3573 } 3574 return v.Args[0] 3575 default: 3576 s.Fatalf("unhandled addr %v", n.Op) 3577 return nil 3578 } 3579 } 3580 3581 // canSSA reports whether n is SSA-able. 3582 // n must be an ONAME (or an ODOT sequence with an ONAME base). 3583 func (s *state) canSSA(n *Node) bool { 3584 if Debug['N'] != 0 { 3585 return false 3586 } 3587 for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) { 3588 n = n.Left 3589 } 3590 if n.Op != ONAME { 3591 return false 3592 } 3593 if n.Addrtaken() { 3594 return false 3595 } 3596 if n.isParamHeapCopy() { 3597 return false 3598 } 3599 if n.Class() == PAUTOHEAP { 3600 Fatalf("canSSA of PAUTOHEAP %v", n) 3601 } 3602 switch n.Class() { 3603 case PEXTERN: 3604 return false 3605 case PPARAMOUT: 3606 if s.hasdefer { 3607 // TODO: handle this case? Named return values must be 3608 // in memory so that the deferred function can see them. 3609 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 3610 // Or maybe not, see issue 18860. Even unnamed return values 3611 // must be written back so if a defer recovers, the caller can see them. 3612 return false 3613 } 3614 if s.cgoUnsafeArgs { 3615 // Cgo effectively takes the address of all result args, 3616 // but the compiler can't see that. 3617 return false 3618 } 3619 } 3620 if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" { 3621 // wrappers generated by genwrapper need to update 3622 // the .this pointer in place. 3623 // TODO: treat as a PPARAMOUT? 3624 return false 3625 } 3626 return canSSAType(n.Type) 3627 // TODO: try to make more variables SSAable?
3628 } 3629 3630 // canSSAType reports whether variables of type t are SSA-able. 3631 func canSSAType(t *types.Type) bool { 3632 dowidth(t) 3633 if t.Width > int64(4*Widthptr) { 3634 // 4*Widthptr is an arbitrary constant. We want it 3635 // to be at least 3*Widthptr so slices can be registerized. 3636 // Too big and we'll introduce too much register pressure. 3637 return false 3638 } 3639 switch t.Etype { 3640 case TARRAY: 3641 // We can't do larger arrays because dynamic indexing is 3642 // not supported on SSA variables. 3643 // TODO: allow if all indexes are constant. 3644 if t.NumElem() <= 1 { 3645 return canSSAType(t.Elem()) 3646 } 3647 return false 3648 case TSTRUCT: 3649 if t.NumFields() > ssa.MaxStruct { 3650 return false 3651 } 3652 for _, t1 := range t.Fields().Slice() { 3653 if !canSSAType(t1.Type) { 3654 return false 3655 } 3656 } 3657 return true 3658 default: 3659 return true 3660 } 3661 } 3662 3663 // exprPtr evaluates n to a pointer and nil-checks it. 3664 func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value { 3665 p := s.expr(n) 3666 if bounded || n.NonNil() { 3667 if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 { 3668 s.f.Warnl(lineno, "removed nil check") 3669 } 3670 return p 3671 } 3672 s.nilCheck(p) 3673 return p 3674 } 3675 3676 // nilCheck generates nil pointer checking code. 3677 // Used only for automatically inserted nil checks, 3678 // not for user code like 'x != nil'. 3679 func (s *state) nilCheck(ptr *ssa.Value) { 3680 if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() { 3681 return 3682 } 3683 s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem()) 3684 } 3685 3686 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. 3687 // Starts a new block on return. 3688 // idx is already converted to full int width. 3689 func (s *state) boundsCheck(idx, len *ssa.Value) { 3690 if Debug['B'] != 0 { 3691 return 3692 } 3693 3694 // bounds check 3695 cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len) 3696 s.check(cmp, panicindex) 3697 } 3698 3699 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not. 3700 // Starts a new block on return. 3701 // idx and len are already converted to full int width. 3702 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) { 3703 if Debug['B'] != 0 { 3704 return 3705 } 3706 3707 // bounds check 3708 cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len) 3709 s.check(cmp, panicslice) 3710 } 3711 3712 // If cmp (a bool) is false, panic using the given function. 3713 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { 3714 b := s.endBlock() 3715 b.Kind = ssa.BlockIf 3716 b.SetControl(cmp) 3717 b.Likely = ssa.BranchLikely 3718 bNext := s.f.NewBlock(ssa.BlockPlain) 3719 line := s.peekPos() 3720 pos := Ctxt.PosTable.Pos(line) 3721 fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()} 3722 bPanic := s.panics[fl] 3723 if bPanic == nil { 3724 bPanic = s.f.NewBlock(ssa.BlockPlain) 3725 s.panics[fl] = bPanic 3726 s.startBlock(bPanic) 3727 // The panic call takes/returns memory to ensure that the right 3728 // memory state is observed if the panic happens.
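// The rtcall below is issued with returns=false, which marks the shared
// panic block as an exit block; any later check that uses the same panic
// function at the same source line finds this block in s.panics and
// branches to it instead of growing the function.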
3729 s.rtcall(fn, false, nil) 3730 } 3731 b.AddEdgeTo(bNext) 3732 b.AddEdgeTo(bPanic) 3733 s.startBlock(bNext) 3734 } 3735 3736 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { 3737 needcheck := true 3738 switch b.Op { 3739 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: 3740 if b.AuxInt != 0 { 3741 needcheck = false 3742 } 3743 } 3744 if needcheck { 3745 // do a size-appropriate check for zero 3746 cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type)) 3747 s.check(cmp, panicdivide) 3748 } 3749 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 3750 } 3751 3752 // rtcall issues a call to the given runtime function fn with the listed args. 3753 // Returns a slice of results of the given result types. 3754 // The call is added to the end of the current block. 3755 // If returns is false, the block is marked as an exit block. 3756 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value { 3757 // Write args to the stack 3758 off := Ctxt.FixedFrameSize() 3759 for _, arg := range args { 3760 t := arg.Type 3761 off = Rnd(off, t.Alignment()) 3762 ptr := s.constOffPtrSP(t.PtrTo(), off) 3763 size := t.Size() 3764 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, ptr, arg, s.mem()) 3765 off += size 3766 } 3767 off = Rnd(off, int64(Widthreg)) 3768 3769 // Issue call 3770 call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem()) 3771 s.vars[&memVar] = call 3772 3773 if !returns { 3774 // Finish block 3775 b := s.endBlock() 3776 b.Kind = ssa.BlockExit 3777 b.SetControl(call) 3778 call.AuxInt = off - Ctxt.FixedFrameSize() 3779 if len(results) > 0 { 3780 Fatalf("panic call can't have results") 3781 } 3782 return nil 3783 } 3784 3785 // Load results 3786 res := make([]*ssa.Value, len(results)) 3787 for i, t := range results { 3788 off = Rnd(off, t.Alignment()) 3789 ptr := s.constOffPtrSP(types.NewPtr(t), off) 3790 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) 3791 off += t.Size() 3792 } 3793 off = Rnd(off, int64(Widthptr)) 3794 3795 // Remember how much callee stack space we needed. 3796 call.AuxInt = off 3797 3798 return res 3799 } 3800 3801 // do *left = right for type t. 3802 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) { 3803 if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) { 3804 // Known to not have write barrier. Store the whole type. 3805 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem()) 3806 return 3807 } 3808 3809 // store scalar fields first, so write barrier stores for 3810 // pointer fields can be grouped together, and scalar values 3811 // don't need to be live across the write barrier call. 3812 // TODO: if the writebarrier pass knows how to reorder stores, 3813 // we can do a single store here as long as skip==0. 3814 s.storeTypeScalars(t, left, right, skip) 3815 if skip&skipPtr == 0 && types.Haspointers(t) { 3816 s.storeTypePtrs(t, left, right) 3817 } 3818 } 3819 3820 // do *left = right for all scalar (non-pointer) parts of t. 3821 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) { 3822 switch { 3823 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): 3824 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem()) 3825 case t.IsPtrShaped(): 3826 // no scalar fields. 
3827 case t.IsString(): 3828 if skip&skipLen != 0 { 3829 return 3830 } 3831 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right) 3832 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 3833 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem()) 3834 case t.IsSlice(): 3835 if skip&skipLen == 0 { 3836 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right) 3837 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 3838 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem()) 3839 } 3840 if skip&skipCap == 0 { 3841 cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right) 3842 capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left) 3843 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capAddr, cap, s.mem()) 3844 } 3845 case t.IsInterface(): 3846 // itab field doesn't need a write barrier (even though it is a pointer). 3847 itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right) 3848 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], left, itab, s.mem()) 3849 case t.IsStruct(): 3850 n := t.NumFields() 3851 for i := 0; i < n; i++ { 3852 ft := t.FieldType(i) 3853 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3854 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3855 s.storeTypeScalars(ft, addr, val, 0) 3856 } 3857 case t.IsArray() && t.NumElem() == 0: 3858 // nothing 3859 case t.IsArray() && t.NumElem() == 1: 3860 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0) 3861 default: 3862 s.Fatalf("bad write barrier type %v", t) 3863 } 3864 } 3865 3866 // do *left = right for all pointer parts of t. 3867 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { 3868 switch { 3869 case t.IsPtrShaped(): 3870 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem()) 3871 case t.IsString(): 3872 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right) 3873 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem()) 3874 case t.IsSlice(): 3875 elType := types.NewPtr(t.Elem()) 3876 ptr := s.newValue1(ssa.OpSlicePtr, elType, right) 3877 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, elType, left, ptr, s.mem()) 3878 case t.IsInterface(): 3879 // itab field is treated as a scalar. 3880 idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right) 3881 idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left) 3882 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem()) 3883 case t.IsStruct(): 3884 n := t.NumFields() 3885 for i := 0; i < n; i++ { 3886 ft := t.FieldType(i) 3887 if !types.Haspointers(ft) { 3888 continue 3889 } 3890 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3891 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3892 s.storeTypePtrs(ft, addr, val) 3893 } 3894 case t.IsArray() && t.NumElem() == 0: 3895 // nothing 3896 case t.IsArray() && t.NumElem() == 1: 3897 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3898 default: 3899 s.Fatalf("bad write barrier type %v", t) 3900 } 3901 } 3902 3903 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result. 
3904 // i,j,k may be nil, in which case they are set to their default value. 3905 // t is a slice, ptr to array, or string type. 3906 func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) { 3907 var elemtype *types.Type 3908 var ptrtype *types.Type 3909 var ptr *ssa.Value 3910 var len *ssa.Value 3911 var cap *ssa.Value 3912 zero := s.constInt(types.Types[TINT], 0) 3913 switch { 3914 case t.IsSlice(): 3915 elemtype = t.Elem() 3916 ptrtype = types.NewPtr(elemtype) 3917 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v) 3918 len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v) 3919 cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v) 3920 case t.IsString(): 3921 elemtype = types.Types[TUINT8] 3922 ptrtype = types.NewPtr(elemtype) 3923 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v) 3924 len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v) 3925 cap = len 3926 case t.IsPtr(): 3927 if !t.Elem().IsArray() { 3928 s.Fatalf("bad ptr to array in slice %v\n", t) 3929 } 3930 elemtype = t.Elem().Elem() 3931 ptrtype = types.NewPtr(elemtype) 3932 s.nilCheck(v) 3933 ptr = v 3934 len = s.constInt(types.Types[TINT], t.Elem().NumElem()) 3935 cap = len 3936 default: 3937 s.Fatalf("bad type in slice %v\n", t) 3938 } 3939 3940 // Set default values 3941 if i == nil { 3942 i = zero 3943 } 3944 if j == nil { 3945 j = len 3946 } 3947 if k == nil { 3948 k = cap 3949 } 3950 3951 // Panic if slice indices are not in bounds. 3952 s.sliceBoundsCheck(i, j) 3953 if j != k { 3954 s.sliceBoundsCheck(j, k) 3955 } 3956 if k != cap { 3957 s.sliceBoundsCheck(k, cap) 3958 } 3959 3960 // Generate the following code assuming that indexes are in bounds. 3961 // The masking is to make sure that we don't generate a slice 3962 // that points to the next object in memory. 3963 // rlen = j - i 3964 // rcap = k - i 3965 // delta = i * elemsize 3966 // rptr = p + delta&mask(rcap) 3967 // result = (SliceMake rptr rlen rcap) 3968 // where mask(x) is 0 if x==0 and -1 if x>0. 3969 subOp := s.ssaOp(OSUB, types.Types[TINT]) 3970 mulOp := s.ssaOp(OMUL, types.Types[TINT]) 3971 andOp := s.ssaOp(OAND, types.Types[TINT]) 3972 rlen := s.newValue2(subOp, types.Types[TINT], j, i) 3973 var rcap *ssa.Value 3974 switch { 3975 case t.IsString(): 3976 // Capacity of the result is unimportant. However, we use 3977 // rcap to test if we've generated a zero-length slice. 3978 // Use length of strings for that. 3979 rcap = rlen 3980 case j == k: 3981 rcap = rlen 3982 default: 3983 rcap = s.newValue2(subOp, types.Types[TINT], k, i) 3984 } 3985 3986 var rptr *ssa.Value 3987 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 { 3988 // No pointer arithmetic necessary. 3989 rptr = ptr 3990 } else { 3991 // delta = # of bytes to offset pointer by. 3992 delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width)) 3993 // If we're slicing to the point where the capacity is zero, 3994 // zero out the delta. 
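// Slicemask yields 0 when rcap is 0 and all ones otherwise, so for a
// zero-capacity result the masked delta becomes 0 and rptr stays equal
// to ptr rather than pointing past the end of the underlying object.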
3995 mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap) 3996 delta = s.newValue2(andOp, types.Types[TINT], delta, mask) 3997 // Compute rptr = ptr + delta 3998 rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta) 3999 } 4000 4001 return rptr, rlen, rcap 4002 } 4003 4004 type u642fcvtTab struct { 4005 geq, cvt2F, and, rsh, or, add ssa.Op 4006 one func(*state, *types.Type, int64) *ssa.Value 4007 } 4008 4009 var u64_f64 = u642fcvtTab{ 4010 geq: ssa.OpGeq64, 4011 cvt2F: ssa.OpCvt64to64F, 4012 and: ssa.OpAnd64, 4013 rsh: ssa.OpRsh64Ux64, 4014 or: ssa.OpOr64, 4015 add: ssa.OpAdd64F, 4016 one: (*state).constInt64, 4017 } 4018 4019 var u64_f32 = u642fcvtTab{ 4020 geq: ssa.OpGeq64, 4021 cvt2F: ssa.OpCvt64to32F, 4022 and: ssa.OpAnd64, 4023 rsh: ssa.OpRsh64Ux64, 4024 or: ssa.OpOr64, 4025 add: ssa.OpAdd32F, 4026 one: (*state).constInt64, 4027 } 4028 4029 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4030 return s.uint64Tofloat(&u64_f64, n, x, ft, tt) 4031 } 4032 4033 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4034 return s.uint64Tofloat(&u64_f32, n, x, ft, tt) 4035 } 4036 4037 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4038 // if x >= 0 { 4039 // result = (floatY) x 4040 // } else { 4041 // y = uintX(x) ; y = x & 1 4042 // z = uintX(x) ; z = z >> 1 4043 // (one shift only: z is x halved, y preserves the dropped bit) 4044 // z = z | y 4045 // result = floatY(z) 4046 // result = result + result 4047 // } 4048 // 4049 // Code borrowed from old code generator. 4050 // What's going on: large 64-bit "unsigned" looks like 4051 // negative number to hardware's integer-to-float 4052 // conversion. However, because the mantissa is only 4053 // 63 bits, we don't need the LSB, so instead we do an 4054 // unsigned right shift (divide by two), convert, and 4055 // double. However, before we do that, we need to be 4056 // sure that we do not lose a "1" if that made the 4057 // difference in the resulting rounding. Therefore, we 4058 // preserve it, and OR (not ADD) it back in. The case 4059 // that matters is when the eleven discarded bits are 4060 // equal to 10000000001; that rounds up, and the 1 cannot 4061 // be lost else it would round down if the LSB of the 4062 // candidate mantissa is 0.
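// In plain Go, the conversion built below corresponds to roughly this
// sketch (u64toF64 is a hypothetical helper, not part of this file):
//
//	func u64toF64(x uint64) float64 {
//		if int64(x) >= 0 {
//			return float64(int64(x))
//		}
//		z := x>>1 | x&1 // halve; keep the dropped bit sticky for rounding
//		return 2 * float64(int64(z))
//	}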
4063 cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft)) 4064 b := s.endBlock() 4065 b.Kind = ssa.BlockIf 4066 b.SetControl(cmp) 4067 b.Likely = ssa.BranchLikely 4068 4069 bThen := s.f.NewBlock(ssa.BlockPlain) 4070 bElse := s.f.NewBlock(ssa.BlockPlain) 4071 bAfter := s.f.NewBlock(ssa.BlockPlain) 4072 4073 b.AddEdgeTo(bThen) 4074 s.startBlock(bThen) 4075 a0 := s.newValue1(cvttab.cvt2F, tt, x) 4076 s.vars[n] = a0 4077 s.endBlock() 4078 bThen.AddEdgeTo(bAfter) 4079 4080 b.AddEdgeTo(bElse) 4081 s.startBlock(bElse) 4082 one := cvttab.one(s, ft, 1) 4083 y := s.newValue2(cvttab.and, ft, x, one) 4084 z := s.newValue2(cvttab.rsh, ft, x, one) 4085 z = s.newValue2(cvttab.or, ft, z, y) 4086 a := s.newValue1(cvttab.cvt2F, tt, z) 4087 a1 := s.newValue2(cvttab.add, tt, a, a) 4088 s.vars[n] = a1 4089 s.endBlock() 4090 bElse.AddEdgeTo(bAfter) 4091 4092 s.startBlock(bAfter) 4093 return s.variable(n, n.Type) 4094 } 4095 4096 type u322fcvtTab struct { 4097 cvtI2F, cvtF2F ssa.Op 4098 } 4099 4100 var u32_f64 = u322fcvtTab{ 4101 cvtI2F: ssa.OpCvt32to64F, 4102 cvtF2F: ssa.OpCopy, 4103 } 4104 4105 var u32_f32 = u322fcvtTab{ 4106 cvtI2F: ssa.OpCvt32to32F, 4107 cvtF2F: ssa.OpCvt64Fto32F, 4108 } 4109 4110 func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4111 return s.uint32Tofloat(&u32_f64, n, x, ft, tt) 4112 } 4113 4114 func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4115 return s.uint32Tofloat(&u32_f32, n, x, ft, tt) 4116 } 4117 4118 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4119 // if x >= 0 { 4120 // result = floatY(x) 4121 // } else { 4122 // result = floatY(float64(x) + (1<<32)) 4123 // } 4124 cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft)) 4125 b := s.endBlock() 4126 b.Kind = ssa.BlockIf 4127 b.SetControl(cmp) 4128 b.Likely = ssa.BranchLikely 4129 4130 bThen := s.f.NewBlock(ssa.BlockPlain) 4131 bElse := s.f.NewBlock(ssa.BlockPlain) 4132 bAfter := s.f.NewBlock(ssa.BlockPlain) 4133 4134 b.AddEdgeTo(bThen) 4135 s.startBlock(bThen) 4136 a0 := s.newValue1(cvttab.cvtI2F, tt, x) 4137 s.vars[n] = a0 4138 s.endBlock() 4139 bThen.AddEdgeTo(bAfter) 4140 4141 b.AddEdgeTo(bElse) 4142 s.startBlock(bElse) 4143 a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x) 4144 twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32)) 4145 a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32) 4146 a3 := s.newValue1(cvttab.cvtF2F, tt, a2) 4147 4148 s.vars[n] = a3 4149 s.endBlock() 4150 bElse.AddEdgeTo(bAfter) 4151 4152 s.startBlock(bAfter) 4153 return s.variable(n, n.Type) 4154 } 4155 4156 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
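// cap is not defined for maps in Go, so only OLEN can arrive here for a
// map; the OCAP arm below only has to understand the channel layout
// (length in the first int-sized word, capacity in the second).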
4157 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { 4158 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { 4159 s.Fatalf("node must be a map or a channel") 4160 } 4161 // if n == nil { 4162 // return 0 4163 // } else { 4164 // // len 4165 // return *((*int)n) 4166 // // cap 4167 // return *(((*int)n)+1) 4168 // } 4169 lenType := n.Type 4170 nilValue := s.constNil(types.Types[TUINTPTR]) 4171 cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue) 4172 b := s.endBlock() 4173 b.Kind = ssa.BlockIf 4174 b.SetControl(cmp) 4175 b.Likely = ssa.BranchUnlikely 4176 4177 bThen := s.f.NewBlock(ssa.BlockPlain) 4178 bElse := s.f.NewBlock(ssa.BlockPlain) 4179 bAfter := s.f.NewBlock(ssa.BlockPlain) 4180 4181 // length/capacity of a nil map/chan is zero 4182 b.AddEdgeTo(bThen) 4183 s.startBlock(bThen) 4184 s.vars[n] = s.zeroVal(lenType) 4185 s.endBlock() 4186 bThen.AddEdgeTo(bAfter) 4187 4188 b.AddEdgeTo(bElse) 4189 s.startBlock(bElse) 4190 switch n.Op { 4191 case OLEN: 4192 // length is stored in the first word for map/chan 4193 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem()) 4194 case OCAP: 4195 // capacity is stored in the second word for chan 4196 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) 4197 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem()) 4198 default: 4199 s.Fatalf("op must be OLEN or OCAP") 4200 } 4201 s.endBlock() 4202 bElse.AddEdgeTo(bAfter) 4203 4204 s.startBlock(bAfter) 4205 return s.variable(n, lenType) 4206 } 4207 4208 type f2uCvtTab struct { 4209 ltf, cvt2U, subf, or ssa.Op 4210 floatValue func(*state, *types.Type, float64) *ssa.Value 4211 intValue func(*state, *types.Type, int64) *ssa.Value 4212 cutoff uint64 4213 } 4214 4215 var f32_u64 = f2uCvtTab{ 4216 ltf: ssa.OpLess32F, 4217 cvt2U: ssa.OpCvt32Fto64, 4218 subf: ssa.OpSub32F, 4219 or: ssa.OpOr64, 4220 floatValue: (*state).constFloat32, 4221 intValue: (*state).constInt64, 4222 cutoff: 9223372036854775808, 4223 } 4224 4225 var f64_u64 = f2uCvtTab{ 4226 ltf: ssa.OpLess64F, 4227 cvt2U: ssa.OpCvt64Fto64, 4228 subf: ssa.OpSub64F, 4229 or: ssa.OpOr64, 4230 floatValue: (*state).constFloat64, 4231 intValue: (*state).constInt64, 4232 cutoff: 9223372036854775808, 4233 } 4234 4235 var f32_u32 = f2uCvtTab{ 4236 ltf: ssa.OpLess32F, 4237 cvt2U: ssa.OpCvt32Fto32, 4238 subf: ssa.OpSub32F, 4239 or: ssa.OpOr32, 4240 floatValue: (*state).constFloat32, 4241 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 4242 cutoff: 2147483648, 4243 } 4244 4245 var f64_u32 = f2uCvtTab{ 4246 ltf: ssa.OpLess64F, 4247 cvt2U: ssa.OpCvt64Fto32, 4248 subf: ssa.OpSub64F, 4249 or: ssa.OpOr32, 4250 floatValue: (*state).constFloat64, 4251 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 4252 cutoff: 2147483648, 4253 } 4254 4255 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4256 return s.floatToUint(&f32_u64, n, x, ft, tt) 4257 } 4258 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4259 return s.floatToUint(&f64_u64, n, x, ft, tt) 4260 } 4261 4262 func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4263 return s.floatToUint(&f32_u32, n, x, ft, tt) 4264 } 4265 4266 func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4267 return s.floatToUint(&f64_u32, n, x, ft, tt) 4268 } 4269 4270 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt 
*types.Type) *ssa.Value { 4271 // cutoff:=1<<(intY_Size-1) 4272 // if x < floatX(cutoff) { 4273 // result = uintY(x) 4274 // } else { 4275 // y = x - floatX(cutoff) 4276 // z = uintY(y) 4277 // result = z | -(cutoff) 4278 // } 4279 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) 4280 cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff) 4281 b := s.endBlock() 4282 b.Kind = ssa.BlockIf 4283 b.SetControl(cmp) 4284 b.Likely = ssa.BranchLikely 4285 4286 bThen := s.f.NewBlock(ssa.BlockPlain) 4287 bElse := s.f.NewBlock(ssa.BlockPlain) 4288 bAfter := s.f.NewBlock(ssa.BlockPlain) 4289 4290 b.AddEdgeTo(bThen) 4291 s.startBlock(bThen) 4292 a0 := s.newValue1(cvttab.cvt2U, tt, x) 4293 s.vars[n] = a0 4294 s.endBlock() 4295 bThen.AddEdgeTo(bAfter) 4296 4297 b.AddEdgeTo(bElse) 4298 s.startBlock(bElse) 4299 y := s.newValue2(cvttab.subf, ft, x, cutoff) 4300 y = s.newValue1(cvttab.cvt2U, tt, y) 4301 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) 4302 a1 := s.newValue2(cvttab.or, tt, y, z) 4303 s.vars[n] = a1 4304 s.endBlock() 4305 bElse.AddEdgeTo(bAfter) 4306 4307 s.startBlock(bAfter) 4308 return s.variable(n, n.Type) 4309 } 4310 4311 // dottype generates SSA for a type assertion node. 4312 // commaok indicates whether to panic or return a bool. 4313 // If commaok is false, resok will be nil. 4314 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { 4315 iface := s.expr(n.Left) // input interface 4316 target := s.expr(n.Right) // target type 4317 byteptr := s.f.Config.Types.BytePtr 4318 4319 if n.Type.IsInterface() { 4320 if n.Type.IsEmptyInterface() { 4321 // Converting to an empty interface. 4322 // Input could be an empty or nonempty interface. 4323 if Debug_typeassert > 0 { 4324 Warnl(n.Pos, "type assertion inlined") 4325 } 4326 4327 // Get itab/type field from input. 4328 itab := s.newValue1(ssa.OpITab, byteptr, iface) 4329 // Conversion succeeds iff that field is not nil. 4330 cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr)) 4331 4332 if n.Left.Type.IsEmptyInterface() && commaok { 4333 // Converting empty interface to empty interface with ,ok is just a nil check. 4334 return iface, cond 4335 } 4336 4337 // Branch on nilness. 4338 b := s.endBlock() 4339 b.Kind = ssa.BlockIf 4340 b.SetControl(cond) 4341 b.Likely = ssa.BranchLikely 4342 bOk := s.f.NewBlock(ssa.BlockPlain) 4343 bFail := s.f.NewBlock(ssa.BlockPlain) 4344 b.AddEdgeTo(bOk) 4345 b.AddEdgeTo(bFail) 4346 4347 if !commaok { 4348 // On failure, panic by calling panicnildottype. 4349 s.startBlock(bFail) 4350 s.rtcall(panicnildottype, false, nil, target) 4351 4352 // On success, return (perhaps modified) input interface. 4353 s.startBlock(bOk) 4354 if n.Left.Type.IsEmptyInterface() { 4355 res = iface // Use input interface unchanged. 4356 return 4357 } 4358 // Load type out of itab, build interface with existing idata. 4359 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4360 typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4361 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4362 res = s.newValue2(ssa.OpIMake, n.Type, typ, idata) 4363 return 4364 } 4365 4366 s.startBlock(bOk) 4367 // nonempty -> empty 4368 // Need to load type from itab 4369 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4370 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4371 s.endBlock() 4372 4373 // itab is nil, might as well use that as the nil result. 
4374 s.startBlock(bFail) 4375 s.vars[&typVar] = itab 4376 s.endBlock() 4377 4378 // Merge point. 4379 bEnd := s.f.NewBlock(ssa.BlockPlain) 4380 bOk.AddEdgeTo(bEnd) 4381 bFail.AddEdgeTo(bEnd) 4382 s.startBlock(bEnd) 4383 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4384 res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata) 4385 resok = cond 4386 delete(s.vars, &typVar) 4387 return 4388 } 4389 // converting to a nonempty interface needs a runtime call. 4390 if Debug_typeassert > 0 { 4391 Warnl(n.Pos, "type assertion not inlined") 4392 } 4393 if n.Left.Type.IsEmptyInterface() { 4394 if commaok { 4395 call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) 4396 return call[0], call[1] 4397 } 4398 return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil 4399 } 4400 if commaok { 4401 call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) 4402 return call[0], call[1] 4403 } 4404 return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil 4405 } 4406 4407 if Debug_typeassert > 0 { 4408 Warnl(n.Pos, "type assertion inlined") 4409 } 4410 4411 // Converting to a concrete type. 4412 direct := isdirectiface(n.Type) 4413 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface 4414 4415 4416 4417 var targetITab *ssa.Value 4418 if n.Left.Type.IsEmptyInterface() { 4419 // Looking for pointer to target type. 4420 targetITab = target 4421 } else { 4422 // Looking for pointer to itab for target type and source interface. 4423 targetITab = s.expr(n.List.First()) 4424 } 4425 4426 var tmp *Node // temporary for use with large types 4427 var addr *ssa.Value // address of tmp 4428 if commaok && !canSSAType(n.Type) { 4429 // unSSAable type, use temporary. 4430 // TODO: get rid of some of these temporaries. 4431 tmp = tempAt(n.Pos, s.curfn, n.Type) 4432 addr = s.addr(tmp, false) 4433 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) 4434 } 4435 4436 cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab) 4437 b := s.endBlock() 4438 b.Kind = ssa.BlockIf 4439 b.SetControl(cond) 4440 b.Likely = ssa.BranchLikely 4441 4442 bOk := s.f.NewBlock(ssa.BlockPlain) 4443 bFail := s.f.NewBlock(ssa.BlockPlain) 4444 b.AddEdgeTo(bOk) 4445 b.AddEdgeTo(bFail) 4446 4447 if !commaok { 4448 // on failure, panic by calling panicdottype 4449 s.startBlock(bFail) 4450 taddr := s.expr(n.Right.Right) 4451 if n.Left.Type.IsEmptyInterface() { 4452 s.rtcall(panicdottypeE, false, nil, itab, target, taddr) 4453 } else { 4454 s.rtcall(panicdottypeI, false, nil, itab, target, taddr) 4455 } 4456 4457 // on success, return data from interface 4458 s.startBlock(bOk) 4459 if direct { 4460 return s.newValue1(ssa.OpIData, n.Type, iface), nil 4461 } 4462 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4463 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil 4464 } 4465 4466 // commaok is the more complicated case because we have 4467 // a control flow merge point. 4468 bEnd := s.f.NewBlock(ssa.BlockPlain) 4469 // Note that we need a new valVar each time (unlike okVar where we can 4470 // reuse the variable) because it might have a different type every time.
4471 valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}} 4472 4473 // type assertion succeeded 4474 s.startBlock(bOk) 4475 if tmp == nil { 4476 if direct { 4477 s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface) 4478 } else { 4479 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4480 s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 4481 } 4482 } else { 4483 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4484 store := s.newValue3I(ssa.OpMove, types.TypeMem, n.Type.Size(), addr, p, s.mem()) 4485 store.Aux = n.Type 4486 s.vars[&memVar] = store 4487 } 4488 s.vars[&okVar] = s.constBool(true) 4489 s.endBlock() 4490 bOk.AddEdgeTo(bEnd) 4491 4492 // type assertion failed 4493 s.startBlock(bFail) 4494 if tmp == nil { 4495 s.vars[valVar] = s.zeroVal(n.Type) 4496 } else { 4497 store := s.newValue2I(ssa.OpZero, types.TypeMem, n.Type.Size(), addr, s.mem()) 4498 store.Aux = n.Type 4499 s.vars[&memVar] = store 4500 } 4501 s.vars[&okVar] = s.constBool(false) 4502 s.endBlock() 4503 bFail.AddEdgeTo(bEnd) 4504 4505 // merge point 4506 s.startBlock(bEnd) 4507 if tmp == nil { 4508 res = s.variable(valVar, n.Type) 4509 delete(s.vars, valVar) 4510 } else { 4511 res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 4512 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem()) 4513 } 4514 resok = s.variable(&okVar, types.Types[TBOOL]) 4515 delete(s.vars, &okVar) 4516 return res, resok 4517 } 4518 4519 // variable returns the value of a variable at the current location. 4520 func (s *state) variable(name *Node, t *types.Type) *ssa.Value { 4521 v := s.vars[name] 4522 if v != nil { 4523 return v 4524 } 4525 v = s.fwdVars[name] 4526 if v != nil { 4527 return v 4528 } 4529 4530 if s.curBlock == s.f.Entry { 4531 // No variable should be live at entry. 4532 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v) 4533 } 4534 // Make a FwdRef, which records a value that's live on block input. 4535 // We'll find the matching definition as part of insertPhis. 4536 v = s.newValue0A(ssa.OpFwdRef, t, name) 4537 s.fwdVars[name] = v 4538 s.addNamedValue(name, v) 4539 return v 4540 } 4541 4542 func (s *state) mem() *ssa.Value { 4543 return s.variable(&memVar, types.TypeMem) 4544 } 4545 4546 func (s *state) addNamedValue(n *Node, v *ssa.Value) { 4547 if n.Class() == Pxxx { 4548 // Don't track our dummy nodes (&memVar etc.). 4549 return 4550 } 4551 if n.IsAutoTmp() { 4552 // Don't track temporary variables. 4553 return 4554 } 4555 if n.Class() == PPARAMOUT { 4556 // Don't track named output values. This prevents return values 4557 // from being assigned too early. See #14591 and #14762. TODO: allow this. 4558 return 4559 } 4560 if n.Class() == PAUTO && n.Xoffset != 0 { 4561 s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) 4562 } 4563 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} 4564 values, ok := s.f.NamedValues[loc] 4565 if !ok { 4566 s.f.Names = append(s.f.Names, loc) 4567 } 4568 s.f.NamedValues[loc] = append(values, v) 4569 } 4570 4571 // Branch is an unresolved branch. 4572 type Branch struct { 4573 P *obj.Prog // branch instruction 4574 B *ssa.Block // target 4575 } 4576 4577 // SSAGenState contains state needed during Prog generation. 4578 type SSAGenState struct { 4579 pp *Progs 4580 4581 // Branches remembers all the branch instructions we've seen 4582 // and where they would like to go. 
4583 Branches []Branch 4584 4585 // bstart remembers where each block starts (indexed by block ID) 4586 bstart []*obj.Prog 4587 4588 // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 4589 SSEto387 map[int16]int16 4590 // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. 4591 ScratchFpMem *Node 4592 4593 maxarg int64 // largest frame size for arguments to calls made by the function 4594 4595 // Map from GC safe points to stack map index, generated by 4596 // liveness analysis. 4597 stackMapIndex map[*ssa.Value]int 4598 } 4599 4600 // Prog appends a new Prog. 4601 func (s *SSAGenState) Prog(as obj.As) *obj.Prog { 4602 return s.pp.Prog(as) 4603 } 4604 4605 // Pc returns the current Prog. 4606 func (s *SSAGenState) Pc() *obj.Prog { 4607 return s.pp.next 4608 } 4609 4610 // SetPos sets the current source position. 4611 func (s *SSAGenState) SetPos(pos src.XPos) { 4612 s.pp.pos = pos 4613 } 4614 4615 // DebugFriendlySetPos sets the position subject to heuristics 4616 // that reduce "jumpy" line number churn when debugging. 4617 // Spill/fill/copy instructions from the register allocator, 4618 // phi functions, and instructions with a no-pos position 4619 // are examples of instructions that can cause churn. 4620 func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { 4621 // The two choices here are either to leave lineno unchanged, 4622 // or to explicitly set it to src.NoXPos. Leaving it unchanged 4623 // (reusing the preceding line number) produces slightly better- 4624 // looking assembly language output from the compiler, and is 4625 // expected by some already-existing tests. 4626 // The debug information appears to be the same in either case 4627 switch v.Op { 4628 case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg: 4629 // leave the position unchanged from beginning of block 4630 // or previous line number. 4631 default: 4632 if v.Pos != src.NoXPos { 4633 s.SetPos(v.Pos) 4634 } 4635 } 4636 } 4637 4638 // genssa appends entries to pp for each instruction in f. 4639 func genssa(f *ssa.Func, pp *Progs) { 4640 var s SSAGenState 4641 4642 e := f.Frontend().(*ssafn) 4643 4644 s.stackMapIndex = liveness(e, f) 4645 4646 // Remember where each block starts. 4647 s.bstart = make([]*obj.Prog, f.NumBlocks()) 4648 s.pp = pp 4649 var progToValue map[*obj.Prog]*ssa.Value 4650 var progToBlock map[*obj.Prog]*ssa.Block 4651 var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point. 
4652 var logProgs = e.log 4653 if logProgs { 4654 progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues()) 4655 progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) 4656 f.Logf("genssa %s\n", f.Name) 4657 progToBlock[s.pp.next] = f.Blocks[0] 4658 } 4659 4660 if thearch.Use387 { 4661 s.SSEto387 = map[int16]int16{} 4662 } 4663 4664 s.ScratchFpMem = e.scratchFpMem 4665 4666 logLocationLists := Debug_locationlist != 0 4667 if Ctxt.Flag_locationlists { 4668 e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(f, logLocationLists) 4669 valueToProgAfter = make([]*obj.Prog, f.NumValues()) 4670 } 4671 4672 // Emit basic blocks 4673 for i, b := range f.Blocks { 4674 s.bstart[b.ID] = s.pp.next 4675 // Emit values in block 4676 thearch.SSAMarkMoves(&s, b) 4677 for _, v := range b.Values { 4678 x := s.pp.next 4679 s.DebugFriendlySetPosFrom(v) 4680 switch v.Op { 4681 case ssa.OpInitMem: 4682 // memory arg needs no code 4683 case ssa.OpArg: 4684 // input args need no code 4685 case ssa.OpSP, ssa.OpSB: 4686 // nothing to do 4687 case ssa.OpSelect0, ssa.OpSelect1: 4688 // nothing to do 4689 case ssa.OpGetG: 4690 // nothing to do when there's a g register, 4691 // and checkLower complains if there's not 4692 case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive: 4693 // nothing to do; already used by liveness 4694 case ssa.OpVarKill: 4695 // Zero variable if it is ambiguously live. 4696 // After the VARKILL anything this variable references 4697 // might be collected. If it were to become live again later, 4698 // the GC will see references to already-collected objects. 4699 // See issue 20029. 4700 n := v.Aux.(*Node) 4701 if n.Name.Needzero() { 4702 if n.Class() != PAUTO { 4703 v.Fatalf("zero of variable which isn't PAUTO %v", n) 4704 } 4705 if n.Type.Size()%int64(Widthptr) != 0 { 4706 v.Fatalf("zero of variable not a multiple of ptr size %v", n) 4707 } 4708 thearch.ZeroAuto(s.pp, n) 4709 } 4710 case ssa.OpPhi: 4711 CheckLoweredPhi(v) 4712 case ssa.OpRegKill: 4713 // nothing to do 4714 default: 4715 // let the backend handle it 4716 thearch.SSAGenValue(&s, v) 4717 } 4718 4719 if Ctxt.Flag_locationlists { 4720 valueToProgAfter[v.ID] = s.pp.next 4721 } 4722 if logProgs { 4723 for ; x != s.pp.next; x = x.Link { 4724 progToValue[x] = v 4725 } 4726 } 4727 } 4728 // Emit control flow instructions for block 4729 var next *ssa.Block 4730 if i < len(f.Blocks)-1 && Debug['N'] == 0 { 4731 // If -N, leave next==nil so every block with successors 4732 // ends in a JMP (except call blocks - plive doesn't like 4733 // select{send,recv} followed by a JMP call). Helps keep 4734 // line numbers for otherwise empty blocks. 4735 next = f.Blocks[i+1] 4736 } 4737 x := s.pp.next 4738 s.SetPos(b.Pos) 4739 thearch.SSAGenBlock(&s, b, next) 4740 if logProgs { 4741 for ; x != s.pp.next; x = x.Link { 4742 progToBlock[x] = b 4743 } 4744 } 4745 } 4746 4747 if Ctxt.Flag_locationlists { 4748 for i := range f.Blocks { 4749 blockDebug := e.curfn.Func.DebugInfo.Blocks[i] 4750 for _, locList := range blockDebug.Variables { 4751 for _, loc := range locList.Locations { 4752 if loc.Start == ssa.BlockStart { 4753 loc.StartProg = s.bstart[f.Blocks[i].ID] 4754 } else { 4755 loc.StartProg = valueToProgAfter[loc.Start.ID] 4756 } 4757 if loc.End == nil { 4758 Fatalf("empty loc %v compiling %v", loc, f.Name) 4759 } 4760 4761 if loc.End == ssa.BlockEnd { 4762 // If this variable was live at the end of the block, it should be 4763 // live over the control flow instructions. Extend it up to the 4764 // beginning of the next block. 
4765 // If this is the last block, then there's no Prog to use for it, and 4766 // EndProg is unset. 4767 if i < len(f.Blocks)-1 { 4768 loc.EndProg = s.bstart[f.Blocks[i+1].ID] 4769 } 4770 } else { 4771 // Advance the "end" forward by one; the end-of-range doesn't take effect 4772 // until the instruction actually executes. 4773 loc.EndProg = valueToProgAfter[loc.End.ID].Link 4774 if loc.EndProg == nil { 4775 Fatalf("nil loc.EndProg compiling %v, loc=%v", f.Name, loc) 4776 } 4777 } 4778 if !logLocationLists { 4779 loc.Start = nil 4780 loc.End = nil 4781 } 4782 } 4783 } 4784 } 4785 } 4786 4787 // Resolve branches 4788 for _, br := range s.Branches { 4789 br.P.To.Val = s.bstart[br.B.ID] 4790 } 4791 4792 if logProgs { 4793 filename := "" 4794 for p := pp.Text; p != nil; p = p.Link { 4795 if p.Pos.IsKnown() && p.InnermostFilename() != filename { 4796 filename = p.InnermostFilename() 4797 f.Logf("# %s\n", filename) 4798 } 4799 4800 var s string 4801 if v, ok := progToValue[p]; ok { 4802 s = v.String() 4803 } else if b, ok := progToBlock[p]; ok { 4804 s = b.String() 4805 } else { 4806 s = " " // most value and branch strings are 2-3 characters long 4807 } 4808 f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString()) 4809 } 4810 if f.HTMLWriter != nil { 4811 // LineHist is defunct now - this code won't do 4812 // anything. 4813 // TODO: fix this (ideally without a global variable) 4814 // saved := pp.Text.Ctxt.LineHist.PrintFilenameOnly 4815 // pp.Text.Ctxt.LineHist.PrintFilenameOnly = true 4816 var buf bytes.Buffer 4817 buf.WriteString("<code>") 4818 buf.WriteString("<dl class=\"ssa-gen\">") 4819 filename := "" 4820 for p := pp.Text; p != nil; p = p.Link { 4821 // Don't spam every line with the file name, which is often huge. 4822 // Only print changes, and "unknown" is not a change. 4823 if p.Pos.IsKnown() && p.InnermostFilename() != filename { 4824 filename = p.InnermostFilename() 4825 buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">") 4826 buf.WriteString(html.EscapeString("# " + filename)) 4827 buf.WriteString("</dd>") 4828 } 4829 4830 buf.WriteString("<dt class=\"ssa-prog-src\">") 4831 if v, ok := progToValue[p]; ok { 4832 buf.WriteString(v.HTML()) 4833 } else if b, ok := progToBlock[p]; ok { 4834 buf.WriteString("<b>" + b.HTML() + "</b>") 4835 } 4836 buf.WriteString("</dt>") 4837 buf.WriteString("<dd class=\"ssa-prog\">") 4838 buf.WriteString(fmt.Sprintf("%.5d <span class=\"line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), html.EscapeString(p.InstructionString()))) 4839 buf.WriteString("</dd>") 4840 } 4841 buf.WriteString("</dl>") 4842 buf.WriteString("</code>") 4843 f.HTMLWriter.WriteColumn("genssa", "ssa-prog", buf.String()) 4844 // pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved 4845 } 4846 } 4847 4848 defframe(&s, e) 4849 if Debug['f'] != 0 { 4850 frame(0) 4851 } 4852 4853 f.HTMLWriter.Close() 4854 f.HTMLWriter = nil 4855 } 4856 4857 func defframe(s *SSAGenState, e *ssafn) { 4858 pp := s.pp 4859 4860 frame := Rnd(s.maxarg+e.stksize, int64(Widthreg)) 4861 if thearch.PadFrame != nil { 4862 frame = thearch.PadFrame(frame) 4863 } 4864 4865 // Fill in argument and frame size. 4866 pp.Text.To.Type = obj.TYPE_TEXTSIZE 4867 pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg))) 4868 pp.Text.To.Offset = frame 4869 4870 // Insert code to zero ambiguously live variables so that the 4871 // garbage collector only sees initialized values when it 4872 // looks for pointers. 
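// The loop below accumulates one [lo, hi) range at a time, merging the
// next variable in whenever its end comes within 2*Widthreg of the
// range collected so far, so a single ZeroRange call can clear several
// adjacent ambiguously live variables.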
4873 p := pp.Text 4874 var lo, hi int64 4875 4876 // Opaque state for backend to use. Current backends use it to 4877 // keep track of which helper registers have been zeroed. 4878 var state uint32 4879 4880 // Iterate through declarations. They are sorted in decreasing Xoffset order. 4881 for _, n := range e.curfn.Func.Dcl { 4882 if !n.Name.Needzero() { 4883 continue 4884 } 4885 if n.Class() != PAUTO { 4886 Fatalf("needzero class %d", n.Class()) 4887 } 4888 if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 { 4889 Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset) 4890 } 4891 4892 if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) { 4893 // Merge with range we already have. 4894 lo = n.Xoffset 4895 continue 4896 } 4897 4898 // Zero old range 4899 p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) 4900 4901 // Set new range. 4902 lo = n.Xoffset 4903 hi = lo + n.Type.Size() 4904 } 4905 4906 // Zero final range. 4907 thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) 4908 } 4909 4910 type FloatingEQNEJump struct { 4911 Jump obj.As 4912 Index int 4913 } 4914 4915 func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) { 4916 p := s.Prog(jumps.Jump) 4917 p.To.Type = obj.TYPE_BRANCH 4918 p.Pos = b.Pos 4919 to := jumps.Index 4920 s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()}) 4921 } 4922 4923 func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) { 4924 switch next { 4925 case b.Succs[0].Block(): 4926 s.oneFPJump(b, &jumps[0][0]) 4927 s.oneFPJump(b, &jumps[0][1]) 4928 case b.Succs[1].Block(): 4929 s.oneFPJump(b, &jumps[1][0]) 4930 s.oneFPJump(b, &jumps[1][1]) 4931 default: 4932 s.oneFPJump(b, &jumps[1][0]) 4933 s.oneFPJump(b, &jumps[1][1]) 4934 q := s.Prog(obj.AJMP) 4935 q.Pos = b.Pos 4936 q.To.Type = obj.TYPE_BRANCH 4937 s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) 4938 } 4939 } 4940 4941 func AuxOffset(v *ssa.Value) (offset int64) { 4942 if v.Aux == nil { 4943 return 0 4944 } 4945 n, ok := v.Aux.(*Node) 4946 if !ok { 4947 v.Fatalf("bad aux type in %s\n", v.LongString()) 4948 } 4949 if n.Class() == PAUTO { 4950 return n.Xoffset 4951 } 4952 return 0 4953 } 4954 4955 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a. 4956 func AddAux(a *obj.Addr, v *ssa.Value) { 4957 AddAux2(a, v, v.AuxInt) 4958 } 4959 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { 4960 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR { 4961 v.Fatalf("bad AddAux addr %v", a) 4962 } 4963 // add integer offset 4964 a.Offset += offset 4965 4966 // If no additional symbol offset, we're done. 4967 if v.Aux == nil { 4968 return 4969 } 4970 // Add symbol's offset from its base register. 4971 switch n := v.Aux.(type) { 4972 case *obj.LSym: 4973 a.Name = obj.NAME_EXTERN 4974 a.Sym = n 4975 case *Node: 4976 if n.Class() == PPARAM || n.Class() == PPARAMOUT { 4977 a.Name = obj.NAME_PARAM 4978 a.Sym = n.Orig.Sym.Linksym() 4979 a.Offset += n.Xoffset 4980 break 4981 } 4982 a.Name = obj.NAME_AUTO 4983 a.Sym = n.Sym.Linksym() 4984 a.Offset += n.Xoffset 4985 default: 4986 v.Fatalf("aux in %s not implemented %#v", v, v.Aux) 4987 } 4988 } 4989 4990 // extendIndex extends v to a full int width. 4991 // panic using the given function if v does not fit in an int (only on 32-bit archs). 
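// The switch cases below encode (index size, pointer size) as
// 10*size + PtrSize: e.g. 18 is a 1-byte index widened on a 64-bit
// architecture, 24 a 2-byte index on a 32-bit one, and 48 a 4-byte
// index on a 64-bit one.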
4992 func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value { 4993 size := v.Type.Size() 4994 if size == s.config.PtrSize { 4995 return v 4996 } 4997 if size > s.config.PtrSize { 4998 // truncate 64-bit indexes on 32-bit pointer archs. Test the 4999 // high word and branch to out-of-bounds failure if it is not 0. 5000 if Debug['B'] == 0 { 5001 hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v) 5002 cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0)) 5003 s.check(cmp, panicfn) 5004 } 5005 return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v) 5006 } 5007 5008 // Extend value to the required size 5009 var op ssa.Op 5010 if v.Type.IsSigned() { 5011 switch 10*size + s.config.PtrSize { 5012 case 14: 5013 op = ssa.OpSignExt8to32 5014 case 18: 5015 op = ssa.OpSignExt8to64 5016 case 24: 5017 op = ssa.OpSignExt16to32 5018 case 28: 5019 op = ssa.OpSignExt16to64 5020 case 48: 5021 op = ssa.OpSignExt32to64 5022 default: 5023 s.Fatalf("bad signed index extension %s", v.Type) 5024 } 5025 } else { 5026 switch 10*size + s.config.PtrSize { 5027 case 14: 5028 op = ssa.OpZeroExt8to32 5029 case 18: 5030 op = ssa.OpZeroExt8to64 5031 case 24: 5032 op = ssa.OpZeroExt16to32 5033 case 28: 5034 op = ssa.OpZeroExt16to64 5035 case 48: 5036 op = ssa.OpZeroExt32to64 5037 default: 5038 s.Fatalf("bad unsigned index extension %s", v.Type) 5039 } 5040 } 5041 return s.newValue1(op, types.Types[TINT], v) 5042 } 5043 5044 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values. 5045 // Called during ssaGenValue. 5046 func CheckLoweredPhi(v *ssa.Value) { 5047 if v.Op != ssa.OpPhi { 5048 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString()) 5049 } 5050 if v.Type.IsMemory() { 5051 return 5052 } 5053 f := v.Block.Func 5054 loc := f.RegAlloc[v.ID] 5055 for _, a := range v.Args { 5056 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? 5057 v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func) 5058 } 5059 } 5060 } 5061 5062 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block. 5063 // The output of LoweredGetClosurePtr is generally hardwired to the correct register. 5064 // That register contains the closure pointer on closure entry. 5065 func CheckLoweredGetClosurePtr(v *ssa.Value) { 5066 entry := v.Block.Func.Entry 5067 if entry != v.Block || entry.Values[0] != v { 5068 Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) 5069 } 5070 } 5071 5072 // AutoVar returns a *Node and int64 representing the auto variable and offset within it 5073 // where v should be spilled. 
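// The RegAlloc entry for v must be an ssa.LocalSlot here; calling
// AutoVar on a value that was never given a stack slot panics on the
// type assertion.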
// AutoVar returns a *Node and int64 representing the auto variable and offset
// within it where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
	if v.Type.Size() > loc.Type.Size() {
		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
	}
	return loc.N.(*Node), loc.Off
}

// AddrAuto fills in a with the address of the stack slot where v lives.
func AddrAuto(a *obj.Addr, v *ssa.Value) {
	n, off := AutoVar(v)
	a.Type = obj.TYPE_MEM
	a.Sym = n.Sym.Linksym()
	a.Reg = int16(thearch.REGSP)
	a.Offset = n.Xoffset + off
	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
		a.Name = obj.NAME_PARAM
	} else {
		a.Name = obj.NAME_AUTO
	}
}

// AddrScratch fills in a with the address of the scratch slot reserved for
// floating point register / memory moves.
func (s *SSAGenState) AddrScratch(a *obj.Addr) {
	if s.ScratchFpMem == nil {
		panic("no scratch memory available; forgot to declare usesScratch for Op?")
	}
	a.Type = obj.TYPE_MEM
	a.Name = obj.NAME_AUTO
	a.Sym = s.ScratchFpMem.Sym.Linksym()
	a.Reg = int16(thearch.REGSP)
	a.Offset = s.ScratchFpMem.Xoffset
}

// Call emits the PCDATA instruction recording v's stack map index, followed
// by the call itself, and returns the call's Prog.
func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
	idx, ok := s.stackMapIndex[v]
	if !ok {
		Fatalf("missing stack map index for %v", v.LongString())
	}
	p := s.Prog(obj.APCDATA)
	Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
	Addrconst(&p.To, int64(idx))

	if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
		// Deferred calls will appear to be returning to
		// the CALL deferreturn(SB) that we are about to emit.
		// However, the stack trace code will show the line
		// of the instruction byte before the return PC.
		// To avoid that being an unrelated instruction,
		// insert an actual hardware NOP that will have the right line number.
		// This is different from obj.ANOP, which is a virtual no-op
		// that doesn't make it into the instruction stream.
		thearch.Ginsnop(s.pp)
	}

	p = s.Prog(obj.ACALL)
	if sym, ok := v.Aux.(*obj.LSym); ok {
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = sym

		// Record call graph information for nowritebarrierrec
		// analysis.
		if nowritebarrierrecCheck != nil {
			nowritebarrierrecCheck.recordCall(s.pp.curfn, sym, v.Pos)
		}
	} else {
		// TODO(mdempsky): Can these differences be eliminated?
		switch thearch.LinkArch.Family {
		case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
			p.To.Type = obj.TYPE_REG
		case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
			p.To.Type = obj.TYPE_MEM
		default:
			Fatalf("unknown indirect call family")
		}
		p.To.Reg = v.Args[0].Reg()
	}
	if s.maxarg < v.AuxInt {
		s.maxarg = v.AuxInt
	}
	return p
}

// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *Node) int {
	t := n.Left.Type
	f := n.Sym
	if !t.IsStruct() {
		panic("ODOT's LHS is not a struct")
	}

	var i int
	for _, t1 := range t.Fields().Slice() {
		if t1.Sym != f {
			i++
			continue
		}
		if t1.Offset != n.Xoffset {
			panic("field offset doesn't match")
		}
		return i
	}
	panic(fmt.Sprintf("can't find field in expr %v\n", n))

	// TODO: keep the result of this function somewhere in the ODOT Node
	// so we don't have to recompute it each time we need it.
}
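// exampleFieldIndex is an illustrative sketch (not part of the compiler) of
// the linear scan in fieldIdx above: the index of a struct field is
// recovered by counting the fields that precede the one whose name matches.
// Real struct fields are matched by *types.Sym rather than by string name.
func exampleFieldIndex(fields []string, name string) int {
	for i, f := range fields {
		if f == name {
			return i
		}
	}
	return -1 // fieldIdx panics instead; a miss indicates a compiler bug
}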
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
	curfn        *Node
	strings      map[string]interface{} // map from constant string to data symbols
	scratchFpMem *Node                  // temp for floating point register / memory moves on some architectures
	stksize      int64                  // stack size for current frame
	stkptrsize   int64                  // prefix of stack containing pointers
	log          bool
}

// StringData returns a symbol (a *types.Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) interface{} {
	if aux, ok := e.strings[s]; ok {
		return aux
	}
	if e.strings == nil {
		e.strings = make(map[string]interface{})
	}
	data := stringsym(e.curfn.Pos, s)
	e.strings[s] = data
	return data
}

func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
	n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
	return n
}

func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := types.NewPtr(types.Types[TUINT8])
	lenType := types.Types[TINT]
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this string up into two separate variables.
		p := e.splitSlot(&name, ".ptr", 0, ptrType)
		l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
		return p, l
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}

func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	t := types.NewPtr(types.Types[TUINT8])
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this interface up into two separate variables.
		f := ".itab"
		if n.Type.IsEmptyInterface() {
			f = ".type"
		}
		c := e.splitSlot(&name, f, 0, t)
		d := e.splitSlot(&name, ".data", t.Size(), t)
		return c, d
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}

func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := types.NewPtr(name.Type.ElemType())
	lenType := types.Types[TINT]
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this slice up into three separate variables.
		p := e.splitSlot(&name, ".ptr", 0, ptrType)
		l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
		c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
		return p, l, c
	}
	// Return the three parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}
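// exampleSliceSlotOffsets is an illustrative sketch (not part of the
// compiler) of the layout SplitSlice above relies on: a slice header is a
// data pointer followed by len and cap ints, so on a 64-bit target the
// three components live at byte offsets 0, 8, and 16 within the variable.
func exampleSliceSlotOffsets(ptrSize int64) (ptrOff, lenOff, capOff int64) {
	return 0, ptrSize, 2 * ptrSize
}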
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	s := name.Type.Size() / 2
	var t *types.Type
	if s == 8 {
		t = types.Types[TFLOAT64]
	} else {
		t = types.Types[TFLOAT32]
	}
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this complex up into two separate variables.
		r := e.splitSlot(&name, ".real", 0, t)
		i := e.splitSlot(&name, ".imag", t.Size(), t)
		return r, i
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}

func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	var t *types.Type
	if name.Type.IsSigned() {
		t = types.Types[TINT32]
	} else {
		t = types.Types[TUINT32]
	}
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Split this int64 up into two separate variables.
		if thearch.LinkArch.ByteOrder == binary.BigEndian {
			return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
		}
		return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
	}
	// Return the two parts of the larger variable.
	if thearch.LinkArch.ByteOrder == binary.BigEndian {
		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
	}
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
}

func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
	n := name.N.(*Node)
	st := name.Type
	ft := st.FieldType(i)
	var offset int64
	for f := 0; f < i; f++ {
		offset += st.FieldType(f).Size()
	}
	if n.Class() == PAUTO && !n.Addrtaken() {
		// Note: the _ field may appear several times. But
		// have no fear, identically-named but distinct Autos are
		// ok, albeit maybe confusing for a debugger.
		return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
	}
	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}

func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
	n := name.N.(*Node)
	at := name.Type
	if at.NumElem() != 1 {
		Fatalf("bad array size")
	}
	et := at.ElemType()
	if n.Class() == PAUTO && !n.Addrtaken() {
		return e.splitSlot(&name, "[0]", 0, et)
	}
	return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
}

// DerefItab looks up the method symbol stored at the given offset within
// the itab symbol it; it is used to devirtualize interface method calls.
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
	return itabsym(it, offset)
}

// splitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
	s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}

	n := &Node{
		Name: new(Name),
		Op:   ONAME,
		Pos:  parent.N.(*Node).Pos,
	}
	n.Orig = n

	s.Def = asTypesNode(n)
	asNode(s.Def).Name.SetUsed(true)
	n.Sym = s
	n.Type = t
	n.SetClass(PAUTO)
	n.SetAddable(true)
	n.Esc = EscNever
	n.Name.Curfn = e.curfn
	e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
	dowidth(t)
	return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}

// CanSSA reports whether values of type t can be represented as SSA values.
func (e *ssafn) CanSSA(t *types.Type) bool {
	return canSSAType(t)
}

// Line returns a string describing the source position pos.
func (e *ssafn) Line(pos src.XPos) string {
	return linestr(pos)
}
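// exampleInt64HalfOffsets is an illustrative sketch (not part of the
// compiler) of the byte-order rule in SplitInt64 above: on a big-endian
// target the high 32-bit half of an int64 comes first in memory, while on
// a little-endian target the low half does.
func exampleInt64HalfOffsets(bigEndian bool) (hiOff, loOff int64) {
	if bigEndian {
		return 0, 4
	}
	return 4, 0
}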
// Logf logs a message from the compiler if logging is enabled for this function.
func (e *ssafn) Logf(msg string, args ...interface{}) {
	if e.log {
		fmt.Printf(msg, args...)
	}
}

func (e *ssafn) Log() bool {
	return e.log
}

// Fatalf reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
	lineno = pos
	Fatalf(msg, args...)
}

// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
	Warnl(pos, fmt_, args...)
}

func (e *ssafn) Debug_checknil() bool {
	return Debug_checknil != 0
}

func (e *ssafn) Debug_eagerwb() bool {
	return Debug_eagerwb != 0
}

func (e *ssafn) UseWriteBarrier() bool {
	return use_writebarrier
}

// Syslook returns the symbol of the runtime function with the given name.
func (e *ssafn) Syslook(name string) *obj.LSym {
	switch name {
	case "goschedguarded":
		return goschedguarded
	case "writeBarrier":
		return writeBarrier
	case "writebarrierptr":
		return writebarrierptr
	case "gcWriteBarrier":
		return gcWriteBarrier
	case "typedmemmove":
		return typedmemmove
	case "typedmemclr":
		return typedmemclr
	}
	Fatalf("unknown Syslook func %v", name)
	return nil
}

func (e *ssafn) SetWBPos(pos src.XPos) {
	e.curfn.Func.setWBPos(pos)
}

func (n *Node) Typ() *types.Type {
	return n.Type
}

func (n *Node) StorageClass() ssa.StorageClass {
	switch n.Class() {
	case PPARAM:
		return ssa.ClassParam
	case PPARAMOUT:
		return ssa.ClassParamOut
	case PAUTO:
		return ssa.ClassAuto
	default:
		Fatalf("untranslatable storage class for %v: %s", n, n.Class())
		return 0
	}
}
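// exampleIsFuncArg is an illustrative sketch (not part of the compiler)
// showing how a backend consumer of the StorageClass mapping above might
// distinguish variables in the argument area from frame-local autos.
func exampleIsFuncArg(c ssa.StorageClass) bool {
	return c == ssa.ClassParam || c == ssa.ClassParamOut
}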