// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"fmt"
	"html"
	"os"
	"strings"

	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/sys"
)

// ssaEnabled globally gates the SSA back end; see shouldssa.
var ssaEnabled = true

// Process-wide SSA back-end state, created once by initssa and shared
// across all functions compiled in this process.
var ssaConfig *ssa.Config
var ssaExp ssaExport

// initssa resets the per-function flags on the shared exporter and
// returns the process-wide *ssa.Config, creating it on first use.
// Optimization is enabled unless -N was given (Debug['N'] == 0).
func initssa() *ssa.Config {
	ssaExp.unimplemented = false
	ssaExp.mustImplement = true
	if ssaConfig == nil {
		ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
		if Thearch.LinkArch.Name == "386" {
			// 386 may use the x87 FPU instead of SSE; honor the arch flag.
			ssaConfig.Set387(Thearch.Use387)
		}
	}
	return ssaConfig
}

// shouldssa reports whether fn should be compiled with the SSA back end,
// based on the target architecture and the GOSSAFUNC/GOSSAPKG/GOSSAHASH
// environment variables described below.
func shouldssa(fn *Node) bool {
	switch Thearch.LinkArch.Name {
	default:
		// Only available for testing.
		if os.Getenv("SSATEST") == "" {
			return false
		}
	case "amd64", "amd64p32", "arm", "386", "arm64", "ppc64le", "mips64", "mips64le":
		// Generally available.
	}
	if !ssaEnabled {
		return false
	}

	// Environment variable control of SSA CG
	// 1. IF GOSSAFUNC == current function name THEN
	//       compile this function with SSA and log output to ssa.html

	// 2. IF GOSSAHASH == "" THEN
	//       compile this function (and everything else) with SSA

	// 3. IF GOSSAHASH == "n" or "N"
	//       IF GOSSAPKG == current package name THEN
	//          compile this function (and everything in this package) with SSA
	//       ELSE
	//          use the old back end for this function.
	//       This is for compatibility with existing test harness and should go away.

	// 4. IF GOSSAHASH is a suffix of the binary-rendered SHA1 hash of the function name THEN
	//       compile this function with SSA
	//    ELSE
	//       compile this function with the old back end.

	// Plan is for 3 to be removed when the tests are revised.
	// SSA is now default, and is disabled by setting
	// GOSSAHASH to n or N, or selectively with strings of
	// 0 and 1.

	name := fn.Func.Nname.Sym.Name

	funcname := os.Getenv("GOSSAFUNC")
	if funcname != "" {
		// If GOSSAFUNC is set, compile only that function.
		return name == funcname
	}

	pkg := os.Getenv("GOSSAPKG")
	if pkg != "" {
		// If GOSSAPKG is set, compile only that package.
		return localpkg.Name == pkg
	}

	return initssa().DebugHashMatch("GOSSAHASH", name)
}

// buildssa builds an SSA function from the AST for fn.
// It returns nil if any errors were reported during construction.
// When GOSSAFUNC matches fn's name, debug dumps are printed and an
// ssa.html trace is written.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Lineno)
	defer s.popLine()

	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	if fn.Func.Pragma&Nowritebarrier != 0 {
		s.noWB = true
	}
	// Propagate the first write-barrier line back to the function node
	// after the build completes (s.WBLineno is set during SSA generation).
	defer func() {
		if s.WBLineno != 0 {
			fn.Func.WBLineno = s.WBLineno
		}
	}()
	// TODO(khr): build config just once at the start of the compiler binary

	ssaExp.log = printssa

	s.config = initssa()
	s.f = s.config.NewFunc()
	s.f.Name = name
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}

	if name == os.Getenv("GOSSAFUNC") {
		// TODO: tempfile? it is handy to have the location
		// of this file be stable, so you can just reload in the browser.
		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}
	// When not tracing, close the HTML writer (if any) on exit; when
	// tracing, it is left open for later compilation phases to append to.
	defer func() {
		if !printssa {
			s.config.HTML.Close()
		}
	}()

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
			if n.Class == PPARAM && s.canSSA(n) && n.Type.IsPtrShaped() {
				s.ptrargs = append(s.ptrargs, n)
				n.SetNotLiveAtEnd(true) // SSA takes care of this explicitly
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Unimplementedf("local variable with class %s unimplemented", classnames[n.Class])
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmts(fn.Func.Enter)
	s.stmts(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	// Check that we used all labels
	for name, lab := range s.labels {
		if !lab.used() && !lab.reported && !lab.defNode.Used {
			yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
			lab.reported = true
		}
		if lab.used() && !lab.defined() && !lab.reported {
			yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
			lab.reported = true
		}
	}

	// Check any forward gotos. Non-forward gotos have already been checked.
	for _, n := range s.fwdGotos {
		lab := s.labels[n.Left.Sym.Name]
		// If the label is undefined, we have already printed an error.
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		}
	}

	if nerrors > 0 {
		s.f.Free()
		return nil
	}

	prelinkNumvars := s.f.NumValues()
	sparseDefState := s.locatePotentialPhiFunctions(fn)

	// Link up variable uses to variable definitions
	s.linkForwardReferences(sparseDefState)

	if ssa.BuildStats > 0 {
		s.f.LogStat("build", s.f.NumBlocks(), "blocks", prelinkNumvars, "vars_before",
			s.f.NumValues(), "vars_after", prelinkNumvars*s.f.NumBlocks(), "ssa_phi_loc_cutoff_score")
	}

	// Don't carry reference this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	return s.f
}

// state holds the per-function state used while converting the AST to SSA.
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// gotos that jump forward; required for deferred checkgoto calls
	fwdGotos []*Node
	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	vars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []int32

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of FwdRef values.
	fwdRefs []*ssa.Value

	// list of PPARAMOUT (return) variables.
	returns []*Node

	// list of PPARAM SSA-able pointer-shaped args. We ensure these are live
	// throughout the function to help users avoid premature finalizers.
	ptrargs []*Node

	cgoUnsafeArgs bool
	noWB          bool
	WBLineno      int32 // line number of first write barrier. 0=no write barriers
}

// funcLine identifies a call site by callee and line number; used as
// the key for deduplicating panic-call blocks (state.panics).
type funcLine struct {
	f    *Node
	line int32
}

// ssaLabel records everything known about one Go label while building SSA.
type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
	defNode        *Node      // label definition Node (OLABEL)
	// Label use Node (OGOTO, OBREAK, OCONTINUE).
	// Used only for error detection and reporting.
	// There might be multiple uses, but we only need to track one.
	useNode  *Node
	reported bool // reported indicates whether an error has already been reported for this label
}

// defined reports whether the label has a definition (OLABEL node).
func (l *ssaLabel) defined() bool { return l.defNode != nil }

// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
func (l *ssaLabel) used() bool { return l.useNode != nil }

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

// Logging and diagnostics forwarders: these delegate to s.config, adding
// the current source line (s.peekLine()) where the config method takes one.
func (s *state) Logf(msg string, args ...interface{})   { s.config.Logf(msg, args...) }
func (s *state) Log() bool                              { return s.config.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(s.peekLine(), msg, args...) }
func (s *state) Unimplementedf(msg string, args ...interface{}) {
	s.config.Unimplementedf(s.peekLine(), msg, args...)
}
func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) }
func (s *state) Debug_checknil() bool                              { return s.config.Debug_checknil() }

// Dummy ONAME nodes used as variable identities in s.vars/s.defvars.
// They are compared by pointer (&memVar etc.), so each must be a distinct
// package-level variable.
var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
	idataVar  = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}}
	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
	deltaVar  = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}}
)

// startBlock sets the current block we're generating code in to b.
// It is a fatal error to start a block while another is still open.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	// Record this block's final variable assignments, growing defvars
	// (indexed by block ID) as needed.
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	b.Line = s.peekLine()
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line int32) {
	if line == 0 {
		// the frontend may emit node with line number missing,
		// use the parent line number in this case.
		line = s.peekLine()
		if Debug['K'] != 0 {
			Warn("buildssa: line 0")
		}
	}
	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekLine peek the top of the line number stack.
func (s *state) peekLine() int32 {
	return s.line[len(s.line)-1]
}

// Error reports a (non-fatal) compile error at the current source line.
func (s *state) Error(msg string, args ...interface{}) {
	yyerrorl(s.peekLine(), msg, args...)
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekLine(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekLine(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekLine(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t ssa.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekLine(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekLine(), op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekLine(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekLine(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekLine(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekLine(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekLine(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekLine(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekLine(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekLine(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekLine(), t, c)
}

// constInt adds a native-width integer constant, choosing 32- or 64-bit
// form from the target's int size; it is fatal if c does not fit in 32
// bits on a 32-bit target.
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}

// stmts converts each statement in a to SSA and adds it to s.
func (s *state) stmts(a Nodes) {
	for _, x := range a.Slice() {
		s.stmt(x)
	}
}

// stmtList converts each statement in l to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Lineno)
	defer s.popLine()

	// If s.curBlock is nil, then we're about to generate dead code.
	// We can't just short-circuit here, though,
	// because we check labels and gotos as part of SSA generation.
	// Provide a block for the dead code so that we don't have
	// to add special cases everywhere else.
	if s.curBlock == nil {
		dead := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(dead)
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		// Calls to known no-return runtime functions end the block with
		// BlockExit rather than falling through.
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC &&
			(compiling_runtime && n.Left.Sym.Name == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (n.Left.Sym.Name == "gopanic" || n.Left.Sym.Name == "selectgo" || n.Left.Sym.Name == "block")) {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
			// TODO: never rewrite OPANIC to OCALLFUNC in the
			// first place. Need to wait until all backends
			// go through SSA.
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno, 0, false)
		s.assign(n.List.Second(), resok, false, false, n.Lineno, 0, false)
		return

	case ODCL:
		if n.Left.Class == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym

		if isblanksym(sym) {
			// Empty identifier is valid but useless.
			// See issues 11589, 11593.
			return
		}

		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.Name.Defn; ctl != nil {
			switch ctl.Op {
			case OFOR, OSWITCH, OSELECT:
				s.labeledNodes[ctl] = lab
			}
		}

		if !lab.defined() {
			lab.defNode = n
		} else {
			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
			lab.reported = true
		}
		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// go to that label (we pretend "label:" is preceded by "goto label")
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		if !lab.used() {
			lab.useNode = n
		}

		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		} else {
			// Defer the check until the label is (maybe) defined;
			// buildssa processes s.fwdGotos after the body.
			s.fwdGotos = append(s.fwdGotos, n)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

	case OAS, OASWB:
		// Check whether we can generate static data rather than code.
		// If so, ignore n and defer data generation until codegen.
		// Failure to do this causes writes to readonly symbols.
		if gen_as_init(n, true) {
			var data []*Node
			if s.f.StaticData != nil {
				data = s.f.StaticData.([]*Node)
			}
			s.f.StaticData = append(data, n)
			return
		}

		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//	VARDEF x
			//	COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		var t *Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) {
					s.append(rhs, true)
					return
				}
			}
		}
		var r *ssa.Value
		var isVolatile bool
		needwb := n.Op == OASWB && rhs != nil
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r, isVolatile = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}
		if rhs != nil && rhs.Op == OAPPEND {
			// The frontend gets rid of the write barrier to enable the special OAPPEND
			// handling above, but since this is not a special case, we need it.
			// TODO: just add a ptr graying to the end of growslice?
			// TODO: check whether we need to provide special handling and a write barrier
			// for ODOTTYPE and ORECV also.
			// They get similar wb-removal treatment in walk.go:OAS.
			needwb = true
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//    tmp = len(*p)
			//    (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//      j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//      k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, needwb, deref, n.Lineno, skip, isVolatile)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			// No else clause: false branch goes straight to the end.
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}

		s.startBlock(bThen)
		s.stmts(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Left.Sym

	case OCONTINUE, OBREAK:
		var op string
		var to *ssa.Block
		switch n.Op {
		case OCONTINUE:
			op = "continue"
			to = s.continueTo
		case OBREAK:
			op = "break"
			to = s.breakTo
		}
		if n.Left == nil {
			// plain break/continue
			if to == nil {
				s.Error("%s is not in a loop", op)
				return
			}
			// nothing to do; "to" is already the correct target
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			if !lab.used() {
				lab.useNode = n.Left
			}
			if !lab.defined() {
				s.Error("%s label not defined: %v", op, sym)
				lab.reported = true
				return
			}
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
			if to == nil {
				// Valid label but not usable with a break/continue here, e.g.:
				// for {
				// 	continue abc
				// }
				// abc:
				// for {}
				s.Error("invalid %s label %v", op, sym)
				lab.reported = true
				return
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)

	case OFOR:
		// OFOR: for Ninit; Left; Right { Nbody }
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test
		b := s.endBlock()
		b.AddEdgeTo(bCond)

		// generate code to test condition
		s.startBlock(bCond)
		if n.Left != nil {
			s.condBranch(n.Left, bBody, bEnd, 1)
		} else {
			// No condition: infinite loop, fall straight into the body.
			b := s.endBlock()
			b.Kind = ssa.BlockPlain
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmts(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}
		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmts(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// OSWITCH never falls through (s.curBlock == nil here).
		// OSELECT does not fall through if we're calling selectgo.
		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
		// In those latter cases, go to the code after the select.
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken {
			// NOTE(review): %s is used here with a *Node argument; other
			// messages in this file use %v for nodes — confirm the verb.
			s.Fatalf("VARLIVE variable %s must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	case OSQRT:
		s.expr(n.Left)

	default:
		s.Unimplementedf("unhandled stmt %s", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmts(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Keep input pointer args live until the return. This is a bandaid
	// fix for 1.7 for what will become in 1.8 explicit runtime.KeepAlive calls.
	// For <= 1.7 we guarantee that pointer input arguments live to the end of
	// the function to prevent premature (from the user's point of view)
	// execution of finalizers. See issue 15277.
	// TODO: remove for 1.8?
	for _, n := range s.ptrargs {
		s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}

// opAndType keys the opToSSA table: a Go AST operator paired with the
// operand's element type.
type opAndType struct {
	op    Op
	etype EType
}

// opToSSA maps (AST op, element type) pairs to the corresponding typed SSA op.
var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
1080 opAndType{OCOM, TUINT64}: ssa.OpCom64, 1081 1082 opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag, 1083 opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag, 1084 opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal, 1085 opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal, 1086 1087 opAndType{OMUL, TINT8}: ssa.OpMul8, 1088 opAndType{OMUL, TUINT8}: ssa.OpMul8, 1089 opAndType{OMUL, TINT16}: ssa.OpMul16, 1090 opAndType{OMUL, TUINT16}: ssa.OpMul16, 1091 opAndType{OMUL, TINT32}: ssa.OpMul32, 1092 opAndType{OMUL, TUINT32}: ssa.OpMul32, 1093 opAndType{OMUL, TINT64}: ssa.OpMul64, 1094 opAndType{OMUL, TUINT64}: ssa.OpMul64, 1095 opAndType{OMUL, TFLOAT32}: ssa.OpMul32F, 1096 opAndType{OMUL, TFLOAT64}: ssa.OpMul64F, 1097 1098 opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F, 1099 opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F, 1100 1101 opAndType{OHMUL, TINT8}: ssa.OpHmul8, 1102 opAndType{OHMUL, TUINT8}: ssa.OpHmul8u, 1103 opAndType{OHMUL, TINT16}: ssa.OpHmul16, 1104 opAndType{OHMUL, TUINT16}: ssa.OpHmul16u, 1105 opAndType{OHMUL, TINT32}: ssa.OpHmul32, 1106 opAndType{OHMUL, TUINT32}: ssa.OpHmul32u, 1107 1108 opAndType{ODIV, TINT8}: ssa.OpDiv8, 1109 opAndType{ODIV, TUINT8}: ssa.OpDiv8u, 1110 opAndType{ODIV, TINT16}: ssa.OpDiv16, 1111 opAndType{ODIV, TUINT16}: ssa.OpDiv16u, 1112 opAndType{ODIV, TINT32}: ssa.OpDiv32, 1113 opAndType{ODIV, TUINT32}: ssa.OpDiv32u, 1114 opAndType{ODIV, TINT64}: ssa.OpDiv64, 1115 opAndType{ODIV, TUINT64}: ssa.OpDiv64u, 1116 1117 opAndType{OMOD, TINT8}: ssa.OpMod8, 1118 opAndType{OMOD, TUINT8}: ssa.OpMod8u, 1119 opAndType{OMOD, TINT16}: ssa.OpMod16, 1120 opAndType{OMOD, TUINT16}: ssa.OpMod16u, 1121 opAndType{OMOD, TINT32}: ssa.OpMod32, 1122 opAndType{OMOD, TUINT32}: ssa.OpMod32u, 1123 opAndType{OMOD, TINT64}: ssa.OpMod64, 1124 opAndType{OMOD, TUINT64}: ssa.OpMod64u, 1125 1126 opAndType{OAND, TINT8}: ssa.OpAnd8, 1127 opAndType{OAND, TUINT8}: ssa.OpAnd8, 1128 opAndType{OAND, TINT16}: ssa.OpAnd16, 1129 opAndType{OAND, TUINT16}: ssa.OpAnd16, 1130 opAndType{OAND, TINT32}: 
ssa.OpAnd32, 1131 opAndType{OAND, TUINT32}: ssa.OpAnd32, 1132 opAndType{OAND, TINT64}: ssa.OpAnd64, 1133 opAndType{OAND, TUINT64}: ssa.OpAnd64, 1134 1135 opAndType{OOR, TINT8}: ssa.OpOr8, 1136 opAndType{OOR, TUINT8}: ssa.OpOr8, 1137 opAndType{OOR, TINT16}: ssa.OpOr16, 1138 opAndType{OOR, TUINT16}: ssa.OpOr16, 1139 opAndType{OOR, TINT32}: ssa.OpOr32, 1140 opAndType{OOR, TUINT32}: ssa.OpOr32, 1141 opAndType{OOR, TINT64}: ssa.OpOr64, 1142 opAndType{OOR, TUINT64}: ssa.OpOr64, 1143 1144 opAndType{OXOR, TINT8}: ssa.OpXor8, 1145 opAndType{OXOR, TUINT8}: ssa.OpXor8, 1146 opAndType{OXOR, TINT16}: ssa.OpXor16, 1147 opAndType{OXOR, TUINT16}: ssa.OpXor16, 1148 opAndType{OXOR, TINT32}: ssa.OpXor32, 1149 opAndType{OXOR, TUINT32}: ssa.OpXor32, 1150 opAndType{OXOR, TINT64}: ssa.OpXor64, 1151 opAndType{OXOR, TUINT64}: ssa.OpXor64, 1152 1153 opAndType{OEQ, TBOOL}: ssa.OpEqB, 1154 opAndType{OEQ, TINT8}: ssa.OpEq8, 1155 opAndType{OEQ, TUINT8}: ssa.OpEq8, 1156 opAndType{OEQ, TINT16}: ssa.OpEq16, 1157 opAndType{OEQ, TUINT16}: ssa.OpEq16, 1158 opAndType{OEQ, TINT32}: ssa.OpEq32, 1159 opAndType{OEQ, TUINT32}: ssa.OpEq32, 1160 opAndType{OEQ, TINT64}: ssa.OpEq64, 1161 opAndType{OEQ, TUINT64}: ssa.OpEq64, 1162 opAndType{OEQ, TINTER}: ssa.OpEqInter, 1163 opAndType{OEQ, TSLICE}: ssa.OpEqSlice, 1164 opAndType{OEQ, TFUNC}: ssa.OpEqPtr, 1165 opAndType{OEQ, TMAP}: ssa.OpEqPtr, 1166 opAndType{OEQ, TCHAN}: ssa.OpEqPtr, 1167 opAndType{OEQ, TPTR32}: ssa.OpEqPtr, 1168 opAndType{OEQ, TPTR64}: ssa.OpEqPtr, 1169 opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr, 1170 opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr, 1171 opAndType{OEQ, TFLOAT64}: ssa.OpEq64F, 1172 opAndType{OEQ, TFLOAT32}: ssa.OpEq32F, 1173 1174 opAndType{ONE, TBOOL}: ssa.OpNeqB, 1175 opAndType{ONE, TINT8}: ssa.OpNeq8, 1176 opAndType{ONE, TUINT8}: ssa.OpNeq8, 1177 opAndType{ONE, TINT16}: ssa.OpNeq16, 1178 opAndType{ONE, TUINT16}: ssa.OpNeq16, 1179 opAndType{ONE, TINT32}: ssa.OpNeq32, 1180 opAndType{ONE, TUINT32}: ssa.OpNeq32, 1181 opAndType{ONE, TINT64}: 
ssa.OpNeq64, 1182 opAndType{ONE, TUINT64}: ssa.OpNeq64, 1183 opAndType{ONE, TINTER}: ssa.OpNeqInter, 1184 opAndType{ONE, TSLICE}: ssa.OpNeqSlice, 1185 opAndType{ONE, TFUNC}: ssa.OpNeqPtr, 1186 opAndType{ONE, TMAP}: ssa.OpNeqPtr, 1187 opAndType{ONE, TCHAN}: ssa.OpNeqPtr, 1188 opAndType{ONE, TPTR32}: ssa.OpNeqPtr, 1189 opAndType{ONE, TPTR64}: ssa.OpNeqPtr, 1190 opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr, 1191 opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr, 1192 opAndType{ONE, TFLOAT64}: ssa.OpNeq64F, 1193 opAndType{ONE, TFLOAT32}: ssa.OpNeq32F, 1194 1195 opAndType{OLT, TINT8}: ssa.OpLess8, 1196 opAndType{OLT, TUINT8}: ssa.OpLess8U, 1197 opAndType{OLT, TINT16}: ssa.OpLess16, 1198 opAndType{OLT, TUINT16}: ssa.OpLess16U, 1199 opAndType{OLT, TINT32}: ssa.OpLess32, 1200 opAndType{OLT, TUINT32}: ssa.OpLess32U, 1201 opAndType{OLT, TINT64}: ssa.OpLess64, 1202 opAndType{OLT, TUINT64}: ssa.OpLess64U, 1203 opAndType{OLT, TFLOAT64}: ssa.OpLess64F, 1204 opAndType{OLT, TFLOAT32}: ssa.OpLess32F, 1205 1206 opAndType{OGT, TINT8}: ssa.OpGreater8, 1207 opAndType{OGT, TUINT8}: ssa.OpGreater8U, 1208 opAndType{OGT, TINT16}: ssa.OpGreater16, 1209 opAndType{OGT, TUINT16}: ssa.OpGreater16U, 1210 opAndType{OGT, TINT32}: ssa.OpGreater32, 1211 opAndType{OGT, TUINT32}: ssa.OpGreater32U, 1212 opAndType{OGT, TINT64}: ssa.OpGreater64, 1213 opAndType{OGT, TUINT64}: ssa.OpGreater64U, 1214 opAndType{OGT, TFLOAT64}: ssa.OpGreater64F, 1215 opAndType{OGT, TFLOAT32}: ssa.OpGreater32F, 1216 1217 opAndType{OLE, TINT8}: ssa.OpLeq8, 1218 opAndType{OLE, TUINT8}: ssa.OpLeq8U, 1219 opAndType{OLE, TINT16}: ssa.OpLeq16, 1220 opAndType{OLE, TUINT16}: ssa.OpLeq16U, 1221 opAndType{OLE, TINT32}: ssa.OpLeq32, 1222 opAndType{OLE, TUINT32}: ssa.OpLeq32U, 1223 opAndType{OLE, TINT64}: ssa.OpLeq64, 1224 opAndType{OLE, TUINT64}: ssa.OpLeq64U, 1225 opAndType{OLE, TFLOAT64}: ssa.OpLeq64F, 1226 opAndType{OLE, TFLOAT32}: ssa.OpLeq32F, 1227 1228 opAndType{OGE, TINT8}: ssa.OpGeq8, 1229 opAndType{OGE, TUINT8}: ssa.OpGeq8U, 1230 
opAndType{OGE, TINT16}: ssa.OpGeq16, 1231 opAndType{OGE, TUINT16}: ssa.OpGeq16U, 1232 opAndType{OGE, TINT32}: ssa.OpGeq32, 1233 opAndType{OGE, TUINT32}: ssa.OpGeq32U, 1234 opAndType{OGE, TINT64}: ssa.OpGeq64, 1235 opAndType{OGE, TUINT64}: ssa.OpGeq64U, 1236 opAndType{OGE, TFLOAT64}: ssa.OpGeq64F, 1237 opAndType{OGE, TFLOAT32}: ssa.OpGeq32F, 1238 1239 opAndType{OLROT, TUINT8}: ssa.OpLrot8, 1240 opAndType{OLROT, TUINT16}: ssa.OpLrot16, 1241 opAndType{OLROT, TUINT32}: ssa.OpLrot32, 1242 opAndType{OLROT, TUINT64}: ssa.OpLrot64, 1243 1244 opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt, 1245 } 1246 1247 func (s *state) concreteEtype(t *Type) EType { 1248 e := t.Etype 1249 switch e { 1250 default: 1251 return e 1252 case TINT: 1253 if s.config.IntSize == 8 { 1254 return TINT64 1255 } 1256 return TINT32 1257 case TUINT: 1258 if s.config.IntSize == 8 { 1259 return TUINT64 1260 } 1261 return TUINT32 1262 case TUINTPTR: 1263 if s.config.PtrSize == 8 { 1264 return TUINT64 1265 } 1266 return TUINT32 1267 } 1268 } 1269 1270 func (s *state) ssaOp(op Op, t *Type) ssa.Op { 1271 etype := s.concreteEtype(t) 1272 x, ok := opToSSA[opAndType{op, etype}] 1273 if !ok { 1274 s.Unimplementedf("unhandled binary op %s %s", op, etype) 1275 } 1276 return x 1277 } 1278 1279 func floatForComplex(t *Type) *Type { 1280 if t.Size() == 8 { 1281 return Types[TFLOAT32] 1282 } else { 1283 return Types[TFLOAT64] 1284 } 1285 } 1286 1287 type opAndTwoTypes struct { 1288 op Op 1289 etype1 EType 1290 etype2 EType 1291 } 1292 1293 type twoTypes struct { 1294 etype1 EType 1295 etype2 EType 1296 } 1297 1298 type twoOpsAndType struct { 1299 op1 ssa.Op 1300 op2 ssa.Op 1301 intermediateType EType 1302 } 1303 1304 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ 1305 1306 twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32}, 1307 twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32}, 1308 twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, 
ssa.OpCvt32to32F, TINT32}, 1309 twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64}, 1310 1311 twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32}, 1312 twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32}, 1313 twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32}, 1314 twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64}, 1315 1316 twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, 1317 twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, 1318 twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32}, 1319 twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64}, 1320 1321 twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, 1322 twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, 1323 twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32}, 1324 twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64}, 1325 // unsigned 1326 twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32}, 1327 twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32}, 1328 twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned 1329 twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead 1330 1331 twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32}, 1332 twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32}, 1333 twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned 1334 
twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead 1335 1336 twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, 1337 twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, 1338 twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned 1339 twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead 1340 1341 twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, 1342 twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, 1343 twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned 1344 twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead 1345 1346 // float 1347 twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32}, 1348 twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64}, 1349 twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32}, 1350 twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64}, 1351 } 1352 1353 // this map is used only for 32-bit arch, and only includes the difference 1354 // on 32-bit arch, don't use int64<->float conversion for uint32 1355 var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{ 1356 twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32}, 1357 twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32}, 1358 twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32}, 1359 twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32}, 1360 } 1361 1362 // uint64<->float 
conversions, only on machines that have intructions for that 1363 var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{ 1364 twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64}, 1365 twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64}, 1366 twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64}, 1367 twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64}, 1368 } 1369 1370 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{ 1371 opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8, 1372 opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8, 1373 opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16, 1374 opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16, 1375 opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32, 1376 opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32, 1377 opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64, 1378 opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64, 1379 1380 opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8, 1381 opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8, 1382 opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16, 1383 opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16, 1384 opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32, 1385 opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32, 1386 opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64, 1387 opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64, 1388 1389 opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8, 1390 opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8, 1391 opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16, 1392 opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16, 1393 opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32, 1394 opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32, 1395 opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64, 1396 opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64, 1397 1398 opAndTwoTypes{OLSH, TINT64, TUINT8}: 
ssa.OpLsh64x8, 1399 opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8, 1400 opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16, 1401 opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16, 1402 opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32, 1403 opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32, 1404 opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64, 1405 opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64, 1406 1407 opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8, 1408 opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8, 1409 opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16, 1410 opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16, 1411 opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32, 1412 opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32, 1413 opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64, 1414 opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64, 1415 1416 opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8, 1417 opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8, 1418 opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16, 1419 opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16, 1420 opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32, 1421 opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32, 1422 opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64, 1423 opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64, 1424 1425 opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8, 1426 opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8, 1427 opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16, 1428 opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16, 1429 opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32, 1430 opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32, 1431 opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64, 1432 opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64, 1433 1434 opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8, 1435 opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8, 1436 
opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16, 1437 opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16, 1438 opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32, 1439 opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32, 1440 opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64, 1441 opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64, 1442 } 1443 1444 func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op { 1445 etype1 := s.concreteEtype(t) 1446 etype2 := s.concreteEtype(u) 1447 x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}] 1448 if !ok { 1449 s.Unimplementedf("unhandled shift op %s etype=%s/%s", op, etype1, etype2) 1450 } 1451 return x 1452 } 1453 1454 func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op { 1455 etype1 := s.concreteEtype(t) 1456 x, ok := opToSSA[opAndType{op, etype1}] 1457 if !ok { 1458 s.Unimplementedf("unhandled rotate op %s etype=%s", op, etype1) 1459 } 1460 return x 1461 } 1462 1463 // expr converts the expression n to ssa, adds it to s and returns the ssa result. 1464 func (s *state) expr(n *Node) *ssa.Value { 1465 if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) { 1466 // ONAMEs and named OLITERALs have the line number 1467 // of the decl, not the use. See issue 14742. 
1468 s.pushLine(n.Lineno) 1469 defer s.popLine() 1470 } 1471 1472 s.stmtList(n.Ninit) 1473 switch n.Op { 1474 case OCFUNC: 1475 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Left.Sym}) 1476 return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb) 1477 case ONAME: 1478 if n.Class == PFUNC { 1479 // "value" of a function is the address of the function's closure 1480 sym := funcsym(n.Sym) 1481 aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym} 1482 return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb) 1483 } 1484 if s.canSSA(n) { 1485 return s.variable(n, n.Type) 1486 } 1487 addr, _ := s.addr(n, false) 1488 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1489 case OCLOSUREVAR: 1490 addr, _ := s.addr(n, false) 1491 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1492 case OLITERAL: 1493 switch u := n.Val().U.(type) { 1494 case *Mpint: 1495 i := u.Int64() 1496 switch n.Type.Size() { 1497 case 1: 1498 return s.constInt8(n.Type, int8(i)) 1499 case 2: 1500 return s.constInt16(n.Type, int16(i)) 1501 case 4: 1502 return s.constInt32(n.Type, int32(i)) 1503 case 8: 1504 return s.constInt64(n.Type, i) 1505 default: 1506 s.Fatalf("bad integer size %d", n.Type.Size()) 1507 return nil 1508 } 1509 case string: 1510 if u == "" { 1511 return s.constEmptyString(n.Type) 1512 } 1513 return s.entryNewValue0A(ssa.OpConstString, n.Type, u) 1514 case bool: 1515 return s.constBool(u) 1516 case *NilVal: 1517 t := n.Type 1518 switch { 1519 case t.IsSlice(): 1520 return s.constSlice(t) 1521 case t.IsInterface(): 1522 return s.constInterface(t) 1523 default: 1524 return s.constNil(t) 1525 } 1526 case *Mpflt: 1527 switch n.Type.Size() { 1528 case 4: 1529 return s.constFloat32(n.Type, u.Float32()) 1530 case 8: 1531 return s.constFloat64(n.Type, u.Float64()) 1532 default: 1533 s.Fatalf("bad float size %d", n.Type.Size()) 1534 return nil 1535 } 1536 case *Mpcplx: 1537 r := &u.Real 1538 i := &u.Imag 1539 switch n.Type.Size() { 1540 case 8: 1541 pt := Types[TFLOAT32] 
1542 return s.newValue2(ssa.OpComplexMake, n.Type, 1543 s.constFloat32(pt, r.Float32()), 1544 s.constFloat32(pt, i.Float32())) 1545 case 16: 1546 pt := Types[TFLOAT64] 1547 return s.newValue2(ssa.OpComplexMake, n.Type, 1548 s.constFloat64(pt, r.Float64()), 1549 s.constFloat64(pt, i.Float64())) 1550 default: 1551 s.Fatalf("bad float size %d", n.Type.Size()) 1552 return nil 1553 } 1554 1555 default: 1556 s.Unimplementedf("unhandled OLITERAL %v", n.Val().Ctype()) 1557 return nil 1558 } 1559 case OCONVNOP: 1560 to := n.Type 1561 from := n.Left.Type 1562 1563 // Assume everything will work out, so set up our return value. 1564 // Anything interesting that happens from here is a fatal. 1565 x := s.expr(n.Left) 1566 1567 // Special case for not confusing GC and liveness. 1568 // We don't want pointers accidentally classified 1569 // as not-pointers or vice-versa because of copy 1570 // elision. 1571 if to.IsPtrShaped() != from.IsPtrShaped() { 1572 return s.newValue2(ssa.OpConvert, to, x, s.mem()) 1573 } 1574 1575 v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type 1576 1577 // CONVNOP closure 1578 if to.Etype == TFUNC && from.IsPtrShaped() { 1579 return v 1580 } 1581 1582 // named <--> unnamed type or typed <--> untyped const 1583 if from.Etype == to.Etype { 1584 return v 1585 } 1586 1587 // unsafe.Pointer <--> *T 1588 if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() { 1589 return v 1590 } 1591 1592 dowidth(from) 1593 dowidth(to) 1594 if from.Width != to.Width { 1595 s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width) 1596 return nil 1597 } 1598 if etypesign(from.Etype) != etypesign(to.Etype) { 1599 s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype) 1600 return nil 1601 } 1602 1603 if instrumenting { 1604 // These appear to be fine, but they fail the 1605 // integer constraint below, so okay them here. 
1606 // Sample non-integer conversion: map[string]string -> *uint8 1607 return v 1608 } 1609 1610 if etypesign(from.Etype) == 0 { 1611 s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to) 1612 return nil 1613 } 1614 1615 // integer, same width, same sign 1616 return v 1617 1618 case OCONV: 1619 x := s.expr(n.Left) 1620 ft := n.Left.Type // from type 1621 tt := n.Type // to type 1622 if ft.IsInteger() && tt.IsInteger() { 1623 var op ssa.Op 1624 if tt.Size() == ft.Size() { 1625 op = ssa.OpCopy 1626 } else if tt.Size() < ft.Size() { 1627 // truncation 1628 switch 10*ft.Size() + tt.Size() { 1629 case 21: 1630 op = ssa.OpTrunc16to8 1631 case 41: 1632 op = ssa.OpTrunc32to8 1633 case 42: 1634 op = ssa.OpTrunc32to16 1635 case 81: 1636 op = ssa.OpTrunc64to8 1637 case 82: 1638 op = ssa.OpTrunc64to16 1639 case 84: 1640 op = ssa.OpTrunc64to32 1641 default: 1642 s.Fatalf("weird integer truncation %s -> %s", ft, tt) 1643 } 1644 } else if ft.IsSigned() { 1645 // sign extension 1646 switch 10*ft.Size() + tt.Size() { 1647 case 12: 1648 op = ssa.OpSignExt8to16 1649 case 14: 1650 op = ssa.OpSignExt8to32 1651 case 18: 1652 op = ssa.OpSignExt8to64 1653 case 24: 1654 op = ssa.OpSignExt16to32 1655 case 28: 1656 op = ssa.OpSignExt16to64 1657 case 48: 1658 op = ssa.OpSignExt32to64 1659 default: 1660 s.Fatalf("bad integer sign extension %s -> %s", ft, tt) 1661 } 1662 } else { 1663 // zero extension 1664 switch 10*ft.Size() + tt.Size() { 1665 case 12: 1666 op = ssa.OpZeroExt8to16 1667 case 14: 1668 op = ssa.OpZeroExt8to32 1669 case 18: 1670 op = ssa.OpZeroExt8to64 1671 case 24: 1672 op = ssa.OpZeroExt16to32 1673 case 28: 1674 op = ssa.OpZeroExt16to64 1675 case 48: 1676 op = ssa.OpZeroExt32to64 1677 default: 1678 s.Fatalf("weird integer sign extension %s -> %s", ft, tt) 1679 } 1680 } 1681 return s.newValue1(op, n.Type, x) 1682 } 1683 1684 if ft.IsFloat() || tt.IsFloat() { 1685 conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}] 1686 if s.config.IntSize 
== 4 && Thearch.LinkArch.Name != "amd64p32" { 1687 if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { 1688 conv = conv1 1689 } 1690 } 1691 if Thearch.LinkArch.Name == "arm64" { 1692 if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { 1693 conv = conv1 1694 } 1695 } 1696 if !ok { 1697 s.Fatalf("weird float conversion %s -> %s", ft, tt) 1698 } 1699 op1, op2, it := conv.op1, conv.op2, conv.intermediateType 1700 1701 if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid { 1702 // normal case, not tripping over unsigned 64 1703 if op1 == ssa.OpCopy { 1704 if op2 == ssa.OpCopy { 1705 return x 1706 } 1707 return s.newValue1(op2, n.Type, x) 1708 } 1709 if op2 == ssa.OpCopy { 1710 return s.newValue1(op1, n.Type, x) 1711 } 1712 return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x)) 1713 } 1714 // Tricky 64-bit unsigned cases. 1715 if ft.IsInteger() { 1716 // therefore tt is float32 or float64, and ft is also unsigned 1717 if tt.Size() == 4 { 1718 return s.uint64Tofloat32(n, x, ft, tt) 1719 } 1720 if tt.Size() == 8 { 1721 return s.uint64Tofloat64(n, x, ft, tt) 1722 } 1723 s.Fatalf("weird unsigned integer to float conversion %s -> %s", ft, tt) 1724 } 1725 // therefore ft is float32 or float64, and tt is unsigned integer 1726 if ft.Size() == 4 { 1727 return s.float32ToUint64(n, x, ft, tt) 1728 } 1729 if ft.Size() == 8 { 1730 return s.float64ToUint64(n, x, ft, tt) 1731 } 1732 s.Fatalf("weird float to unsigned integer conversion %s -> %s", ft, tt) 1733 return nil 1734 } 1735 1736 if ft.IsComplex() && tt.IsComplex() { 1737 var op ssa.Op 1738 if ft.Size() == tt.Size() { 1739 op = ssa.OpCopy 1740 } else if ft.Size() == 8 && tt.Size() == 16 { 1741 op = ssa.OpCvt32Fto64F 1742 } else if ft.Size() == 16 && tt.Size() == 8 { 1743 op = ssa.OpCvt64Fto32F 1744 } else { 1745 s.Fatalf("weird complex conversion %s -> %s", ft, tt) 1746 } 1747 ftp := floatForComplex(ft) 1748 ttp := floatForComplex(tt) 1749 
return s.newValue2(ssa.OpComplexMake, tt, 1750 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), 1751 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) 1752 } 1753 1754 s.Unimplementedf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) 1755 return nil 1756 1757 case ODOTTYPE: 1758 res, _ := s.dottype(n, false) 1759 return res 1760 1761 // binary ops 1762 case OLT, OEQ, ONE, OLE, OGE, OGT: 1763 a := s.expr(n.Left) 1764 b := s.expr(n.Right) 1765 if n.Left.Type.IsComplex() { 1766 pt := floatForComplex(n.Left.Type) 1767 op := s.ssaOp(OEQ, pt) 1768 r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) 1769 i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) 1770 c := s.newValue2(ssa.OpAnd8, Types[TBOOL], r, i) 1771 switch n.Op { 1772 case OEQ: 1773 return c 1774 case ONE: 1775 return s.newValue1(ssa.OpNot, Types[TBOOL], c) 1776 default: 1777 s.Fatalf("ordered complex compare %s", n.Op) 1778 } 1779 } 1780 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b) 1781 case OMUL: 1782 a := s.expr(n.Left) 1783 b := s.expr(n.Right) 1784 if n.Type.IsComplex() { 1785 mulop := ssa.OpMul64F 1786 addop := ssa.OpAdd64F 1787 subop := ssa.OpSub64F 1788 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1789 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1790 1791 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1792 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1793 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1794 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1795 1796 if pt != wt { // Widen for calculation 1797 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1798 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1799 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1800 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1801 } 1802 1803 xreal := s.newValue2(subop, wt, 
s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1804 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal)) 1805 1806 if pt != wt { // Narrow to store back 1807 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1808 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1809 } 1810 1811 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1812 } 1813 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1814 1815 case ODIV: 1816 a := s.expr(n.Left) 1817 b := s.expr(n.Right) 1818 if n.Type.IsComplex() { 1819 // TODO this is not executed because the front-end substitutes a runtime call. 1820 // That probably ought to change; with modest optimization the widen/narrow 1821 // conversions could all be elided in larger expression trees. 1822 mulop := ssa.OpMul64F 1823 addop := ssa.OpAdd64F 1824 subop := ssa.OpSub64F 1825 divop := ssa.OpDiv64F 1826 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1827 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1828 1829 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1830 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1831 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1832 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1833 1834 if pt != wt { // Widen for calculation 1835 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1836 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1837 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1838 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1839 } 1840 1841 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag)) 1842 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1843 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag)) 1844 1845 // TODO not sure if this is best done in wide precision or narrow 1846 // 
Double-rounding might be an issue. 1847 // Note that the pre-SSA implementation does the entire calculation 1848 // in wide format, so wide is compatible. 1849 xreal = s.newValue2(divop, wt, xreal, denom) 1850 ximag = s.newValue2(divop, wt, ximag, denom) 1851 1852 if pt != wt { // Narrow to store back 1853 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1854 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1855 } 1856 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1857 } 1858 if n.Type.IsFloat() { 1859 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1860 } else { 1861 // do a size-appropriate check for zero 1862 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type)) 1863 s.check(cmp, panicdivide) 1864 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1865 } 1866 case OMOD: 1867 a := s.expr(n.Left) 1868 b := s.expr(n.Right) 1869 // do a size-appropriate check for zero 1870 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type)) 1871 s.check(cmp, panicdivide) 1872 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1873 case OADD, OSUB: 1874 a := s.expr(n.Left) 1875 b := s.expr(n.Right) 1876 if n.Type.IsComplex() { 1877 pt := floatForComplex(n.Type) 1878 op := s.ssaOp(n.Op, pt) 1879 return s.newValue2(ssa.OpComplexMake, n.Type, 1880 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1881 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1882 } 1883 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1884 case OAND, OOR, OHMUL, OXOR: 1885 a := s.expr(n.Left) 1886 b := s.expr(n.Right) 1887 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1888 case OLSH, ORSH: 1889 a := s.expr(n.Left) 1890 b := s.expr(n.Right) 1891 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1892 case OLROT: 1893 a := s.expr(n.Left) 1894 i := n.Right.Int64() 1895 if i <= 0 || i >= 
n.Type.Size()*8 { 1896 s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i) 1897 } 1898 return s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a) 1899 case OANDAND, OOROR: 1900 // To implement OANDAND (and OOROR), we introduce a 1901 // new temporary variable to hold the result. The 1902 // variable is associated with the OANDAND node in the 1903 // s.vars table (normally variables are only 1904 // associated with ONAME nodes). We convert 1905 // A && B 1906 // to 1907 // var = A 1908 // if var { 1909 // var = B 1910 // } 1911 // Using var in the subsequent block introduces the 1912 // necessary phi variable. 1913 el := s.expr(n.Left) 1914 s.vars[n] = el 1915 1916 b := s.endBlock() 1917 b.Kind = ssa.BlockIf 1918 b.SetControl(el) 1919 // In theory, we should set b.Likely here based on context. 1920 // However, gc only gives us likeliness hints 1921 // in a single place, for plain OIF statements, 1922 // and passing around context is finnicky, so don't bother for now. 
1923 1924 bRight := s.f.NewBlock(ssa.BlockPlain) 1925 bResult := s.f.NewBlock(ssa.BlockPlain) 1926 if n.Op == OANDAND { 1927 b.AddEdgeTo(bRight) 1928 b.AddEdgeTo(bResult) 1929 } else if n.Op == OOROR { 1930 b.AddEdgeTo(bResult) 1931 b.AddEdgeTo(bRight) 1932 } 1933 1934 s.startBlock(bRight) 1935 er := s.expr(n.Right) 1936 s.vars[n] = er 1937 1938 b = s.endBlock() 1939 b.AddEdgeTo(bResult) 1940 1941 s.startBlock(bResult) 1942 return s.variable(n, Types[TBOOL]) 1943 case OCOMPLEX: 1944 r := s.expr(n.Left) 1945 i := s.expr(n.Right) 1946 return s.newValue2(ssa.OpComplexMake, n.Type, r, i) 1947 1948 // unary ops 1949 case OMINUS: 1950 a := s.expr(n.Left) 1951 if n.Type.IsComplex() { 1952 tp := floatForComplex(n.Type) 1953 negop := s.ssaOp(n.Op, tp) 1954 return s.newValue2(ssa.OpComplexMake, n.Type, 1955 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), 1956 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) 1957 } 1958 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1959 case ONOT, OCOM, OSQRT: 1960 a := s.expr(n.Left) 1961 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1962 case OIMAG, OREAL: 1963 a := s.expr(n.Left) 1964 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a) 1965 case OPLUS: 1966 return s.expr(n.Left) 1967 1968 case OADDR: 1969 a, _ := s.addr(n.Left, n.Bounded) 1970 // Note we know the volatile result is false because you can't write &f() in Go. 
1971 return a 1972 1973 case OINDREG: 1974 if int(n.Reg) != Thearch.REGSP { 1975 s.Unimplementedf("OINDREG of non-SP register %s in expr: %v", obj.Rconv(int(n.Reg)), n) 1976 return nil 1977 } 1978 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp) 1979 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1980 1981 case OIND: 1982 p := s.exprPtr(n.Left, false, n.Lineno) 1983 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1984 1985 case ODOT: 1986 t := n.Left.Type 1987 if canSSAType(t) { 1988 v := s.expr(n.Left) 1989 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v) 1990 } 1991 p, _ := s.addr(n, false) 1992 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1993 1994 case ODOTPTR: 1995 p := s.exprPtr(n.Left, false, n.Lineno) 1996 p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p) 1997 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1998 1999 case OINDEX: 2000 switch { 2001 case n.Left.Type.IsString(): 2002 a := s.expr(n.Left) 2003 i := s.expr(n.Right) 2004 i = s.extendIndex(i, Panicindex) 2005 if !n.Bounded { 2006 len := s.newValue1(ssa.OpStringLen, Types[TINT], a) 2007 s.boundsCheck(i, len) 2008 } 2009 ptrtyp := Ptrto(Types[TUINT8]) 2010 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 2011 if Isconst(n.Right, CTINT) { 2012 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 2013 } else { 2014 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 2015 } 2016 return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem()) 2017 case n.Left.Type.IsSlice(): 2018 p, _ := s.addr(n, false) 2019 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2020 case n.Left.Type.IsArray(): 2021 // TODO: fix when we can SSA arrays of length 1. 
2022 p, _ := s.addr(n, false) 2023 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2024 default: 2025 s.Fatalf("bad type for index %v", n.Left.Type) 2026 return nil 2027 } 2028 2029 case OLEN, OCAP: 2030 switch { 2031 case n.Left.Type.IsSlice(): 2032 op := ssa.OpSliceLen 2033 if n.Op == OCAP { 2034 op = ssa.OpSliceCap 2035 } 2036 return s.newValue1(op, Types[TINT], s.expr(n.Left)) 2037 case n.Left.Type.IsString(): // string; not reachable for OCAP 2038 return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left)) 2039 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 2040 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 2041 default: // array 2042 return s.constInt(Types[TINT], n.Left.Type.NumElem()) 2043 } 2044 2045 case OSPTR: 2046 a := s.expr(n.Left) 2047 if n.Left.Type.IsSlice() { 2048 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 2049 } else { 2050 return s.newValue1(ssa.OpStringPtr, n.Type, a) 2051 } 2052 2053 case OITAB: 2054 a := s.expr(n.Left) 2055 return s.newValue1(ssa.OpITab, n.Type, a) 2056 2057 case OIDATA: 2058 a := s.expr(n.Left) 2059 return s.newValue1(ssa.OpIData, n.Type, a) 2060 2061 case OEFACE: 2062 tab := s.expr(n.Left) 2063 data := s.expr(n.Right) 2064 // The frontend allows putting things like struct{*byte} in 2065 // the data portion of an eface. But we don't want struct{*byte} 2066 // as a register type because (among other reasons) the liveness 2067 // analysis is confused by the "fat" variables that result from 2068 // such types being spilled. 2069 // So here we ensure that we are selecting the underlying pointer 2070 // when we build an eface. 2071 // TODO: get rid of this now that structs can be SSA'd? 
2072 for !data.Type.IsPtrShaped() { 2073 switch { 2074 case data.Type.IsArray(): 2075 data = s.newValue1I(ssa.OpArrayIndex, data.Type.ElemType(), 0, data) 2076 case data.Type.IsStruct(): 2077 for i := data.Type.NumFields() - 1; i >= 0; i-- { 2078 f := data.Type.FieldType(i) 2079 if f.Size() == 0 { 2080 // eface type could also be struct{p *byte; q [0]int} 2081 continue 2082 } 2083 data = s.newValue1I(ssa.OpStructSelect, f, int64(i), data) 2084 break 2085 } 2086 default: 2087 s.Fatalf("type being put into an eface isn't a pointer") 2088 } 2089 } 2090 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2091 2092 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2093 v := s.expr(n.Left) 2094 var i, j, k *ssa.Value 2095 low, high, max := n.SliceBounds() 2096 if low != nil { 2097 i = s.extendIndex(s.expr(low), panicslice) 2098 } 2099 if high != nil { 2100 j = s.extendIndex(s.expr(high), panicslice) 2101 } 2102 if max != nil { 2103 k = s.extendIndex(s.expr(max), panicslice) 2104 } 2105 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2106 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2107 2108 case OSLICESTR: 2109 v := s.expr(n.Left) 2110 var i, j *ssa.Value 2111 low, high, _ := n.SliceBounds() 2112 if low != nil { 2113 i = s.extendIndex(s.expr(low), panicslice) 2114 } 2115 if high != nil { 2116 j = s.extendIndex(s.expr(high), panicslice) 2117 } 2118 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 2119 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2120 2121 case OCALLFUNC: 2122 if isIntrinsicCall(n) { 2123 return s.intrinsicCall(n) 2124 } 2125 fallthrough 2126 2127 case OCALLINTER, OCALLMETH: 2128 a := s.call(n, callNormal) 2129 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2130 2131 case OGETG: 2132 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2133 2134 case OAPPEND: 2135 return s.append(n, false) 2136 2137 default: 2138 s.Unimplementedf("unhandled expr %s", n.Op) 2139 return nil 2140 } 2141 } 2142 2143 // append converts an OAPPEND node to SSA. 
2144 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2145 // adds it to s, and returns the Value. 2146 // If inplace is true, it writes the result of the OAPPEND expression n 2147 // back to the slice being appended to, and returns nil. 2148 // inplace MUST be set to false if the slice can be SSA'd. 2149 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2150 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2151 // 2152 // ptr, len, cap := s 2153 // newlen := len + 3 2154 // if newlen > cap { 2155 // ptr, len, cap = growslice(s, newlen) 2156 // newlen = len + 3 // recalculate to avoid a spill 2157 // } 2158 // // with write barriers, if needed: 2159 // *(ptr+len) = e1 2160 // *(ptr+len+1) = e2 2161 // *(ptr+len+2) = e3 2162 // return makeslice(ptr, newlen, cap) 2163 // 2164 // 2165 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2166 // 2167 // a := &s 2168 // ptr, len, cap := s 2169 // newlen := len + 3 2170 // if newlen > cap { 2171 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2172 // vardef(a) // if necessary, advise liveness we are writing a new a 2173 // *a.cap = newcap // write before ptr to avoid a spill 2174 // *a.ptr = newptr // with write barrier 2175 // } 2176 // newlen = len + 3 // recalculate to avoid a spill 2177 // *a.len = newlen 2178 // // with write barriers, if needed: 2179 // *(ptr+len) = e1 2180 // *(ptr+len+1) = e2 2181 // *(ptr+len+2) = e3 2182 2183 et := n.Type.Elem() 2184 pt := Ptrto(et) 2185 2186 // Evaluate slice 2187 sn := n.List.First() // the slice node is the first in the list 2188 2189 var slice, addr *ssa.Value 2190 if inplace { 2191 addr, _ = s.addr(sn, false) 2192 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2193 } else { 2194 slice = s.expr(sn) 2195 } 2196 2197 // Allocate new blocks 2198 grow := s.f.NewBlock(ssa.BlockPlain) 2199 assign := s.f.NewBlock(ssa.BlockPlain) 2200 2201 // Decide if we need to grow 2202 nargs := 
int64(n.List.Len() - 1) 2203 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2204 l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice) 2205 c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice) 2206 nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2207 2208 cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c) 2209 s.vars[&ptrVar] = p 2210 2211 if !inplace { 2212 s.vars[&newlenVar] = nl 2213 s.vars[&capVar] = c 2214 } else { 2215 s.vars[&lenVar] = l 2216 } 2217 2218 b := s.endBlock() 2219 b.Kind = ssa.BlockIf 2220 b.Likely = ssa.BranchUnlikely 2221 b.SetControl(cmp) 2222 b.AddEdgeTo(grow) 2223 b.AddEdgeTo(assign) 2224 2225 // Call growslice 2226 s.startBlock(grow) 2227 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(n.Type.Elem())}, s.sb) 2228 2229 r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) 2230 2231 if inplace { 2232 if sn.Op == ONAME { 2233 // Tell liveness we're about to build a new slice 2234 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem()) 2235 } 2236 capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_cap), addr) 2237 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem()) 2238 s.insertWBstore(pt, addr, r[0], n.Lineno, 0) 2239 // load the value we just stored to avoid having to spill it 2240 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2241 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2242 } else { 2243 s.vars[&ptrVar] = r[0] 2244 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs)) 2245 s.vars[&capVar] = r[2] 2246 } 2247 2248 b = s.endBlock() 2249 b.AddEdgeTo(assign) 2250 2251 // assign new elements to slots 2252 s.startBlock(assign) 2253 2254 if inplace { 2255 l = s.variable(&lenVar, Types[TINT]) // generates phi for len 2256 nl = 
s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2257 lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr) 2258 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem()) 2259 } 2260 2261 // Evaluate args 2262 type argRec struct { 2263 // if store is true, we're appending the value v. If false, we're appending the 2264 // value at *v. If store==false, isVolatile reports whether the source 2265 // is in the outargs section of the stack frame. 2266 v *ssa.Value 2267 store bool 2268 isVolatile bool 2269 } 2270 args := make([]argRec, 0, nargs) 2271 for _, n := range n.List.Slice()[1:] { 2272 if canSSAType(n.Type) { 2273 args = append(args, argRec{v: s.expr(n), store: true}) 2274 } else { 2275 v, isVolatile := s.addr(n, false) 2276 args = append(args, argRec{v: v, isVolatile: isVolatile}) 2277 } 2278 } 2279 2280 p = s.variable(&ptrVar, pt) // generates phi for ptr 2281 if !inplace { 2282 nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl 2283 c = s.variable(&capVar, Types[TINT]) // generates phi for cap 2284 } 2285 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2286 // TODO: just one write barrier call for all of these writes? 2287 // TODO: maybe just one writeBarrier.enabled check? 
2288 for i, arg := range args { 2289 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i))) 2290 if arg.store { 2291 if haspointers(et) { 2292 s.insertWBstore(et, addr, arg.v, n.Lineno, 0) 2293 } else { 2294 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem()) 2295 } 2296 } else { 2297 if haspointers(et) { 2298 s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile) 2299 } else { 2300 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(et), addr, arg.v, s.mem()) 2301 } 2302 } 2303 } 2304 2305 delete(s.vars, &ptrVar) 2306 if inplace { 2307 delete(s.vars, &lenVar) 2308 return nil 2309 } 2310 delete(s.vars, &newlenVar) 2311 delete(s.vars, &capVar) 2312 // make result 2313 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2314 } 2315 2316 // condBranch evaluates the boolean expression cond and branches to yes 2317 // if cond is true and no if cond is false. 2318 // This function is intended to handle && and || better than just calling 2319 // s.expr(cond) and branching on the result. 2320 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { 2321 if cond.Op == OANDAND { 2322 mid := s.f.NewBlock(ssa.BlockPlain) 2323 s.stmtList(cond.Ninit) 2324 s.condBranch(cond.Left, mid, no, max8(likely, 0)) 2325 s.startBlock(mid) 2326 s.condBranch(cond.Right, yes, no, likely) 2327 return 2328 // Note: if likely==1, then both recursive calls pass 1. 2329 // If likely==-1, then we don't have enough information to decide 2330 // whether the first branch is likely or not. So we pass 0 for 2331 // the likeliness of the first branch. 2332 // TODO: have the frontend give us branch prediction hints for 2333 // OANDAND and OOROR nodes (if it ever has such info). 
2334 } 2335 if cond.Op == OOROR { 2336 mid := s.f.NewBlock(ssa.BlockPlain) 2337 s.stmtList(cond.Ninit) 2338 s.condBranch(cond.Left, yes, mid, min8(likely, 0)) 2339 s.startBlock(mid) 2340 s.condBranch(cond.Right, yes, no, likely) 2341 return 2342 // Note: if likely==-1, then both recursive calls pass -1. 2343 // If likely==1, then we don't have enough info to decide 2344 // the likelihood of the first branch. 2345 } 2346 if cond.Op == ONOT { 2347 s.stmtList(cond.Ninit) 2348 s.condBranch(cond.Left, no, yes, -likely) 2349 return 2350 } 2351 c := s.expr(cond) 2352 b := s.endBlock() 2353 b.Kind = ssa.BlockIf 2354 b.SetControl(c) 2355 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness 2356 b.AddEdgeTo(yes) 2357 b.AddEdgeTo(no) 2358 } 2359 2360 type skipMask uint8 2361 2362 const ( 2363 skipPtr skipMask = 1 << iota 2364 skipLen 2365 skipCap 2366 ) 2367 2368 // assign does left = right. 2369 // Right has already been evaluated to ssa, left has not. 2370 // If deref is true, then we do left = *right instead (and right has already been nil-checked). 2371 // If deref is true and right == nil, just do left = 0. 2372 // If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage. 2373 // Include a write barrier if wb is true. 2374 // skip indicates assignments (at the top level) that can be avoided. 2375 func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask, rightIsVolatile bool) { 2376 if left.Op == ONAME && isblank(left) { 2377 return 2378 } 2379 t := left.Type 2380 dowidth(t) 2381 if s.canSSA(left) { 2382 if deref { 2383 s.Fatalf("can SSA LHS %s but not RHS %s", left, right) 2384 } 2385 if left.Op == ODOT { 2386 // We're assigning to a field of an ssa-able value. 2387 // We need to build a new structure with the new value for the 2388 // field we're assigning and the old values for the other fields. 
2389 // For instance: 2390 // type T struct {a, b, c int} 2391 // var T x 2392 // x.b = 5 2393 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c} 2394 2395 // Grab information about the structure type. 2396 t := left.Left.Type 2397 nf := t.NumFields() 2398 idx := fieldIdx(left) 2399 2400 // Grab old value of structure. 2401 old := s.expr(left.Left) 2402 2403 // Make new structure. 2404 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t) 2405 2406 // Add fields as args. 2407 for i := 0; i < nf; i++ { 2408 if i == idx { 2409 new.AddArg(right) 2410 } else { 2411 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old)) 2412 } 2413 } 2414 2415 // Recursively assign the new value we've made to the base of the dot op. 2416 s.assign(left.Left, new, false, false, line, 0, rightIsVolatile) 2417 // TODO: do we need to update named values here? 2418 return 2419 } 2420 // Update variable assignment. 2421 s.vars[left] = right 2422 s.addNamedValue(left, right) 2423 return 2424 } 2425 // Left is not ssa-able. Compute its address. 2426 addr, _ := s.addr(left, false) 2427 if left.Op == ONAME && skip == 0 { 2428 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem()) 2429 } 2430 if deref { 2431 // Treat as a mem->mem move. 2432 if right == nil { 2433 s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, SizeAlignAuxInt(t), addr, s.mem()) 2434 return 2435 } 2436 if wb { 2437 s.insertWBmove(t, addr, right, line, rightIsVolatile) 2438 return 2439 } 2440 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), addr, right, s.mem()) 2441 return 2442 } 2443 // Treat as a store. 2444 if wb { 2445 if skip&skipPtr != 0 { 2446 // Special case: if we don't write back the pointers, don't bother 2447 // doing the write barrier check. 
2448 s.storeTypeScalars(t, addr, right, skip) 2449 return 2450 } 2451 s.insertWBstore(t, addr, right, line, skip) 2452 return 2453 } 2454 if skip != 0 { 2455 if skip&skipPtr == 0 { 2456 s.storeTypePtrs(t, addr, right) 2457 } 2458 s.storeTypeScalars(t, addr, right, skip) 2459 return 2460 } 2461 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem()) 2462 } 2463 2464 // zeroVal returns the zero value for type t. 2465 func (s *state) zeroVal(t *Type) *ssa.Value { 2466 switch { 2467 case t.IsInteger(): 2468 switch t.Size() { 2469 case 1: 2470 return s.constInt8(t, 0) 2471 case 2: 2472 return s.constInt16(t, 0) 2473 case 4: 2474 return s.constInt32(t, 0) 2475 case 8: 2476 return s.constInt64(t, 0) 2477 default: 2478 s.Fatalf("bad sized integer type %s", t) 2479 } 2480 case t.IsFloat(): 2481 switch t.Size() { 2482 case 4: 2483 return s.constFloat32(t, 0) 2484 case 8: 2485 return s.constFloat64(t, 0) 2486 default: 2487 s.Fatalf("bad sized float type %s", t) 2488 } 2489 case t.IsComplex(): 2490 switch t.Size() { 2491 case 8: 2492 z := s.constFloat32(Types[TFLOAT32], 0) 2493 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2494 case 16: 2495 z := s.constFloat64(Types[TFLOAT64], 0) 2496 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2497 default: 2498 s.Fatalf("bad sized complex type %s", t) 2499 } 2500 2501 case t.IsString(): 2502 return s.constEmptyString(t) 2503 case t.IsPtrShaped(): 2504 return s.constNil(t) 2505 case t.IsBoolean(): 2506 return s.constBool(false) 2507 case t.IsInterface(): 2508 return s.constInterface(t) 2509 case t.IsSlice(): 2510 return s.constSlice(t) 2511 case t.IsStruct(): 2512 n := t.NumFields() 2513 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2514 for i := 0; i < n; i++ { 2515 v.AddArg(s.zeroVal(t.FieldType(i).(*Type))) 2516 } 2517 return v 2518 } 2519 s.Unimplementedf("zero for type %v not implemented", t) 2520 return nil 2521 } 2522 2523 type callKind int8 2524 2525 const ( 2526 callNormal 
callKind = iota 2527 callDefer 2528 callGo 2529 ) 2530 2531 // isSSAIntrinsic returns true if n is a call to a recognized intrinsic 2532 // that can be handled by the SSA backend. 2533 // SSA uses this, but so does the front end to see if should not 2534 // inline a function because it is a candidate for intrinsic 2535 // substitution. 2536 func isSSAIntrinsic(s *Sym) bool { 2537 // The test below is not quite accurate -- in the event that 2538 // a function is disabled on a per-function basis, for example 2539 // because of hash-keyed binary failure search, SSA might be 2540 // disabled for that function but it would not be noted here, 2541 // and thus an inlining would not occur (in practice, inlining 2542 // so far has only been noticed for Bswap32 and the 16-bit count 2543 // leading/trailing instructions, but heuristics might change 2544 // in the future or on different architectures). 2545 if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 { 2546 return false 2547 } 2548 if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/sys" { 2549 switch s.Name { 2550 case 2551 "Ctz64", "Ctz32", 2552 "Bswap64", "Bswap32": 2553 return true 2554 } 2555 } 2556 if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/atomic" { 2557 switch s.Name { 2558 case "Load", "Load64", "Loadint64", "Loadp", "Loaduint", "Loaduintptr": 2559 return true 2560 case "Store", "Store64", "StorepNoWB", "Storeuintptr": 2561 return true 2562 case "Xchg", "Xchg64", "Xchguintptr": 2563 return true 2564 case "Xadd", "Xadd64", "Xaddint64", "Xadduintptr": 2565 return true 2566 case "Cas", "Cas64", "Casp1", "Casuintptr": 2567 return true 2568 case "And8", "Or8": 2569 return true 2570 } 2571 } 2572 return false 2573 } 2574 2575 func isIntrinsicCall(n *Node) bool { 2576 if n == nil || n.Left == nil { 2577 return false 2578 } 2579 return isSSAIntrinsic(n.Left.Sym) 2580 } 2581 2582 // intrinsicArg extracts the ith arg from n.List and returns its value. 
2583 func (s *state) intrinsicArg(n *Node, i int) *ssa.Value { 2584 x := n.List.Slice()[i] 2585 if x.Op == OAS { 2586 x = x.Right 2587 } 2588 return s.expr(x) 2589 } 2590 func (s *state) intrinsicFirstArg(n *Node) *ssa.Value { 2591 return s.intrinsicArg(n, 0) 2592 } 2593 2594 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 2595 func (s *state) intrinsicCall(n *Node) (ret *ssa.Value) { 2596 var result *ssa.Value 2597 name := n.Left.Sym.Name 2598 switch { 2599 case name == "Ctz64": 2600 result = s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n)) 2601 ret = result 2602 case name == "Ctz32": 2603 result = s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n)) 2604 ret = result 2605 case name == "Bswap64": 2606 result = s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n)) 2607 ret = result 2608 case name == "Bswap32": 2609 result = s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n)) 2610 ret = result 2611 case name == "Load" || name == "Loaduint" && s.config.IntSize == 4 || name == "Loaduintptr" && s.config.PtrSize == 4: 2612 result = s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem()) 2613 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result) 2614 ret = s.newValue1(ssa.OpSelect0, Types[TUINT32], result) 2615 case name == "Load64" || name == "Loadint64" || name == "Loaduint" && s.config.IntSize == 8 || name == "Loaduintptr" && s.config.PtrSize == 8: 2616 result = s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem()) 2617 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result) 2618 ret = s.newValue1(ssa.OpSelect0, Types[TUINT64], result) 2619 case name == "Loadp": 2620 result = s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(Ptrto(Types[TUINT8]), ssa.TypeMem), s.intrinsicArg(n, 0), s.mem()) 2621 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, 
ssa.TypeMem, result) 2622 ret = s.newValue1(ssa.OpSelect0, Ptrto(Types[TUINT8]), result) 2623 case name == "Store" || name == "Storeuintptr" && s.config.PtrSize == 4: 2624 result = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2625 s.vars[&memVar] = result 2626 case name == "Store64" || name == "Storeuintptr" && s.config.PtrSize == 8: 2627 result = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2628 s.vars[&memVar] = result 2629 case name == "StorepNoWB": 2630 result = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2631 s.vars[&memVar] = result 2632 case name == "Xchg" || name == "Xchguintptr" && s.config.PtrSize == 4: 2633 result = s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2634 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result) 2635 ret = s.newValue1(ssa.OpSelect0, Types[TUINT32], result) 2636 case name == "Xchg64" || name == "Xchguintptr" && s.config.PtrSize == 8: 2637 result = s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2638 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result) 2639 ret = s.newValue1(ssa.OpSelect0, Types[TUINT64], result) 2640 case name == "Xadd" || name == "Xadduintptr" && s.config.PtrSize == 4: 2641 result = s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2642 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result) 2643 ret = s.newValue1(ssa.OpSelect0, Types[TUINT32], result) 2644 case name == "Xadd64" || name == "Xaddint64" || name == "Xadduintptr" && s.config.PtrSize == 8: 2645 result = s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), 
s.intrinsicArg(n, 1), s.mem()) 2646 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result) 2647 ret = s.newValue1(ssa.OpSelect0, Types[TUINT64], result) 2648 case name == "Cas" || (name == "Casp1" || name == "Casuintptr") && s.config.PtrSize == 4: 2649 result = s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem()) 2650 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result) 2651 ret = s.newValue1(ssa.OpSelect0, Types[TBOOL], result) 2652 case name == "Cas64" || (name == "Casp1" || name == "Casuintptr") && s.config.PtrSize == 8: 2653 result = s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem()) 2654 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result) 2655 ret = s.newValue1(ssa.OpSelect0, Types[TBOOL], result) 2656 case name == "And8": 2657 result = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2658 s.vars[&memVar] = result 2659 case name == "Or8": 2660 result = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2661 s.vars[&memVar] = result 2662 } 2663 if result == nil { 2664 Fatalf("Unknown special call: %v", n.Left.Sym) 2665 } 2666 if ssa.IntrinsicsDebug > 0 { 2667 Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, result.LongString()) 2668 } 2669 return 2670 } 2671 2672 // Calls the function n using the specified call type. 2673 // Returns the address of the return value (or nil if none). 
2674 func (s *state) call(n *Node, k callKind) *ssa.Value { 2675 var sym *Sym // target symbol (if static) 2676 var closure *ssa.Value // ptr to closure to run (if dynamic) 2677 var codeptr *ssa.Value // ptr to target code (if dynamic) 2678 var rcvr *ssa.Value // receiver to set 2679 fn := n.Left 2680 switch n.Op { 2681 case OCALLFUNC: 2682 if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC { 2683 sym = fn.Sym 2684 break 2685 } 2686 closure = s.expr(fn) 2687 case OCALLMETH: 2688 if fn.Op != ODOTMETH { 2689 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 2690 } 2691 if k == callNormal { 2692 sym = fn.Sym 2693 break 2694 } 2695 n2 := newname(fn.Sym) 2696 n2.Class = PFUNC 2697 n2.Lineno = fn.Lineno 2698 closure = s.expr(n2) 2699 // Note: receiver is already assigned in n.List, so we don't 2700 // want to set it here. 2701 case OCALLINTER: 2702 if fn.Op != ODOTINTER { 2703 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 2704 } 2705 i := s.expr(fn.Left) 2706 itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i) 2707 if k != callNormal { 2708 s.nilCheck(itab) 2709 } 2710 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab 2711 itab = s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), itabidx, itab) 2712 if k == callNormal { 2713 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem()) 2714 } else { 2715 closure = itab 2716 } 2717 rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i) 2718 } 2719 dowidth(fn.Type) 2720 stksize := fn.Type.ArgWidth() // includes receiver 2721 2722 // Run all argument assignments. The arg slots have already 2723 // been offset by the appropriate amount (+2*widthptr for go/defer, 2724 // +widthptr for interface calls). 2725 // For OCALLMETH, the receiver is set in these statements. 
2726 s.stmtList(n.List) 2727 2728 // Set receiver (for interface calls) 2729 if rcvr != nil { 2730 argStart := Ctxt.FixedFrameSize() 2731 if k != callNormal { 2732 argStart += int64(2 * Widthptr) 2733 } 2734 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), argStart, s.sp) 2735 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem()) 2736 } 2737 2738 // Defer/go args 2739 if k != callNormal { 2740 // Write argsize and closure (args to Newproc/Deferproc). 2741 argStart := Ctxt.FixedFrameSize() 2742 argsize := s.constInt32(Types[TUINT32], int32(stksize)) 2743 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT32]), argStart, s.sp) 2744 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, addr, argsize, s.mem()) 2745 addr = s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), argStart+int64(Widthptr), s.sp) 2746 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem()) 2747 stksize += 2 * int64(Widthptr) 2748 } 2749 2750 // call target 2751 bNext := s.f.NewBlock(ssa.BlockPlain) 2752 var call *ssa.Value 2753 switch { 2754 case k == callDefer: 2755 call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem()) 2756 case k == callGo: 2757 call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem()) 2758 case closure != nil: 2759 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem()) 2760 call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem()) 2761 case codeptr != nil: 2762 call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem()) 2763 case sym != nil: 2764 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem()) 2765 default: 2766 Fatalf("bad call type %s %v", n.Op, n) 2767 } 2768 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 2769 2770 // Finish call block 2771 s.vars[&memVar] = call 2772 b := s.endBlock() 2773 b.Kind = ssa.BlockCall 2774 b.SetControl(call) 2775 b.AddEdgeTo(bNext) 
2776 if k == callDefer { 2777 // Add recover edge to exit code. 2778 b.Kind = ssa.BlockDefer 2779 r := s.f.NewBlock(ssa.BlockPlain) 2780 s.startBlock(r) 2781 s.exit() 2782 b.AddEdgeTo(r) 2783 b.Likely = ssa.BranchLikely 2784 } 2785 2786 // Start exit block, find address of result. 2787 s.startBlock(bNext) 2788 // Keep input pointer args live across calls. This is a bandaid until 1.8. 2789 for _, n := range s.ptrargs { 2790 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem()) 2791 } 2792 res := n.Left.Type.Results() 2793 if res.NumFields() == 0 || k != callNormal { 2794 // call has no return value. Continue with the next statement. 2795 return nil 2796 } 2797 fp := res.Field(0) 2798 return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp) 2799 } 2800 2801 // etypesign returns the signed-ness of e, for integer/pointer etypes. 2802 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 2803 func etypesign(e EType) int8 { 2804 switch e { 2805 case TINT8, TINT16, TINT32, TINT64, TINT: 2806 return -1 2807 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 2808 return +1 2809 } 2810 return 0 2811 } 2812 2813 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node. 2814 // This improves the effectiveness of cse by using the same Aux values for the 2815 // same symbols. 2816 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} { 2817 switch sym.(type) { 2818 default: 2819 s.Fatalf("sym %v is of uknown type %T", sym, sym) 2820 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol: 2821 // these are the only valid types 2822 } 2823 2824 if lsym, ok := s.varsyms[n]; ok { 2825 return lsym 2826 } else { 2827 s.varsyms[n] = sym 2828 return sym 2829 } 2830 } 2831 2832 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 
2833 // Also returns a bool reporting whether the returned value is "volatile", that is it 2834 // points to the outargs section and thus the referent will be clobbered by any call. 2835 // The value that the returned Value represents is guaranteed to be non-nil. 2836 // If bounded is true then this address does not require a nil check for its operand 2837 // even if that would otherwise be implied. 2838 func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) { 2839 t := Ptrto(n.Type) 2840 switch n.Op { 2841 case ONAME: 2842 switch n.Class { 2843 case PEXTERN: 2844 // global variable 2845 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Sym}) 2846 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb) 2847 // TODO: Make OpAddr use AuxInt as well as Aux. 2848 if n.Xoffset != 0 { 2849 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 2850 } 2851 return v, false 2852 case PPARAM: 2853 // parameter slot 2854 v := s.decladdrs[n] 2855 if v != nil { 2856 return v, false 2857 } 2858 if n.String() == ".fp" { 2859 // Special arg that points to the frame pointer. 2860 // (Used by the race detector, others?) 2861 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 2862 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false 2863 } 2864 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) 2865 return nil, false 2866 case PAUTO: 2867 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n}) 2868 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 2869 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 
2870 // ensure that we reuse symbols for out parameters so 2871 // that cse works on their addresses 2872 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 2873 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 2874 default: 2875 s.Unimplementedf("variable address class %v not implemented", classnames[n.Class]) 2876 return nil, false 2877 } 2878 case OINDREG: 2879 // indirect off a register 2880 // used for storing/loading arguments/returns to/from callees 2881 if int(n.Reg) != Thearch.REGSP { 2882 s.Unimplementedf("OINDREG of non-SP register %s in addr: %v", obj.Rconv(int(n.Reg)), n) 2883 return nil, false 2884 } 2885 return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true 2886 case OINDEX: 2887 if n.Left.Type.IsSlice() { 2888 a := s.expr(n.Left) 2889 i := s.expr(n.Right) 2890 i = s.extendIndex(i, Panicindex) 2891 len := s.newValue1(ssa.OpSliceLen, Types[TINT], a) 2892 if !n.Bounded { 2893 s.boundsCheck(i, len) 2894 } 2895 p := s.newValue1(ssa.OpSlicePtr, t, a) 2896 return s.newValue2(ssa.OpPtrIndex, t, p, i), false 2897 } else { // array 2898 a, isVolatile := s.addr(n.Left, bounded) 2899 i := s.expr(n.Right) 2900 i = s.extendIndex(i, Panicindex) 2901 len := s.constInt(Types[TINT], n.Left.Type.NumElem()) 2902 if !n.Bounded { 2903 s.boundsCheck(i, len) 2904 } 2905 return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Elem()), a, i), isVolatile 2906 } 2907 case OIND: 2908 return s.exprPtr(n.Left, bounded, n.Lineno), false 2909 case ODOT: 2910 p, isVolatile := s.addr(n.Left, bounded) 2911 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile 2912 case ODOTPTR: 2913 p := s.exprPtr(n.Left, bounded, n.Lineno) 2914 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false 2915 case OCLOSUREVAR: 2916 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 2917 s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8]))), false 2918 case OCONVNOP: 2919 addr, isVolatile := s.addr(n.Left, bounded) 2920 return s.newValue1(ssa.OpCopy, t, addr), 
isVolatile // ensure that addr has the right type 2921 case OCALLFUNC, OCALLINTER, OCALLMETH: 2922 return s.call(n, callNormal), true 2923 2924 default: 2925 s.Unimplementedf("unhandled addr %v", n.Op) 2926 return nil, false 2927 } 2928 } 2929 2930 // canSSA reports whether n is SSA-able. 2931 // n must be an ONAME (or an ODOT sequence with an ONAME base). 2932 func (s *state) canSSA(n *Node) bool { 2933 if Debug['N'] != 0 { 2934 return false 2935 } 2936 for n.Op == ODOT { 2937 n = n.Left 2938 } 2939 if n.Op != ONAME { 2940 return false 2941 } 2942 if n.Addrtaken { 2943 return false 2944 } 2945 if n.isParamHeapCopy() { 2946 return false 2947 } 2948 if n.Class == PAUTOHEAP { 2949 Fatalf("canSSA of PAUTOHEAP %v", n) 2950 } 2951 switch n.Class { 2952 case PEXTERN: 2953 return false 2954 case PPARAMOUT: 2955 if hasdefer { 2956 // TODO: handle this case? Named return values must be 2957 // in memory so that the deferred function can see them. 2958 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 2959 return false 2960 } 2961 if s.cgoUnsafeArgs { 2962 // Cgo effectively takes the address of all result args, 2963 // but the compiler can't see that. 2964 return false 2965 } 2966 } 2967 if n.Class == PPARAM && n.String() == ".this" { 2968 // wrappers generated by genwrapper need to update 2969 // the .this pointer in place. 2970 // TODO: treat as a PPARMOUT? 2971 return false 2972 } 2973 return canSSAType(n.Type) 2974 // TODO: try to make more variables SSAable? 2975 } 2976 2977 // canSSA reports whether variables of type t are SSA-able. 2978 func canSSAType(t *Type) bool { 2979 dowidth(t) 2980 if t.Width > int64(4*Widthptr) { 2981 // 4*Widthptr is an arbitrary constant. We want it 2982 // to be at least 3*Widthptr so slices can be registerized. 2983 // Too big and we'll introduce too much register pressure. 
2984 return false 2985 } 2986 switch t.Etype { 2987 case TARRAY: 2988 // We can't do arrays because dynamic indexing is 2989 // not supported on SSA variables. 2990 // TODO: maybe allow if length is <=1? All indexes 2991 // are constant? Might be good for the arrays 2992 // introduced by the compiler for variadic functions. 2993 return false 2994 case TSTRUCT: 2995 if t.NumFields() > ssa.MaxStruct { 2996 return false 2997 } 2998 for _, t1 := range t.Fields().Slice() { 2999 if !canSSAType(t1.Type) { 3000 return false 3001 } 3002 } 3003 return true 3004 default: 3005 return true 3006 } 3007 } 3008 3009 // exprPtr evaluates n to a pointer and nil-checks it. 3010 func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value { 3011 p := s.expr(n) 3012 if bounded || n.NonNil { 3013 if s.f.Config.Debug_checknil() && lineno > 1 { 3014 s.f.Config.Warnl(lineno, "removed nil check") 3015 } 3016 return p 3017 } 3018 s.nilCheck(p) 3019 return p 3020 } 3021 3022 // nilCheck generates nil pointer checking code. 3023 // Starts a new block on return, unless nil checks are disabled. 3024 // Used only for automatically inserted nil checks, 3025 // not for user code like 'x != nil'. 3026 func (s *state) nilCheck(ptr *ssa.Value) { 3027 if Disable_checknil != 0 { 3028 return 3029 } 3030 chk := s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem()) 3031 b := s.endBlock() 3032 b.Kind = ssa.BlockCheck 3033 b.SetControl(chk) 3034 bNext := s.f.NewBlock(ssa.BlockPlain) 3035 b.AddEdgeTo(bNext) 3036 s.startBlock(bNext) 3037 } 3038 3039 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. 3040 // Starts a new block on return. 3041 // idx is already converted to full int width. 
3042 func (s *state) boundsCheck(idx, len *ssa.Value) { 3043 if Debug['B'] != 0 { 3044 return 3045 } 3046 3047 // bounds check 3048 cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len) 3049 s.check(cmp, Panicindex) 3050 } 3051 3052 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not. 3053 // Starts a new block on return. 3054 // idx and len are already converted to full int width. 3055 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) { 3056 if Debug['B'] != 0 { 3057 return 3058 } 3059 3060 // bounds check 3061 cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len) 3062 s.check(cmp, panicslice) 3063 } 3064 3065 // If cmp (a bool) is false, panic using the given function. 3066 func (s *state) check(cmp *ssa.Value, fn *Node) { 3067 b := s.endBlock() 3068 b.Kind = ssa.BlockIf 3069 b.SetControl(cmp) 3070 b.Likely = ssa.BranchLikely 3071 bNext := s.f.NewBlock(ssa.BlockPlain) 3072 line := s.peekLine() 3073 bPanic := s.panics[funcLine{fn, line}] 3074 if bPanic == nil { 3075 bPanic = s.f.NewBlock(ssa.BlockPlain) 3076 s.panics[funcLine{fn, line}] = bPanic 3077 s.startBlock(bPanic) 3078 // The panic call takes/returns memory to ensure that the right 3079 // memory state is observed if the panic happens. 3080 s.rtcall(fn, false, nil) 3081 } 3082 b.AddEdgeTo(bNext) 3083 b.AddEdgeTo(bPanic) 3084 s.startBlock(bNext) 3085 } 3086 3087 // rtcall issues a call to the given runtime function fn with the listed args. 3088 // Returns a slice of results of the given result types. 3089 // The call is added to the end of the current block. 3090 // If returns is false, the block is marked as an exit block. 3091 // If returns is true, the block is marked as a call block. A new block 3092 // is started to load the return values. 
3093 func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value { 3094 // Write args to the stack 3095 off := Ctxt.FixedFrameSize() 3096 for _, arg := range args { 3097 t := arg.Type 3098 off = Rnd(off, t.Alignment()) 3099 ptr := s.sp 3100 if off != 0 { 3101 ptr = s.newValue1I(ssa.OpOffPtr, t.PtrTo(), off, s.sp) 3102 } 3103 size := t.Size() 3104 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem()) 3105 off += size 3106 } 3107 off = Rnd(off, int64(Widthptr)) 3108 if Thearch.LinkArch.Name == "amd64p32" { 3109 // amd64p32 wants 8-byte alignment of the start of the return values. 3110 off = Rnd(off, 8) 3111 } 3112 3113 // Issue call 3114 call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem()) 3115 s.vars[&memVar] = call 3116 3117 // Finish block 3118 b := s.endBlock() 3119 if !returns { 3120 b.Kind = ssa.BlockExit 3121 b.SetControl(call) 3122 call.AuxInt = off - Ctxt.FixedFrameSize() 3123 if len(results) > 0 { 3124 Fatalf("panic call can't have results") 3125 } 3126 return nil 3127 } 3128 b.Kind = ssa.BlockCall 3129 b.SetControl(call) 3130 bNext := s.f.NewBlock(ssa.BlockPlain) 3131 b.AddEdgeTo(bNext) 3132 s.startBlock(bNext) 3133 3134 // Keep input pointer args live across calls. This is a bandaid until 1.8. 3135 for _, n := range s.ptrargs { 3136 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem()) 3137 } 3138 3139 // Load results 3140 res := make([]*ssa.Value, len(results)) 3141 for i, t := range results { 3142 off = Rnd(off, t.Alignment()) 3143 ptr := s.sp 3144 if off != 0 { 3145 ptr = s.newValue1I(ssa.OpOffPtr, Ptrto(t), off, s.sp) 3146 } 3147 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) 3148 off += t.Size() 3149 } 3150 off = Rnd(off, int64(Widthptr)) 3151 3152 // Remember how much callee stack space we needed. 
3153 call.AuxInt = off 3154 3155 return res 3156 } 3157 3158 // insertWBmove inserts the assignment *left = *right including a write barrier. 3159 // t is the type being assigned. 3160 func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightIsVolatile bool) { 3161 // if writeBarrier.enabled { 3162 // typedmemmove(&t, left, right) 3163 // } else { 3164 // *left = *right 3165 // } 3166 3167 if s.noWB { 3168 s.Fatalf("write barrier prohibited") 3169 } 3170 if s.WBLineno == 0 { 3171 s.WBLineno = left.Line 3172 } 3173 bThen := s.f.NewBlock(ssa.BlockPlain) 3174 bElse := s.f.NewBlock(ssa.BlockPlain) 3175 bEnd := s.f.NewBlock(ssa.BlockPlain) 3176 3177 aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym} 3178 flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb) 3179 // Load word, test word, avoiding partial register write from load byte. 3180 flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem()) 3181 flag = s.newValue2(ssa.OpNeq32, Types[TBOOL], flag, s.constInt32(Types[TUINT32], 0)) 3182 b := s.endBlock() 3183 b.Kind = ssa.BlockIf 3184 b.Likely = ssa.BranchUnlikely 3185 b.SetControl(flag) 3186 b.AddEdgeTo(bThen) 3187 b.AddEdgeTo(bElse) 3188 3189 s.startBlock(bThen) 3190 3191 if !rightIsVolatile { 3192 // Issue typedmemmove call. 3193 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb) 3194 s.rtcall(typedmemmove, true, nil, taddr, left, right) 3195 } else { 3196 // Copy to temp location if the source is volatile (will be clobbered by 3197 // a function call). Marshaling the args to typedmemmove might clobber the 3198 // value we're trying to move. 3199 tmp := temp(t) 3200 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem()) 3201 tmpaddr, _ := s.addr(tmp, true) 3202 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), tmpaddr, right, s.mem()) 3203 // Issue typedmemmove call. 
3204 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb) 3205 s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr) 3206 // Mark temp as dead. 3207 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem()) 3208 } 3209 s.endBlock().AddEdgeTo(bEnd) 3210 3211 s.startBlock(bElse) 3212 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), left, right, s.mem()) 3213 s.endBlock().AddEdgeTo(bEnd) 3214 3215 s.startBlock(bEnd) 3216 3217 if Debug_wb > 0 { 3218 Warnl(line, "write barrier") 3219 } 3220 } 3221 3222 // insertWBstore inserts the assignment *left = right including a write barrier. 3223 // t is the type being assigned. 3224 func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) { 3225 // store scalar fields 3226 // if writeBarrier.enabled { 3227 // writebarrierptr for pointer fields 3228 // } else { 3229 // store pointer fields 3230 // } 3231 3232 if s.noWB { 3233 s.Fatalf("write barrier prohibited") 3234 } 3235 if s.WBLineno == 0 { 3236 s.WBLineno = left.Line 3237 } 3238 s.storeTypeScalars(t, left, right, skip) 3239 3240 bThen := s.f.NewBlock(ssa.BlockPlain) 3241 bElse := s.f.NewBlock(ssa.BlockPlain) 3242 bEnd := s.f.NewBlock(ssa.BlockPlain) 3243 3244 aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym} 3245 flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb) 3246 // Load word, test word, avoiding partial register write from load byte. 3247 flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem()) 3248 flag = s.newValue2(ssa.OpNeq32, Types[TBOOL], flag, s.constInt32(Types[TUINT32], 0)) 3249 b := s.endBlock() 3250 b.Kind = ssa.BlockIf 3251 b.Likely = ssa.BranchUnlikely 3252 b.SetControl(flag) 3253 b.AddEdgeTo(bThen) 3254 b.AddEdgeTo(bElse) 3255 3256 // Issue write barriers for pointer writes. 
3257 s.startBlock(bThen) 3258 s.storeTypePtrsWB(t, left, right) 3259 s.endBlock().AddEdgeTo(bEnd) 3260 3261 // Issue regular stores for pointer writes. 3262 s.startBlock(bElse) 3263 s.storeTypePtrs(t, left, right) 3264 s.endBlock().AddEdgeTo(bEnd) 3265 3266 s.startBlock(bEnd) 3267 3268 if Debug_wb > 0 { 3269 Warnl(line, "write barrier") 3270 } 3271 } 3272 3273 // do *left = right for all scalar (non-pointer) parts of t. 3274 func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) { 3275 switch { 3276 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): 3277 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem()) 3278 case t.IsPtrShaped(): 3279 // no scalar fields. 3280 case t.IsString(): 3281 if skip&skipLen != 0 { 3282 return 3283 } 3284 len := s.newValue1(ssa.OpStringLen, Types[TINT], right) 3285 lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left) 3286 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem()) 3287 case t.IsSlice(): 3288 if skip&skipLen == 0 { 3289 len := s.newValue1(ssa.OpSliceLen, Types[TINT], right) 3290 lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left) 3291 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem()) 3292 } 3293 if skip&skipCap == 0 { 3294 cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right) 3295 capAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), 2*s.config.IntSize, left) 3296 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem()) 3297 } 3298 case t.IsInterface(): 3299 // itab field doesn't need a write barrier (even though it is a pointer). 
3300 itab := s.newValue1(ssa.OpITab, Ptrto(Types[TUINT8]), right) 3301 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem()) 3302 case t.IsStruct(): 3303 n := t.NumFields() 3304 for i := 0; i < n; i++ { 3305 ft := t.FieldType(i) 3306 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3307 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3308 s.storeTypeScalars(ft.(*Type), addr, val, 0) 3309 } 3310 default: 3311 s.Fatalf("bad write barrier type %s", t) 3312 } 3313 } 3314 3315 // do *left = right for all pointer parts of t. 3316 func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) { 3317 switch { 3318 case t.IsPtrShaped(): 3319 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem()) 3320 case t.IsString(): 3321 ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right) 3322 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3323 case t.IsSlice(): 3324 ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right) 3325 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3326 case t.IsInterface(): 3327 // itab field is treated as a scalar. 
3328 idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right) 3329 idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left) 3330 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem()) 3331 case t.IsStruct(): 3332 n := t.NumFields() 3333 for i := 0; i < n; i++ { 3334 ft := t.FieldType(i) 3335 if !haspointers(ft.(*Type)) { 3336 continue 3337 } 3338 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3339 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3340 s.storeTypePtrs(ft.(*Type), addr, val) 3341 } 3342 default: 3343 s.Fatalf("bad write barrier type %s", t) 3344 } 3345 } 3346 3347 // do *left = right with a write barrier for all pointer parts of t. 3348 func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) { 3349 switch { 3350 case t.IsPtrShaped(): 3351 s.rtcall(writebarrierptr, true, nil, left, right) 3352 case t.IsString(): 3353 ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right) 3354 s.rtcall(writebarrierptr, true, nil, left, ptr) 3355 case t.IsSlice(): 3356 ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right) 3357 s.rtcall(writebarrierptr, true, nil, left, ptr) 3358 case t.IsInterface(): 3359 idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right) 3360 idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left) 3361 s.rtcall(writebarrierptr, true, nil, idataAddr, idata) 3362 case t.IsStruct(): 3363 n := t.NumFields() 3364 for i := 0; i < n; i++ { 3365 ft := t.FieldType(i) 3366 if !haspointers(ft.(*Type)) { 3367 continue 3368 } 3369 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3370 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3371 s.storeTypePtrsWB(ft.(*Type), addr, val) 3372 } 3373 default: 3374 s.Fatalf("bad write barrier type %s", t) 3375 } 3376 } 3377 3378 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result. 
3379 // i,j,k may be nil, in which case they are set to their default value. 3380 // t is a slice, ptr to array, or string type. 3381 func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) { 3382 var elemtype *Type 3383 var ptrtype *Type 3384 var ptr *ssa.Value 3385 var len *ssa.Value 3386 var cap *ssa.Value 3387 zero := s.constInt(Types[TINT], 0) 3388 switch { 3389 case t.IsSlice(): 3390 elemtype = t.Elem() 3391 ptrtype = Ptrto(elemtype) 3392 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v) 3393 len = s.newValue1(ssa.OpSliceLen, Types[TINT], v) 3394 cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v) 3395 case t.IsString(): 3396 elemtype = Types[TUINT8] 3397 ptrtype = Ptrto(elemtype) 3398 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v) 3399 len = s.newValue1(ssa.OpStringLen, Types[TINT], v) 3400 cap = len 3401 case t.IsPtr(): 3402 if !t.Elem().IsArray() { 3403 s.Fatalf("bad ptr to array in slice %v\n", t) 3404 } 3405 elemtype = t.Elem().Elem() 3406 ptrtype = Ptrto(elemtype) 3407 s.nilCheck(v) 3408 ptr = v 3409 len = s.constInt(Types[TINT], t.Elem().NumElem()) 3410 cap = len 3411 default: 3412 s.Fatalf("bad type in slice %v\n", t) 3413 } 3414 3415 // Set default values 3416 if i == nil { 3417 i = zero 3418 } 3419 if j == nil { 3420 j = len 3421 } 3422 if k == nil { 3423 k = cap 3424 } 3425 3426 // Panic if slice indices are not in bounds. 3427 s.sliceBoundsCheck(i, j) 3428 if j != k { 3429 s.sliceBoundsCheck(j, k) 3430 } 3431 if k != cap { 3432 s.sliceBoundsCheck(k, cap) 3433 } 3434 3435 // Generate the following code assuming that indexes are in bounds. 3436 // The conditional is to make sure that we don't generate a slice 3437 // that points to the next object in memory. 
3438 // rlen = j-i 3439 // rcap = k-i 3440 // delta = i*elemsize 3441 // if rcap == 0 { 3442 // delta = 0 3443 // } 3444 // rptr = p+delta 3445 // result = (SliceMake rptr rlen rcap) 3446 subOp := s.ssaOp(OSUB, Types[TINT]) 3447 eqOp := s.ssaOp(OEQ, Types[TINT]) 3448 mulOp := s.ssaOp(OMUL, Types[TINT]) 3449 rlen := s.newValue2(subOp, Types[TINT], j, i) 3450 var rcap *ssa.Value 3451 switch { 3452 case t.IsString(): 3453 // Capacity of the result is unimportant. However, we use 3454 // rcap to test if we've generated a zero-length slice. 3455 // Use length of strings for that. 3456 rcap = rlen 3457 case j == k: 3458 rcap = rlen 3459 default: 3460 rcap = s.newValue2(subOp, Types[TINT], k, i) 3461 } 3462 3463 // delta = # of elements to offset pointer by. 3464 s.vars[&deltaVar] = i 3465 3466 // Generate code to set delta=0 if the resulting capacity is zero. 3467 if !((i.Op == ssa.OpConst64 && i.AuxInt == 0) || 3468 (i.Op == ssa.OpConst32 && int32(i.AuxInt) == 0)) { 3469 cmp := s.newValue2(eqOp, Types[TBOOL], rcap, zero) 3470 3471 b := s.endBlock() 3472 b.Kind = ssa.BlockIf 3473 b.Likely = ssa.BranchUnlikely 3474 b.SetControl(cmp) 3475 3476 // Generate block which zeros the delta variable. 3477 nz := s.f.NewBlock(ssa.BlockPlain) 3478 b.AddEdgeTo(nz) 3479 s.startBlock(nz) 3480 s.vars[&deltaVar] = zero 3481 s.endBlock() 3482 3483 // All done. 3484 merge := s.f.NewBlock(ssa.BlockPlain) 3485 b.AddEdgeTo(merge) 3486 nz.AddEdgeTo(merge) 3487 s.startBlock(merge) 3488 3489 // TODO: use conditional moves somehow? 
3490 } 3491 3492 // Compute rptr = ptr + delta * elemsize 3493 rptr := s.newValue2(ssa.OpAddPtr, ptrtype, ptr, s.newValue2(mulOp, Types[TINT], s.variable(&deltaVar, Types[TINT]), s.constInt(Types[TINT], elemtype.Width))) 3494 delete(s.vars, &deltaVar) 3495 return rptr, rlen, rcap 3496 } 3497 3498 type u2fcvtTab struct { 3499 geq, cvt2F, and, rsh, or, add ssa.Op 3500 one func(*state, ssa.Type, int64) *ssa.Value 3501 } 3502 3503 var u64_f64 u2fcvtTab = u2fcvtTab{ 3504 geq: ssa.OpGeq64, 3505 cvt2F: ssa.OpCvt64to64F, 3506 and: ssa.OpAnd64, 3507 rsh: ssa.OpRsh64Ux64, 3508 or: ssa.OpOr64, 3509 add: ssa.OpAdd64F, 3510 one: (*state).constInt64, 3511 } 3512 3513 var u64_f32 u2fcvtTab = u2fcvtTab{ 3514 geq: ssa.OpGeq64, 3515 cvt2F: ssa.OpCvt64to32F, 3516 and: ssa.OpAnd64, 3517 rsh: ssa.OpRsh64Ux64, 3518 or: ssa.OpOr64, 3519 add: ssa.OpAdd32F, 3520 one: (*state).constInt64, 3521 } 3522 3523 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3524 return s.uintTofloat(&u64_f64, n, x, ft, tt) 3525 } 3526 3527 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3528 return s.uintTofloat(&u64_f32, n, x, ft, tt) 3529 } 3530 3531 func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3532 // if x >= 0 { 3533 // result = (floatY) x 3534 // } else { 3535 // y = uintX(x) ; y = x & 1 3536 // z = uintX(x) ; z = z >> 1 3537 // z = z >> 1 3538 // z = z | y 3539 // result = floatY(z) 3540 // result = result + result 3541 // } 3542 // 3543 // Code borrowed from old code generator. 3544 // What's going on: large 64-bit "unsigned" looks like 3545 // negative number to hardware's integer-to-float 3546 // conversion. However, because the mantissa is only 3547 // 63 bits, we don't need the LSB, so instead we do an 3548 // unsigned right shift (divide by two), convert, and 3549 // double. 
However, before we do that, we need to be 3550 // sure that we do not lose a "1" if that made the 3551 // difference in the resulting rounding. Therefore, we 3552 // preserve it, and OR (not ADD) it back in. The case 3553 // that matters is when the eleven discarded bits are 3554 // equal to 10000000001; that rounds up, and the 1 cannot 3555 // be lost else it would round down if the LSB of the 3556 // candidate mantissa is 0. 3557 cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft)) 3558 b := s.endBlock() 3559 b.Kind = ssa.BlockIf 3560 b.SetControl(cmp) 3561 b.Likely = ssa.BranchLikely 3562 3563 bThen := s.f.NewBlock(ssa.BlockPlain) 3564 bElse := s.f.NewBlock(ssa.BlockPlain) 3565 bAfter := s.f.NewBlock(ssa.BlockPlain) 3566 3567 b.AddEdgeTo(bThen) 3568 s.startBlock(bThen) 3569 a0 := s.newValue1(cvttab.cvt2F, tt, x) 3570 s.vars[n] = a0 3571 s.endBlock() 3572 bThen.AddEdgeTo(bAfter) 3573 3574 b.AddEdgeTo(bElse) 3575 s.startBlock(bElse) 3576 one := cvttab.one(s, ft, 1) 3577 y := s.newValue2(cvttab.and, ft, x, one) 3578 z := s.newValue2(cvttab.rsh, ft, x, one) 3579 z = s.newValue2(cvttab.or, ft, z, y) 3580 a := s.newValue1(cvttab.cvt2F, tt, z) 3581 a1 := s.newValue2(cvttab.add, tt, a, a) 3582 s.vars[n] = a1 3583 s.endBlock() 3584 bElse.AddEdgeTo(bAfter) 3585 3586 s.startBlock(bAfter) 3587 return s.variable(n, n.Type) 3588 } 3589 3590 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
3591 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { 3592 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { 3593 s.Fatalf("node must be a map or a channel") 3594 } 3595 // if n == nil { 3596 // return 0 3597 // } else { 3598 // // len 3599 // return *((*int)n) 3600 // // cap 3601 // return *(((*int)n)+1) 3602 // } 3603 lenType := n.Type 3604 nilValue := s.constNil(Types[TUINTPTR]) 3605 cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue) 3606 b := s.endBlock() 3607 b.Kind = ssa.BlockIf 3608 b.SetControl(cmp) 3609 b.Likely = ssa.BranchUnlikely 3610 3611 bThen := s.f.NewBlock(ssa.BlockPlain) 3612 bElse := s.f.NewBlock(ssa.BlockPlain) 3613 bAfter := s.f.NewBlock(ssa.BlockPlain) 3614 3615 // length/capacity of a nil map/chan is zero 3616 b.AddEdgeTo(bThen) 3617 s.startBlock(bThen) 3618 s.vars[n] = s.zeroVal(lenType) 3619 s.endBlock() 3620 bThen.AddEdgeTo(bAfter) 3621 3622 b.AddEdgeTo(bElse) 3623 s.startBlock(bElse) 3624 if n.Op == OLEN { 3625 // length is stored in the first word for map/chan 3626 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem()) 3627 } else if n.Op == OCAP { 3628 // capacity is stored in the second word for chan 3629 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) 3630 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem()) 3631 } else { 3632 s.Fatalf("op must be OLEN or OCAP") 3633 } 3634 s.endBlock() 3635 bElse.AddEdgeTo(bAfter) 3636 3637 s.startBlock(bAfter) 3638 return s.variable(n, lenType) 3639 } 3640 3641 type f2uCvtTab struct { 3642 ltf, cvt2U, subf ssa.Op 3643 value func(*state, ssa.Type, float64) *ssa.Value 3644 } 3645 3646 var f32_u64 f2uCvtTab = f2uCvtTab{ 3647 ltf: ssa.OpLess32F, 3648 cvt2U: ssa.OpCvt32Fto64, 3649 subf: ssa.OpSub32F, 3650 value: (*state).constFloat32, 3651 } 3652 3653 var f64_u64 f2uCvtTab = f2uCvtTab{ 3654 ltf: ssa.OpLess64F, 3655 cvt2U: ssa.OpCvt64Fto64, 3656 subf: ssa.OpSub64F, 3657 value: (*state).constFloat64, 3658 } 3659 3660 func (s *state) 
float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3661 return s.floatToUint(&f32_u64, n, x, ft, tt) 3662 } 3663 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3664 return s.floatToUint(&f64_u64, n, x, ft, tt) 3665 } 3666 3667 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3668 // if x < 9223372036854775808.0 { 3669 // result = uintY(x) 3670 // } else { 3671 // y = x - 9223372036854775808.0 3672 // z = uintY(y) 3673 // result = z | -9223372036854775808 3674 // } 3675 twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0) 3676 cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63) 3677 b := s.endBlock() 3678 b.Kind = ssa.BlockIf 3679 b.SetControl(cmp) 3680 b.Likely = ssa.BranchLikely 3681 3682 bThen := s.f.NewBlock(ssa.BlockPlain) 3683 bElse := s.f.NewBlock(ssa.BlockPlain) 3684 bAfter := s.f.NewBlock(ssa.BlockPlain) 3685 3686 b.AddEdgeTo(bThen) 3687 s.startBlock(bThen) 3688 a0 := s.newValue1(cvttab.cvt2U, tt, x) 3689 s.vars[n] = a0 3690 s.endBlock() 3691 bThen.AddEdgeTo(bAfter) 3692 3693 b.AddEdgeTo(bElse) 3694 s.startBlock(bElse) 3695 y := s.newValue2(cvttab.subf, ft, x, twoToThe63) 3696 y = s.newValue1(cvttab.cvt2U, tt, y) 3697 z := s.constInt64(tt, -9223372036854775808) 3698 a1 := s.newValue2(ssa.OpOr64, tt, y, z) 3699 s.vars[n] = a1 3700 s.endBlock() 3701 bElse.AddEdgeTo(bAfter) 3702 3703 s.startBlock(bAfter) 3704 return s.variable(n, n.Type) 3705 } 3706 3707 // ifaceType returns the value for the word containing the type. 3708 // n is the node for the interface expression. 3709 // v is the corresponding value. 3710 func (s *state) ifaceType(n *Node, v *ssa.Value) *ssa.Value { 3711 byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte) 3712 3713 if n.Type.IsEmptyInterface() { 3714 // Have *eface. The type is the first word in the struct. 3715 return s.newValue1(ssa.OpITab, byteptr, v) 3716 } 3717 3718 // Have *iface. 
3719 // The first word in the struct is the *itab. 3720 // If the *itab is nil, return 0. 3721 // Otherwise, the second word in the *itab is the type. 3722 3723 tab := s.newValue1(ssa.OpITab, byteptr, v) 3724 s.vars[&typVar] = tab 3725 isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr)) 3726 b := s.endBlock() 3727 b.Kind = ssa.BlockIf 3728 b.SetControl(isnonnil) 3729 b.Likely = ssa.BranchLikely 3730 3731 bLoad := s.f.NewBlock(ssa.BlockPlain) 3732 bEnd := s.f.NewBlock(ssa.BlockPlain) 3733 3734 b.AddEdgeTo(bLoad) 3735 b.AddEdgeTo(bEnd) 3736 bLoad.AddEdgeTo(bEnd) 3737 3738 s.startBlock(bLoad) 3739 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab) 3740 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 3741 s.endBlock() 3742 3743 s.startBlock(bEnd) 3744 typ := s.variable(&typVar, byteptr) 3745 delete(s.vars, &typVar) 3746 return typ 3747 } 3748 3749 // dottype generates SSA for a type assertion node. 3750 // commaok indicates whether to panic or return a bool. 3751 // If commaok is false, resok will be nil. 3752 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { 3753 iface := s.expr(n.Left) 3754 typ := s.ifaceType(n.Left, iface) // actual concrete type 3755 target := s.expr(typename(n.Type)) // target type 3756 if !isdirectiface(n.Type) { 3757 // walk rewrites ODOTTYPE/OAS2DOTTYPE into runtime calls except for this case. 3758 Fatalf("dottype needs a direct iface type %s", n.Type) 3759 } 3760 3761 if Debug_typeassert > 0 { 3762 Warnl(n.Lineno, "type assertion inlined") 3763 } 3764 3765 // TODO: If we have a nonempty interface and its itab field is nil, 3766 // then this test is redundant and ifaceType should just branch directly to bFail. 
3767 cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target) 3768 b := s.endBlock() 3769 b.Kind = ssa.BlockIf 3770 b.SetControl(cond) 3771 b.Likely = ssa.BranchLikely 3772 3773 byteptr := Ptrto(Types[TUINT8]) 3774 3775 bOk := s.f.NewBlock(ssa.BlockPlain) 3776 bFail := s.f.NewBlock(ssa.BlockPlain) 3777 b.AddEdgeTo(bOk) 3778 b.AddEdgeTo(bFail) 3779 3780 if !commaok { 3781 // on failure, panic by calling panicdottype 3782 s.startBlock(bFail) 3783 taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: typenamesym(n.Left.Type)}, s.sb) 3784 s.rtcall(panicdottype, false, nil, typ, target, taddr) 3785 3786 // on success, return idata field 3787 s.startBlock(bOk) 3788 return s.newValue1(ssa.OpIData, n.Type, iface), nil 3789 } 3790 3791 // commaok is the more complicated case because we have 3792 // a control flow merge point. 3793 bEnd := s.f.NewBlock(ssa.BlockPlain) 3794 3795 // type assertion succeeded 3796 s.startBlock(bOk) 3797 s.vars[&idataVar] = s.newValue1(ssa.OpIData, n.Type, iface) 3798 s.vars[&okVar] = s.constBool(true) 3799 s.endBlock() 3800 bOk.AddEdgeTo(bEnd) 3801 3802 // type assertion failed 3803 s.startBlock(bFail) 3804 s.vars[&idataVar] = s.constNil(byteptr) 3805 s.vars[&okVar] = s.constBool(false) 3806 s.endBlock() 3807 bFail.AddEdgeTo(bEnd) 3808 3809 // merge point 3810 s.startBlock(bEnd) 3811 res = s.variable(&idataVar, byteptr) 3812 resok = s.variable(&okVar, Types[TBOOL]) 3813 delete(s.vars, &idataVar) 3814 delete(s.vars, &okVar) 3815 return res, resok 3816 } 3817 3818 // checkgoto checks that a goto from from to to does not 3819 // jump into a block or jump over variable declarations. 3820 // It is a copy of checkgoto in the pre-SSA backend, 3821 // modified only for line number handling. 3822 // TODO: document how this works and why it is designed the way it is. 
3823 func (s *state) checkgoto(from *Node, to *Node) { 3824 if from.Sym == to.Sym { 3825 return 3826 } 3827 3828 nf := 0 3829 for fs := from.Sym; fs != nil; fs = fs.Link { 3830 nf++ 3831 } 3832 nt := 0 3833 for fs := to.Sym; fs != nil; fs = fs.Link { 3834 nt++ 3835 } 3836 fs := from.Sym 3837 for ; nf > nt; nf-- { 3838 fs = fs.Link 3839 } 3840 if fs != to.Sym { 3841 // decide what to complain about. 3842 // prefer to complain about 'into block' over declarations, 3843 // so scan backward to find most recent block or else dcl. 3844 var block *Sym 3845 3846 var dcl *Sym 3847 ts := to.Sym 3848 for ; nt > nf; nt-- { 3849 if ts.Pkg == nil { 3850 block = ts 3851 } else { 3852 dcl = ts 3853 } 3854 ts = ts.Link 3855 } 3856 3857 for ts != fs { 3858 if ts.Pkg == nil { 3859 block = ts 3860 } else { 3861 dcl = ts 3862 } 3863 ts = ts.Link 3864 fs = fs.Link 3865 } 3866 3867 lno := from.Left.Lineno 3868 if block != nil { 3869 yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno)) 3870 } else { 3871 yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno)) 3872 } 3873 } 3874 } 3875 3876 // variable returns the value of a variable at the current location. 3877 func (s *state) variable(name *Node, t ssa.Type) *ssa.Value { 3878 v := s.vars[name] 3879 if v == nil { 3880 v = s.newValue0A(ssa.OpFwdRef, t, name) 3881 s.fwdRefs = append(s.fwdRefs, v) 3882 s.vars[name] = v 3883 s.addNamedValue(name, v) 3884 } 3885 return v 3886 } 3887 3888 func (s *state) mem() *ssa.Value { 3889 return s.variable(&memVar, ssa.TypeMem) 3890 } 3891 3892 func (s *state) linkForwardReferences(dm *sparseDefState) { 3893 3894 // Build SSA graph. Each variable on its first use in a basic block 3895 // leaves a FwdRef in that block representing the incoming value 3896 // of that variable. This function links that ref up with possible definitions, 3897 // inserting Phi values as needed. 
This is essentially the algorithm 3898 // described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau: 3899 // http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf 3900 // Differences: 3901 // - We use FwdRef nodes to postpone phi building until the CFG is 3902 // completely built. That way we can avoid the notion of "sealed" 3903 // blocks. 3904 // - Phi optimization is a separate pass (in ../ssa/phielim.go). 3905 for len(s.fwdRefs) > 0 { 3906 v := s.fwdRefs[len(s.fwdRefs)-1] 3907 s.fwdRefs = s.fwdRefs[:len(s.fwdRefs)-1] 3908 s.resolveFwdRef(v, dm) 3909 } 3910 } 3911 3912 // resolveFwdRef modifies v to be the variable's value at the start of its block. 3913 // v must be a FwdRef op. 3914 func (s *state) resolveFwdRef(v *ssa.Value, dm *sparseDefState) { 3915 b := v.Block 3916 name := v.Aux.(*Node) 3917 v.Aux = nil 3918 if b == s.f.Entry { 3919 // Live variable at start of function. 3920 if s.canSSA(name) { 3921 if strings.HasPrefix(name.Sym.Name, "autotmp_") { 3922 // It's likely that this is an uninitialized variable in the entry block. 3923 s.Fatalf("Treating auto as if it were arg, func %s, node %v, value %v", b.Func.Name, name, v) 3924 } 3925 v.Op = ssa.OpArg 3926 v.Aux = name 3927 return 3928 } 3929 // Not SSAable. Load it. 3930 addr := s.decladdrs[name] 3931 if addr == nil { 3932 // TODO: closure args reach here. 3933 s.Unimplementedf("unhandled closure arg %s at entry to function %s", name, b.Func.Name) 3934 } 3935 if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok { 3936 s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name) 3937 } 3938 v.Op = ssa.OpLoad 3939 v.AddArgs(addr, s.startmem) 3940 return 3941 } 3942 if len(b.Preds) == 0 { 3943 // This block is dead; we have no predecessors and we're not the entry block. 3944 // It doesn't matter what we use here as long as it is well-formed. 3945 v.Op = ssa.OpUnknown 3946 return 3947 } 3948 // Find variable value on each predecessor. 
3949 var argstore [4]*ssa.Value 3950 args := argstore[:0] 3951 for _, e := range b.Preds { 3952 p := e.Block() 3953 p = dm.FindBetterDefiningBlock(name, p) // try sparse improvement on p 3954 args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line)) 3955 } 3956 3957 // Decide if we need a phi or not. We need a phi if there 3958 // are two different args (which are both not v). 3959 var w *ssa.Value 3960 for _, a := range args { 3961 if a == v { 3962 continue // self-reference 3963 } 3964 if a == w { 3965 continue // already have this witness 3966 } 3967 if w != nil { 3968 // two witnesses, need a phi value 3969 v.Op = ssa.OpPhi 3970 v.AddArgs(args...) 3971 return 3972 } 3973 w = a // save witness 3974 } 3975 if w == nil { 3976 s.Fatalf("no witness for reachable phi %s", v) 3977 } 3978 // One witness. Make v a copy of w. 3979 v.Op = ssa.OpCopy 3980 v.AddArg(w) 3981 } 3982 3983 // lookupVarOutgoing finds the variable's value at the end of block b. 3984 func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int32) *ssa.Value { 3985 for { 3986 if v, ok := s.defvars[b.ID][name]; ok { 3987 return v 3988 } 3989 // The variable is not defined by b and we haven't looked it up yet. 3990 // If b has exactly one predecessor, loop to look it up there. 3991 // Otherwise, give up and insert a new FwdRef and resolve it later. 3992 if len(b.Preds) != 1 { 3993 break 3994 } 3995 b = b.Preds[0].Block() 3996 } 3997 // Generate a FwdRef for the variable and return that. 3998 v := b.NewValue0A(line, ssa.OpFwdRef, t, name) 3999 s.fwdRefs = append(s.fwdRefs, v) 4000 s.defvars[b.ID][name] = v 4001 s.addNamedValue(name, v) 4002 return v 4003 } 4004 4005 func (s *state) addNamedValue(n *Node, v *ssa.Value) { 4006 if n.Class == Pxxx { 4007 // Don't track our dummy nodes (&memVar etc.). 4008 return 4009 } 4010 if strings.HasPrefix(n.Sym.Name, "autotmp_") { 4011 // Don't track autotmp_ variables. 
4012 return 4013 } 4014 if n.Class == PPARAMOUT { 4015 // Don't track named output values. This prevents return values 4016 // from being assigned too early. See #14591 and #14762. TODO: allow this. 4017 return 4018 } 4019 if n.Class == PAUTO && n.Xoffset != 0 { 4020 s.Fatalf("AUTO var with offset %s %d", n, n.Xoffset) 4021 } 4022 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} 4023 values, ok := s.f.NamedValues[loc] 4024 if !ok { 4025 s.f.Names = append(s.f.Names, loc) 4026 } 4027 s.f.NamedValues[loc] = append(values, v) 4028 } 4029 4030 // Branch is an unresolved branch. 4031 type Branch struct { 4032 P *obj.Prog // branch instruction 4033 B *ssa.Block // target 4034 } 4035 4036 // SSAGenState contains state needed during Prog generation. 4037 type SSAGenState struct { 4038 // Branches remembers all the branch instructions we've seen 4039 // and where they would like to go. 4040 Branches []Branch 4041 4042 // bstart remembers where each block starts (indexed by block ID) 4043 bstart []*obj.Prog 4044 4045 // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 4046 SSEto387 map[int16]int16 4047 // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. 4048 ScratchFpMem *Node 4049 } 4050 4051 // Pc returns the current Prog. 4052 func (s *SSAGenState) Pc() *obj.Prog { 4053 return Pc 4054 } 4055 4056 // SetLineno sets the current source line number. 4057 func (s *SSAGenState) SetLineno(l int32) { 4058 lineno = l 4059 } 4060 4061 // genssa appends entries to ptxt for each instruction in f. 4062 // gcargs and gclocals are filled in with pointer maps for the frame. 4063 func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) { 4064 var s SSAGenState 4065 4066 e := f.Config.Frontend().(*ssaExport) 4067 // We're about to emit a bunch of Progs. 
4068 // Since the only way to get here is to explicitly request it, 4069 // just fail on unimplemented instead of trying to unwind our mess. 4070 e.mustImplement = true 4071 4072 // Remember where each block starts. 4073 s.bstart = make([]*obj.Prog, f.NumBlocks()) 4074 4075 var valueProgs map[*obj.Prog]*ssa.Value 4076 var blockProgs map[*obj.Prog]*ssa.Block 4077 var logProgs = e.log 4078 if logProgs { 4079 valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues()) 4080 blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) 4081 f.Logf("genssa %s\n", f.Name) 4082 blockProgs[Pc] = f.Blocks[0] 4083 } 4084 4085 if Thearch.Use387 { 4086 s.SSEto387 = map[int16]int16{} 4087 } 4088 if f.Config.NeedsFpScratch { 4089 s.ScratchFpMem = temp(Types[TUINT64]) 4090 } 4091 4092 // Emit basic blocks 4093 for i, b := range f.Blocks { 4094 s.bstart[b.ID] = Pc 4095 // Emit values in block 4096 Thearch.SSAMarkMoves(&s, b) 4097 for _, v := range b.Values { 4098 x := Pc 4099 Thearch.SSAGenValue(&s, v) 4100 if logProgs { 4101 for ; x != Pc; x = x.Link { 4102 valueProgs[x] = v 4103 } 4104 } 4105 } 4106 // Emit control flow instructions for block 4107 var next *ssa.Block 4108 if i < len(f.Blocks)-1 && (Debug['N'] == 0 || b.Kind == ssa.BlockCall) { 4109 // If -N, leave next==nil so every block with successors 4110 // ends in a JMP (except call blocks - plive doesn't like 4111 // select{send,recv} followed by a JMP call). Helps keep 4112 // line numbers for otherwise empty blocks. 
4113 next = f.Blocks[i+1] 4114 } 4115 x := Pc 4116 Thearch.SSAGenBlock(&s, b, next) 4117 if logProgs { 4118 for ; x != Pc; x = x.Link { 4119 blockProgs[x] = b 4120 } 4121 } 4122 } 4123 4124 // Resolve branches 4125 for _, br := range s.Branches { 4126 br.P.To.Val = s.bstart[br.B.ID] 4127 } 4128 4129 if logProgs { 4130 for p := ptxt; p != nil; p = p.Link { 4131 var s string 4132 if v, ok := valueProgs[p]; ok { 4133 s = v.String() 4134 } else if b, ok := blockProgs[p]; ok { 4135 s = b.String() 4136 } else { 4137 s = " " // most value and branch strings are 2-3 characters long 4138 } 4139 f.Logf("%s\t%s\n", s, p) 4140 } 4141 if f.Config.HTML != nil { 4142 saved := ptxt.Ctxt.LineHist.PrintFilenameOnly 4143 ptxt.Ctxt.LineHist.PrintFilenameOnly = true 4144 var buf bytes.Buffer 4145 buf.WriteString("<code>") 4146 buf.WriteString("<dl class=\"ssa-gen\">") 4147 for p := ptxt; p != nil; p = p.Link { 4148 buf.WriteString("<dt class=\"ssa-prog-src\">") 4149 if v, ok := valueProgs[p]; ok { 4150 buf.WriteString(v.HTML()) 4151 } else if b, ok := blockProgs[p]; ok { 4152 buf.WriteString(b.HTML()) 4153 } 4154 buf.WriteString("</dt>") 4155 buf.WriteString("<dd class=\"ssa-prog\">") 4156 buf.WriteString(html.EscapeString(p.String())) 4157 buf.WriteString("</dd>") 4158 buf.WriteString("</li>") 4159 } 4160 buf.WriteString("</dl>") 4161 buf.WriteString("</code>") 4162 f.Config.HTML.WriteColumn("genssa", buf.String()) 4163 ptxt.Ctxt.LineHist.PrintFilenameOnly = saved 4164 } 4165 } 4166 4167 // Emit static data 4168 if f.StaticData != nil { 4169 for _, n := range f.StaticData.([]*Node) { 4170 if !gen_as_init(n, false) { 4171 Fatalf("non-static data marked as static: %v\n\n", n) 4172 } 4173 } 4174 } 4175 4176 // Allocate stack frame 4177 allocauto(ptxt) 4178 4179 // Generate gc bitmaps. 4180 liveness(Curfn, ptxt, gcargs, gclocals) 4181 4182 // Add frame prologue. Zero ambiguously live variables. 
4183 Thearch.Defframe(ptxt) 4184 if Debug['f'] != 0 { 4185 frame(0) 4186 } 4187 4188 // Remove leftover instrumentation from the instruction stream. 4189 removevardef(ptxt) 4190 4191 f.Config.HTML.Close() 4192 } 4193 4194 type FloatingEQNEJump struct { 4195 Jump obj.As 4196 Index int 4197 } 4198 4199 func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch { 4200 p := Prog(jumps.Jump) 4201 p.To.Type = obj.TYPE_BRANCH 4202 to := jumps.Index 4203 branches = append(branches, Branch{p, b.Succs[to].Block()}) 4204 if to == 1 { 4205 likely = -likely 4206 } 4207 // liblink reorders the instruction stream as it sees fit. 4208 // Pass along what we know so liblink can make use of it. 4209 // TODO: Once we've fully switched to SSA, 4210 // make liblink leave our output alone. 4211 switch likely { 4212 case ssa.BranchUnlikely: 4213 p.From.Type = obj.TYPE_CONST 4214 p.From.Offset = 0 4215 case ssa.BranchLikely: 4216 p.From.Type = obj.TYPE_CONST 4217 p.From.Offset = 1 4218 } 4219 return branches 4220 } 4221 4222 func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) { 4223 likely := b.Likely 4224 switch next { 4225 case b.Succs[0].Block(): 4226 s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches) 4227 s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches) 4228 case b.Succs[1].Block(): 4229 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4230 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4231 default: 4232 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4233 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4234 q := Prog(obj.AJMP) 4235 q.To.Type = obj.TYPE_BRANCH 4236 s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) 4237 } 4238 } 4239 4240 func AuxOffset(v *ssa.Value) (offset int64) { 4241 if v.Aux == nil { 4242 return 0 4243 } 4244 switch sym := v.Aux.(type) { 4245 4246 case *ssa.AutoSymbol: 4247 n := sym.Node.(*Node) 4248 
return n.Xoffset 4249 } 4250 return 0 4251 } 4252 4253 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a. 4254 func AddAux(a *obj.Addr, v *ssa.Value) { 4255 AddAux2(a, v, v.AuxInt) 4256 } 4257 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { 4258 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR { 4259 v.Fatalf("bad AddAux addr %v", a) 4260 } 4261 // add integer offset 4262 a.Offset += offset 4263 4264 // If no additional symbol offset, we're done. 4265 if v.Aux == nil { 4266 return 4267 } 4268 // Add symbol's offset from its base register. 4269 switch sym := v.Aux.(type) { 4270 case *ssa.ExternSymbol: 4271 a.Name = obj.NAME_EXTERN 4272 switch s := sym.Sym.(type) { 4273 case *Sym: 4274 a.Sym = Linksym(s) 4275 case *obj.LSym: 4276 a.Sym = s 4277 default: 4278 v.Fatalf("ExternSymbol.Sym is %T", s) 4279 } 4280 case *ssa.ArgSymbol: 4281 n := sym.Node.(*Node) 4282 a.Name = obj.NAME_PARAM 4283 a.Node = n 4284 a.Sym = Linksym(n.Orig.Sym) 4285 a.Offset += n.Xoffset // TODO: why do I have to add this here? I don't for auto variables. 4286 case *ssa.AutoSymbol: 4287 n := sym.Node.(*Node) 4288 a.Name = obj.NAME_AUTO 4289 a.Node = n 4290 a.Sym = Linksym(n.Sym) 4291 default: 4292 v.Fatalf("aux in %s not implemented %#v", v, v.Aux) 4293 } 4294 } 4295 4296 // SizeAlignAuxInt returns an AuxInt encoding the size and alignment of type t. 4297 func SizeAlignAuxInt(t *Type) int64 { 4298 return ssa.MakeSizeAndAlign(t.Size(), t.Alignment()).Int64() 4299 } 4300 4301 // extendIndex extends v to a full int width. 4302 // panic using the given function if v does not fit in an int (only on 32-bit archs). 4303 func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value { 4304 size := v.Type.Size() 4305 if size == s.config.IntSize { 4306 return v 4307 } 4308 if size > s.config.IntSize { 4309 // truncate 64-bit indexes on 32-bit pointer archs. Test the 4310 // high word and branch to out-of-bounds failure if it is not 0. 
4311 if Debug['B'] == 0 { 4312 hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v) 4313 cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0)) 4314 s.check(cmp, panicfn) 4315 } 4316 return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v) 4317 } 4318 4319 // Extend value to the required size 4320 var op ssa.Op 4321 if v.Type.IsSigned() { 4322 switch 10*size + s.config.IntSize { 4323 case 14: 4324 op = ssa.OpSignExt8to32 4325 case 18: 4326 op = ssa.OpSignExt8to64 4327 case 24: 4328 op = ssa.OpSignExt16to32 4329 case 28: 4330 op = ssa.OpSignExt16to64 4331 case 48: 4332 op = ssa.OpSignExt32to64 4333 default: 4334 s.Fatalf("bad signed index extension %s", v.Type) 4335 } 4336 } else { 4337 switch 10*size + s.config.IntSize { 4338 case 14: 4339 op = ssa.OpZeroExt8to32 4340 case 18: 4341 op = ssa.OpZeroExt8to64 4342 case 24: 4343 op = ssa.OpZeroExt16to32 4344 case 28: 4345 op = ssa.OpZeroExt16to64 4346 case 48: 4347 op = ssa.OpZeroExt32to64 4348 default: 4349 s.Fatalf("bad unsigned index extension %s", v.Type) 4350 } 4351 } 4352 return s.newValue1(op, Types[TINT], v) 4353 } 4354 4355 // SSAReg returns the register to which v has been allocated. 4356 func SSAReg(v *ssa.Value) *ssa.Register { 4357 reg := v.Block.Func.RegAlloc[v.ID] 4358 if reg == nil { 4359 v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func) 4360 } 4361 return reg.(*ssa.Register) 4362 } 4363 4364 // SSAReg0 returns the register to which the first output of v has been allocated. 4365 func SSAReg0(v *ssa.Value) *ssa.Register { 4366 reg := v.Block.Func.RegAlloc[v.ID].(ssa.LocPair)[0] 4367 if reg == nil { 4368 v.Fatalf("nil first register for value: %s\n%s\n", v.LongString(), v.Block.Func) 4369 } 4370 return reg.(*ssa.Register) 4371 } 4372 4373 // SSAReg1 returns the register to which the second output of v has been allocated. 
4374 func SSAReg1(v *ssa.Value) *ssa.Register { 4375 reg := v.Block.Func.RegAlloc[v.ID].(ssa.LocPair)[1] 4376 if reg == nil { 4377 v.Fatalf("nil second register for value: %s\n%s\n", v.LongString(), v.Block.Func) 4378 } 4379 return reg.(*ssa.Register) 4380 } 4381 4382 // SSARegNum returns the register number (in cmd/internal/obj numbering) to which v has been allocated. 4383 func SSARegNum(v *ssa.Value) int16 { 4384 return Thearch.SSARegToReg[SSAReg(v).Num] 4385 } 4386 4387 // SSARegNum0 returns the register number (in cmd/internal/obj numbering) to which the first output of v has been allocated. 4388 func SSARegNum0(v *ssa.Value) int16 { 4389 return Thearch.SSARegToReg[SSAReg0(v).Num] 4390 } 4391 4392 // SSARegNum1 returns the register number (in cmd/internal/obj numbering) to which the second output of v has been allocated. 4393 func SSARegNum1(v *ssa.Value) int16 { 4394 return Thearch.SSARegToReg[SSAReg1(v).Num] 4395 } 4396 4397 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values. 4398 // Called during ssaGenValue. 4399 func CheckLoweredPhi(v *ssa.Value) { 4400 if v.Op != ssa.OpPhi { 4401 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString()) 4402 } 4403 if v.Type.IsMemory() { 4404 return 4405 } 4406 f := v.Block.Func 4407 loc := f.RegAlloc[v.ID] 4408 for _, a := range v.Args { 4409 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? 4410 v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func) 4411 } 4412 } 4413 } 4414 4415 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block. 4416 // The output of LoweredGetClosurePtr is generally hardwired to the correct register. 4417 // That register contains the closure pointer on closure entry. 
4418 func CheckLoweredGetClosurePtr(v *ssa.Value) { 4419 entry := v.Block.Func.Entry 4420 if entry != v.Block || entry.Values[0] != v { 4421 Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) 4422 } 4423 } 4424 4425 // AutoVar returns a *Node and int64 representing the auto variable and offset within it 4426 // where v should be spilled. 4427 func AutoVar(v *ssa.Value) (*Node, int64) { 4428 loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) 4429 if v.Type.Size() > loc.Type.Size() { 4430 v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) 4431 } 4432 return loc.N.(*Node), loc.Off 4433 } 4434 4435 // fieldIdx finds the index of the field referred to by the ODOT node n. 4436 func fieldIdx(n *Node) int { 4437 t := n.Left.Type 4438 f := n.Sym 4439 if !t.IsStruct() { 4440 panic("ODOT's LHS is not a struct") 4441 } 4442 4443 var i int 4444 for _, t1 := range t.Fields().Slice() { 4445 if t1.Sym != f { 4446 i++ 4447 continue 4448 } 4449 if t1.Offset != n.Xoffset { 4450 panic("field offset doesn't match") 4451 } 4452 return i 4453 } 4454 panic(fmt.Sprintf("can't find field in expr %s\n", n)) 4455 4456 // TODO: keep the result of this function somewhere in the ODOT Node 4457 // so we don't have to recompute it each time we need it. 4458 } 4459 4460 // ssaExport exports a bunch of compiler services for the ssa backend. 
4461 type ssaExport struct { 4462 log bool 4463 unimplemented bool 4464 mustImplement bool 4465 } 4466 4467 func (s *ssaExport) TypeBool() ssa.Type { return Types[TBOOL] } 4468 func (s *ssaExport) TypeInt8() ssa.Type { return Types[TINT8] } 4469 func (s *ssaExport) TypeInt16() ssa.Type { return Types[TINT16] } 4470 func (s *ssaExport) TypeInt32() ssa.Type { return Types[TINT32] } 4471 func (s *ssaExport) TypeInt64() ssa.Type { return Types[TINT64] } 4472 func (s *ssaExport) TypeUInt8() ssa.Type { return Types[TUINT8] } 4473 func (s *ssaExport) TypeUInt16() ssa.Type { return Types[TUINT16] } 4474 func (s *ssaExport) TypeUInt32() ssa.Type { return Types[TUINT32] } 4475 func (s *ssaExport) TypeUInt64() ssa.Type { return Types[TUINT64] } 4476 func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] } 4477 func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] } 4478 func (s *ssaExport) TypeInt() ssa.Type { return Types[TINT] } 4479 func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] } 4480 func (s *ssaExport) TypeString() ssa.Type { return Types[TSTRING] } 4481 func (s *ssaExport) TypeBytePtr() ssa.Type { return Ptrto(Types[TUINT8]) } 4482 4483 // StringData returns a symbol (a *Sym wrapped in an interface) which 4484 // is the data component of a global string constant containing s. 4485 func (*ssaExport) StringData(s string) interface{} { 4486 // TODO: is idealstring correct? It might not matter... 4487 _, data := stringsym(s) 4488 return &ssa.ExternSymbol{Typ: idealstring, Sym: data} 4489 } 4490 4491 func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode { 4492 n := temp(t.(*Type)) // Note: adds new auto to Curfn.Func.Dcl list 4493 e.mustImplement = true // This modifies the input to SSA, so we want to make sure we succeed from here! 
4494 return n 4495 } 4496 4497 func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4498 n := name.N.(*Node) 4499 ptrType := Ptrto(Types[TUINT8]) 4500 lenType := Types[TINT] 4501 if n.Class == PAUTO && !n.Addrtaken { 4502 // Split this string up into two separate variables. 4503 p := e.namedAuto(n.Sym.Name+".ptr", ptrType) 4504 l := e.namedAuto(n.Sym.Name+".len", lenType) 4505 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0} 4506 } 4507 // Return the two parts of the larger variable. 4508 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)} 4509 } 4510 4511 func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4512 n := name.N.(*Node) 4513 t := Ptrto(Types[TUINT8]) 4514 if n.Class == PAUTO && !n.Addrtaken { 4515 // Split this interface up into two separate variables. 4516 f := ".itab" 4517 if n.Type.IsEmptyInterface() { 4518 f = ".type" 4519 } 4520 c := e.namedAuto(n.Sym.Name+f, t) 4521 d := e.namedAuto(n.Sym.Name+".data", t) 4522 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4523 } 4524 // Return the two parts of the larger variable. 4525 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)} 4526 } 4527 4528 func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { 4529 n := name.N.(*Node) 4530 ptrType := Ptrto(name.Type.ElemType().(*Type)) 4531 lenType := Types[TINT] 4532 if n.Class == PAUTO && !n.Addrtaken { 4533 // Split this slice up into three separate variables. 
4534 p := e.namedAuto(n.Sym.Name+".ptr", ptrType) 4535 l := e.namedAuto(n.Sym.Name+".len", lenType) 4536 c := e.namedAuto(n.Sym.Name+".cap", lenType) 4537 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0} 4538 } 4539 // Return the three parts of the larger variable. 4540 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, 4541 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}, 4542 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)} 4543 } 4544 4545 func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4546 n := name.N.(*Node) 4547 s := name.Type.Size() / 2 4548 var t *Type 4549 if s == 8 { 4550 t = Types[TFLOAT64] 4551 } else { 4552 t = Types[TFLOAT32] 4553 } 4554 if n.Class == PAUTO && !n.Addrtaken { 4555 // Split this complex up into two separate variables. 4556 c := e.namedAuto(n.Sym.Name+".real", t) 4557 d := e.namedAuto(n.Sym.Name+".imag", t) 4558 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4559 } 4560 // Return the two parts of the larger variable. 4561 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s} 4562 } 4563 4564 func (e *ssaExport) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4565 n := name.N.(*Node) 4566 var t *Type 4567 if name.Type.IsSigned() { 4568 t = Types[TINT32] 4569 } else { 4570 t = Types[TUINT32] 4571 } 4572 if n.Class == PAUTO && !n.Addrtaken { 4573 // Split this int64 up into two separate variables. 4574 h := e.namedAuto(n.Sym.Name+".hi", t) 4575 l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32]) 4576 return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0} 4577 } 4578 // Return the two parts of the larger variable. 
4579 // Assuming little endian (we don't support big endian 32-bit architecture yet) 4580 return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off} 4581 } 4582 4583 func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { 4584 n := name.N.(*Node) 4585 st := name.Type 4586 ft := st.FieldType(i) 4587 if n.Class == PAUTO && !n.Addrtaken { 4588 // Note: the _ field may appear several times. But 4589 // have no fear, identically-named but distinct Autos are 4590 // ok, albeit maybe confusing for a debugger. 4591 x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft) 4592 return ssa.LocalSlot{N: x, Type: ft, Off: 0} 4593 } 4594 return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)} 4595 } 4596 4597 // namedAuto returns a new AUTO variable with the given name and type. 4598 func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode { 4599 t := typ.(*Type) 4600 s := &Sym{Name: name, Pkg: localpkg} 4601 n := Nod(ONAME, nil, nil) 4602 s.Def = n 4603 s.Def.Used = true 4604 n.Sym = s 4605 n.Type = t 4606 n.Class = PAUTO 4607 n.Addable = true 4608 n.Ullman = 1 4609 n.Esc = EscNever 4610 n.Xoffset = 0 4611 n.Name.Curfn = Curfn 4612 Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) 4613 4614 dowidth(t) 4615 e.mustImplement = true 4616 4617 return n 4618 } 4619 4620 func (e *ssaExport) CanSSA(t ssa.Type) bool { 4621 return canSSAType(t.(*Type)) 4622 } 4623 4624 func (e *ssaExport) Line(line int32) string { 4625 return linestr(line) 4626 } 4627 4628 // Log logs a message from the compiler. 4629 func (e *ssaExport) Logf(msg string, args ...interface{}) { 4630 // If e was marked as unimplemented, anything could happen. Ignore. 4631 if e.log && !e.unimplemented { 4632 fmt.Printf(msg, args...) 4633 } 4634 } 4635 4636 func (e *ssaExport) Log() bool { 4637 return e.log 4638 } 4639 4640 // Fatal reports a compiler error and exits. 
4641 func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) { 4642 // If e was marked as unimplemented, anything could happen. Ignore. 4643 if !e.unimplemented { 4644 lineno = line 4645 Fatalf(msg, args...) 4646 } 4647 } 4648 4649 // Unimplemented reports that the function cannot be compiled. 4650 // It will be removed once SSA work is complete. 4651 func (e *ssaExport) Unimplementedf(line int32, msg string, args ...interface{}) { 4652 if e.mustImplement { 4653 lineno = line 4654 Fatalf(msg, args...) 4655 } 4656 const alwaysLog = false // enable to calculate top unimplemented features 4657 if !e.unimplemented && (e.log || alwaysLog) { 4658 // first implementation failure, print explanation 4659 fmt.Printf("SSA unimplemented: "+msg+"\n", args...) 4660 } 4661 e.unimplemented = true 4662 } 4663 4664 // Warnl reports a "warning", which is usually flag-triggered 4665 // logging output for the benefit of tests. 4666 func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) { 4667 Warnl(line, fmt_, args...) 4668 } 4669 4670 func (e *ssaExport) Debug_checknil() bool { 4671 return Debug_checknil != 0 4672 } 4673 4674 func (n *Node) Typ() ssa.Type { 4675 return n.Type 4676 }