// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"fmt"
	"html"
	"os"
	"strings"

	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/sys"
)

var ssaEnabled = true

var ssaConfig *ssa.Config
var ssaExp ssaExport

// initssa returns the shared SSA backend configuration, creating it on
// first use. It also resets the exporter's per-function
// unimplemented/mustImplement flags.
func initssa() *ssa.Config {
	ssaExp.unimplemented = false
	ssaExp.mustImplement = true
	if ssaConfig == nil {
		ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
	}
	return ssaConfig
}

// shouldssa reports whether fn should be compiled with the SSA back end,
// honoring the SSATEST, GOSSAFUNC, GOSSAPKG, and GOSSAHASH environment
// variables.
func shouldssa(fn *Node) bool {
	switch Thearch.LinkArch.Name {
	default:
		// Only available for testing.
		if os.Getenv("SSATEST") == "" {
			return false
		}
		// Generally available.
	case "amd64":
	}
	if !ssaEnabled {
		return false
	}

	// Environment variable control of SSA CG
	// 1. IF GOSSAFUNC == current function name THEN
	//       compile this function with SSA and log output to ssa.html

	// 2. IF GOSSAHASH == "" THEN
	//       compile this function (and everything else) with SSA

	// 3. IF GOSSAHASH == "n" or "N"
	//       IF GOSSAPKG == current package name THEN
	//          compile this function (and everything in this package) with SSA
	//       ELSE
	//          use the old back end for this function.
	//       This is for compatibility with existing test harness and should go away.

	// 4. IF GOSSAHASH is a suffix of the binary-rendered SHA1 hash of the function name THEN
	//       compile this function with SSA
	//    ELSE
	//       compile this function with the old back end.

	// Plan is for 3 to be removed when the tests are revised.
	// SSA is now default, and is disabled by setting
	// GOSSAHASH to n or N, or selectively with strings of
	// 0 and 1.

	name := fn.Func.Nname.Sym.Name

	funcname := os.Getenv("GOSSAFUNC")
	if funcname != "" {
		// If GOSSAFUNC is set, compile only that function.
		return name == funcname
	}

	pkg := os.Getenv("GOSSAPKG")
	if pkg != "" {
		// If GOSSAPKG is set, compile only that package.
		return localpkg.Name == pkg
	}

	return initssa().DebugHashMatch("GOSSAHASH", name)
}

// buildssa builds an SSA function from fn and returns it, or nil if any
// front-end errors were reported while building.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Lineno)
	defer s.popLine()

	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	if fn.Func.Pragma&Nowritebarrier != 0 {
		s.noWB = true
	}
	// Propagate the first write-barrier line back to the function node
	// even on early returns.
	defer func() {
		if s.WBLineno != 0 {
			fn.Func.WBLineno = s.WBLineno
		}
	}()
	// TODO(khr): build config just once at the start of the compiler binary

	ssaExp.log = printssa

	s.config = initssa()
	s.f = s.config.NewFunc()
	s.f.Name = name
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}

	if name == os.Getenv("GOSSAFUNC") {
		// TODO: tempfile? it is handy to have the location
		// of this file be stable, so you can just reload in the browser.
		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}
	defer func() {
		if !printssa {
			s.config.HTML.Close()
		}
	}()

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
			if n.Class == PPARAM && s.canSSA(n) && n.Type.IsPtrShaped() {
				s.ptrargs = append(s.ptrargs, n)
				n.SetNotLiveAtEnd(true) // SSA takes care of this explicitly
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Unimplementedf("local variable with class %s unimplemented", classnames[n.Class])
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmts(fn.Func.Enter)
	s.stmts(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	// Check that we used all labels
	for name, lab := range s.labels {
		if !lab.used() && !lab.reported {
			yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
			lab.reported = true
		}
		if lab.used() && !lab.defined() && !lab.reported {
			yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
			lab.reported = true
		}
	}

	// Check any forward gotos. Non-forward gotos have already been checked.
	for _, n := range s.fwdGotos {
		lab := s.labels[n.Left.Sym.Name]
		// If the label is undefined, we have already printed an error.
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		}
	}

	if nerrors > 0 {
		s.f.Free()
		return nil
	}

	prelinkNumvars := s.f.NumValues()
	sparseDefState := s.locatePotentialPhiFunctions(fn)

	// Link up variable uses to variable definitions
	s.linkForwardReferences(sparseDefState)

	if ssa.BuildStats > 0 {
		s.f.LogStat("build", s.f.NumBlocks(), "blocks", prelinkNumvars, "vars_before",
			s.f.NumValues(), "vars_after", prelinkNumvars*s.f.NumBlocks(), "ssa_phi_loc_cutoff_score")
	}

	// Don't carry a reference to this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	return s.f
}

// state holds all the working state while converting one function's AST
// to SSA form.
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// gotos that jump forward; required for deferred checkgoto calls
	fwdGotos []*Node
	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	vars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []int32

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of FwdRef values.
	fwdRefs []*ssa.Value

	// list of PPARAMOUT (return) variables.
	returns []*Node

	// list of PPARAM SSA-able pointer-shaped args. We ensure these are live
	// throughout the function to help users avoid premature finalizers.
	ptrargs []*Node

	cgoUnsafeArgs bool
	noWB          bool
	WBLineno      int32 // line number of first write barrier. 0=no write barriers
}

// funcLine is a (function, line number) pair, used as the key for
// deduplicating panic calls (see state.panics).
type funcLine struct {
	f    *Node
	line int32
}

// ssaLabel tracks everything known about one Go label: its target block
// and the AST nodes needed for use/definition error reporting.
type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
	defNode        *Node      // label definition Node (OLABEL)
	// Label use Node (OGOTO, OBREAK, OCONTINUE).
	// Used only for error detection and reporting.
	// There might be multiple uses, but we only need to track one.
	useNode  *Node
	reported bool // reported indicates whether an error has already been reported for this label
}

// defined reports whether the label has a definition (OLABEL node).
func (l *ssaLabel) defined() bool { return l.defNode != nil }

// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
322 func (l *ssaLabel) used() bool { return l.useNode != nil } 323 324 // label returns the label associated with sym, creating it if necessary. 325 func (s *state) label(sym *Sym) *ssaLabel { 326 lab := s.labels[sym.Name] 327 if lab == nil { 328 lab = new(ssaLabel) 329 s.labels[sym.Name] = lab 330 } 331 return lab 332 } 333 334 func (s *state) Logf(msg string, args ...interface{}) { s.config.Logf(msg, args...) } 335 func (s *state) Log() bool { return s.config.Log() } 336 func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(s.peekLine(), msg, args...) } 337 func (s *state) Unimplementedf(msg string, args ...interface{}) { 338 s.config.Unimplementedf(s.peekLine(), msg, args...) 339 } 340 func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) } 341 func (s *state) Debug_checknil() bool { return s.config.Debug_checknil() } 342 343 var ( 344 // dummy node for the memory variable 345 memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}} 346 347 // dummy nodes for temporary variables 348 ptrVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}} 349 lenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}} 350 newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}} 351 capVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}} 352 typVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}} 353 idataVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}} 354 okVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}} 355 deltaVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}} 356 ) 357 358 // startBlock sets the current block we're generating code in to b. 359 func (s *state) startBlock(b *ssa.Block) { 360 if s.curBlock != nil { 361 s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock) 362 } 363 s.curBlock = b 364 s.vars = map[*Node]*ssa.Value{} 365 } 366 367 // endBlock marks the end of generating code for the current block. 
368 // Returns the (former) current block. Returns nil if there is no current 369 // block, i.e. if no code flows to the current execution point. 370 func (s *state) endBlock() *ssa.Block { 371 b := s.curBlock 372 if b == nil { 373 return nil 374 } 375 for len(s.defvars) <= int(b.ID) { 376 s.defvars = append(s.defvars, nil) 377 } 378 s.defvars[b.ID] = s.vars 379 s.curBlock = nil 380 s.vars = nil 381 b.Line = s.peekLine() 382 return b 383 } 384 385 // pushLine pushes a line number on the line number stack. 386 func (s *state) pushLine(line int32) { 387 if line == 0 { 388 // the frontend may emit node with line number missing, 389 // use the parent line number in this case. 390 line = s.peekLine() 391 if Debug['K'] != 0 { 392 Warn("buildssa: line 0") 393 } 394 } 395 s.line = append(s.line, line) 396 } 397 398 // popLine pops the top of the line number stack. 399 func (s *state) popLine() { 400 s.line = s.line[:len(s.line)-1] 401 } 402 403 // peekLine peek the top of the line number stack. 404 func (s *state) peekLine() int32 { 405 return s.line[len(s.line)-1] 406 } 407 408 func (s *state) Error(msg string, args ...interface{}) { 409 yyerrorl(s.peekLine(), msg, args...) 410 } 411 412 // newValue0 adds a new value with no arguments to the current block. 413 func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value { 414 return s.curBlock.NewValue0(s.peekLine(), op, t) 415 } 416 417 // newValue0A adds a new value with no arguments and an aux value to the current block. 418 func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value { 419 return s.curBlock.NewValue0A(s.peekLine(), op, t, aux) 420 } 421 422 // newValue0I adds a new value with no arguments and an auxint value to the current block. 423 func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value { 424 return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint) 425 } 426 427 // newValue1 adds a new value with one argument to the current block. 
428 func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value { 429 return s.curBlock.NewValue1(s.peekLine(), op, t, arg) 430 } 431 432 // newValue1A adds a new value with one argument and an aux value to the current block. 433 func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value { 434 return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg) 435 } 436 437 // newValue1I adds a new value with one argument and an auxint value to the current block. 438 func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value { 439 return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg) 440 } 441 442 // newValue2 adds a new value with two arguments to the current block. 443 func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value { 444 return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1) 445 } 446 447 // newValue2I adds a new value with two arguments and an auxint value to the current block. 448 func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value { 449 return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1) 450 } 451 452 // newValue3 adds a new value with three arguments to the current block. 453 func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value { 454 return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2) 455 } 456 457 // newValue3I adds a new value with three arguments and an auxint value to the current block. 458 func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value { 459 return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2) 460 } 461 462 // entryNewValue0 adds a new value with no arguments to the entry block. 
463 func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value { 464 return s.f.Entry.NewValue0(s.peekLine(), op, t) 465 } 466 467 // entryNewValue0A adds a new value with no arguments and an aux value to the entry block. 468 func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value { 469 return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux) 470 } 471 472 // entryNewValue0I adds a new value with no arguments and an auxint value to the entry block. 473 func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value { 474 return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint) 475 } 476 477 // entryNewValue1 adds a new value with one argument to the entry block. 478 func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value { 479 return s.f.Entry.NewValue1(s.peekLine(), op, t, arg) 480 } 481 482 // entryNewValue1 adds a new value with one argument and an auxint value to the entry block. 483 func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value { 484 return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg) 485 } 486 487 // entryNewValue1A adds a new value with one argument and an aux value to the entry block. 488 func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value { 489 return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg) 490 } 491 492 // entryNewValue2 adds a new value with two arguments to the entry block. 493 func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value { 494 return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1) 495 } 496 497 // const* routines add a new const value to the entry block. 
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekLine(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekLine(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekLine(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekLine(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekLine(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekLine(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekLine(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekLine(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekLine(), t, c)
}

// constInt returns an int constant of the target's native int width,
// checking that c fits in 32 bits on 32-bit targets.
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}

// stmts converts each statement in a to SSA and adds it to s.
func (s *state) stmts(a Nodes) {
	for _, x := range a.Slice() {
		s.stmt(x)
	}
}

// stmtList converts each statement in l to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Lineno)
	defer s.popLine()

	// If s.curBlock is nil, then we're about to generate dead code.
	// We can't just short-circuit here, though,
	// because we check labels and gotos as part of SSA generation.
	// Provide a block for the dead code so that we don't have
	// to add special cases everywhere else.
	if s.curBlock == nil {
		dead := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(dead)
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC, OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		// Calls that never return (throw, gopanic, selectgo, block)
		// terminate control flow with an exit block.
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC &&
			(compiling_runtime && n.Left.Sym.Name == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (n.Left.Sym.Name == "gopanic" || n.Left.Sym.Name == "selectgo" || n.Left.Sym.Name == "block")) {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
			// TODO: never rewrite OPANIC to OCALLFUNC in the
			// first place. Need to wait until all backends
			// go through SSA.
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno, 0, false)
		s.assign(n.List.Second(), resok, false, false, n.Lineno, 0, false)
		return

	case ODCL:
		if n.Left.Class == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym

		if isblanksym(sym) {
			// Empty identifier is valid but useless.
			// See issues 11589, 11593.
			return
		}

		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.Name.Defn; ctl != nil {
			switch ctl.Op {
			case OFOR, OSWITCH, OSELECT:
				s.labeledNodes[ctl] = lab
			}
		}

		if !lab.defined() {
			lab.defNode = n
		} else {
			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
			lab.reported = true
		}
		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// go to that label (we pretend "label:" is preceded by "goto label")
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		if !lab.used() {
			lab.useNode = n
		}

		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		} else {
			// Forward goto; validity is checked after the whole
			// function body has been converted (see buildssa).
			s.fwdGotos = append(s.fwdGotos, n)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

	case OAS, OASWB:
		// Check whether we can generate static data rather than code.
		// If so, ignore n and defer data generation until codegen.
		// Failure to do this causes writes to readonly symbols.
		if gen_as_init(n, true) {
			var data []*Node
			if s.f.StaticData != nil {
				data = s.f.StaticData.([]*Node)
			}
			s.f.StaticData = append(data, n)
			return
		}

		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//	VARDEF x
			//	COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		var t *Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) {
					s.append(rhs, true)
					return
				}
			}
		}
		var r *ssa.Value
		var isVolatile bool
		needwb := n.Op == OASWB && rhs != nil
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r, isVolatile = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}
		if rhs != nil && rhs.Op == OAPPEND {
			// The frontend gets rid of the write barrier to enable the special OAPPEND
			// handling above, but since this is not a special case, we need it.
			// TODO: just add a ptr graying to the end of growslice?
			// TODO: check whether we need to provide special handling and a write barrier
			// for ODOTTYPE and ORECV also.
			// They get similar wb-removal treatment in walk.go:OAS.
			needwb = true
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//    tmp = len(*p)
			//    (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, needwb, deref, n.Lineno, skip, isVolatile)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}

		s.startBlock(bThen)
		s.stmts(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Left.Sym

	case OCONTINUE, OBREAK:
		var op string
		var to *ssa.Block
		switch n.Op {
		case OCONTINUE:
			op = "continue"
			to = s.continueTo
		case OBREAK:
			op = "break"
			to = s.breakTo
		}
		if n.Left == nil {
			// plain break/continue
			if to == nil {
				s.Error("%s is not in a loop", op)
				return
			}
			// nothing to do; "to" is already the correct target
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			if !lab.used() {
				lab.useNode = n.Left
			}
			if !lab.defined() {
				s.Error("%s label not defined: %v", op, sym)
				lab.reported = true
				return
			}
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
			if to == nil {
				// Valid label but not usable with a break/continue here, e.g.:
				// for {
				// 	continue abc
				// }
				// abc:
				// for {}
				s.Error("invalid %s label %v", op, sym)
				lab.reported = true
				return
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)

	case OFOR:
		// OFOR: for Ninit; Left; Right { Nbody }
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test
		b := s.endBlock()
		b.AddEdgeTo(bCond)

		// generate code to test condition
		s.startBlock(bCond)
		if n.Left != nil {
			s.condBranch(n.Left, bBody, bEnd, 1)
		} else {
			// no condition: infinite loop, fall straight into the body
			b := s.endBlock()
			b.Kind = ssa.BlockPlain
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmts(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}
		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmts(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// OSWITCH never falls through (s.curBlock == nil here).
		// OSELECT does not fall through if we're calling selectgo.
		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
		// In those latter cases, go to the code after the select.
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken {
			s.Fatalf("VARLIVE variable %s must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Unimplementedf("unhandled stmt %s", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmts(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Keep input pointer args live until the return. This is a bandaid
	// fix for 1.7 for what will become in 1.8 explicit runtime.KeepAlive calls.
	// For <= 1.7 we guarantee that pointer input arguments live to the end of
	// the function to prevent premature (from the user's point of view)
	// execution of finalizers. See issue 15277.
	// TODO: remove for 1.8?
	for _, n := range s.ptrargs {
		s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}

// opAndType pairs a generic front-end op with an element type; used as
// the key of opToSSA.
type opAndType struct {
	op    Op
	etype EType
}

// opToSSA maps a generic (op, element type) pair to the corresponding
// width- and sign-specific SSA opcode.
var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
1062 opAndType{OCOM, TUINT64}: ssa.OpCom64, 1063 1064 opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag, 1065 opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag, 1066 opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal, 1067 opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal, 1068 1069 opAndType{OMUL, TINT8}: ssa.OpMul8, 1070 opAndType{OMUL, TUINT8}: ssa.OpMul8, 1071 opAndType{OMUL, TINT16}: ssa.OpMul16, 1072 opAndType{OMUL, TUINT16}: ssa.OpMul16, 1073 opAndType{OMUL, TINT32}: ssa.OpMul32, 1074 opAndType{OMUL, TUINT32}: ssa.OpMul32, 1075 opAndType{OMUL, TINT64}: ssa.OpMul64, 1076 opAndType{OMUL, TUINT64}: ssa.OpMul64, 1077 opAndType{OMUL, TFLOAT32}: ssa.OpMul32F, 1078 opAndType{OMUL, TFLOAT64}: ssa.OpMul64F, 1079 1080 opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F, 1081 opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F, 1082 1083 opAndType{OHMUL, TINT8}: ssa.OpHmul8, 1084 opAndType{OHMUL, TUINT8}: ssa.OpHmul8u, 1085 opAndType{OHMUL, TINT16}: ssa.OpHmul16, 1086 opAndType{OHMUL, TUINT16}: ssa.OpHmul16u, 1087 opAndType{OHMUL, TINT32}: ssa.OpHmul32, 1088 opAndType{OHMUL, TUINT32}: ssa.OpHmul32u, 1089 1090 opAndType{ODIV, TINT8}: ssa.OpDiv8, 1091 opAndType{ODIV, TUINT8}: ssa.OpDiv8u, 1092 opAndType{ODIV, TINT16}: ssa.OpDiv16, 1093 opAndType{ODIV, TUINT16}: ssa.OpDiv16u, 1094 opAndType{ODIV, TINT32}: ssa.OpDiv32, 1095 opAndType{ODIV, TUINT32}: ssa.OpDiv32u, 1096 opAndType{ODIV, TINT64}: ssa.OpDiv64, 1097 opAndType{ODIV, TUINT64}: ssa.OpDiv64u, 1098 1099 opAndType{OMOD, TINT8}: ssa.OpMod8, 1100 opAndType{OMOD, TUINT8}: ssa.OpMod8u, 1101 opAndType{OMOD, TINT16}: ssa.OpMod16, 1102 opAndType{OMOD, TUINT16}: ssa.OpMod16u, 1103 opAndType{OMOD, TINT32}: ssa.OpMod32, 1104 opAndType{OMOD, TUINT32}: ssa.OpMod32u, 1105 opAndType{OMOD, TINT64}: ssa.OpMod64, 1106 opAndType{OMOD, TUINT64}: ssa.OpMod64u, 1107 1108 opAndType{OAND, TINT8}: ssa.OpAnd8, 1109 opAndType{OAND, TUINT8}: ssa.OpAnd8, 1110 opAndType{OAND, TINT16}: ssa.OpAnd16, 1111 opAndType{OAND, TUINT16}: ssa.OpAnd16, 1112 opAndType{OAND, TINT32}: 
ssa.OpAnd32, 1113 opAndType{OAND, TUINT32}: ssa.OpAnd32, 1114 opAndType{OAND, TINT64}: ssa.OpAnd64, 1115 opAndType{OAND, TUINT64}: ssa.OpAnd64, 1116 1117 opAndType{OOR, TINT8}: ssa.OpOr8, 1118 opAndType{OOR, TUINT8}: ssa.OpOr8, 1119 opAndType{OOR, TINT16}: ssa.OpOr16, 1120 opAndType{OOR, TUINT16}: ssa.OpOr16, 1121 opAndType{OOR, TINT32}: ssa.OpOr32, 1122 opAndType{OOR, TUINT32}: ssa.OpOr32, 1123 opAndType{OOR, TINT64}: ssa.OpOr64, 1124 opAndType{OOR, TUINT64}: ssa.OpOr64, 1125 1126 opAndType{OXOR, TINT8}: ssa.OpXor8, 1127 opAndType{OXOR, TUINT8}: ssa.OpXor8, 1128 opAndType{OXOR, TINT16}: ssa.OpXor16, 1129 opAndType{OXOR, TUINT16}: ssa.OpXor16, 1130 opAndType{OXOR, TINT32}: ssa.OpXor32, 1131 opAndType{OXOR, TUINT32}: ssa.OpXor32, 1132 opAndType{OXOR, TINT64}: ssa.OpXor64, 1133 opAndType{OXOR, TUINT64}: ssa.OpXor64, 1134 1135 opAndType{OEQ, TBOOL}: ssa.OpEqB, 1136 opAndType{OEQ, TINT8}: ssa.OpEq8, 1137 opAndType{OEQ, TUINT8}: ssa.OpEq8, 1138 opAndType{OEQ, TINT16}: ssa.OpEq16, 1139 opAndType{OEQ, TUINT16}: ssa.OpEq16, 1140 opAndType{OEQ, TINT32}: ssa.OpEq32, 1141 opAndType{OEQ, TUINT32}: ssa.OpEq32, 1142 opAndType{OEQ, TINT64}: ssa.OpEq64, 1143 opAndType{OEQ, TUINT64}: ssa.OpEq64, 1144 opAndType{OEQ, TINTER}: ssa.OpEqInter, 1145 opAndType{OEQ, TSLICE}: ssa.OpEqSlice, 1146 opAndType{OEQ, TFUNC}: ssa.OpEqPtr, 1147 opAndType{OEQ, TMAP}: ssa.OpEqPtr, 1148 opAndType{OEQ, TCHAN}: ssa.OpEqPtr, 1149 opAndType{OEQ, TPTR64}: ssa.OpEqPtr, 1150 opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr, 1151 opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr, 1152 opAndType{OEQ, TFLOAT64}: ssa.OpEq64F, 1153 opAndType{OEQ, TFLOAT32}: ssa.OpEq32F, 1154 1155 opAndType{ONE, TBOOL}: ssa.OpNeqB, 1156 opAndType{ONE, TINT8}: ssa.OpNeq8, 1157 opAndType{ONE, TUINT8}: ssa.OpNeq8, 1158 opAndType{ONE, TINT16}: ssa.OpNeq16, 1159 opAndType{ONE, TUINT16}: ssa.OpNeq16, 1160 opAndType{ONE, TINT32}: ssa.OpNeq32, 1161 opAndType{ONE, TUINT32}: ssa.OpNeq32, 1162 opAndType{ONE, TINT64}: ssa.OpNeq64, 1163 opAndType{ONE, TUINT64}: 
ssa.OpNeq64, 1164 opAndType{ONE, TINTER}: ssa.OpNeqInter, 1165 opAndType{ONE, TSLICE}: ssa.OpNeqSlice, 1166 opAndType{ONE, TFUNC}: ssa.OpNeqPtr, 1167 opAndType{ONE, TMAP}: ssa.OpNeqPtr, 1168 opAndType{ONE, TCHAN}: ssa.OpNeqPtr, 1169 opAndType{ONE, TPTR64}: ssa.OpNeqPtr, 1170 opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr, 1171 opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr, 1172 opAndType{ONE, TFLOAT64}: ssa.OpNeq64F, 1173 opAndType{ONE, TFLOAT32}: ssa.OpNeq32F, 1174 1175 opAndType{OLT, TINT8}: ssa.OpLess8, 1176 opAndType{OLT, TUINT8}: ssa.OpLess8U, 1177 opAndType{OLT, TINT16}: ssa.OpLess16, 1178 opAndType{OLT, TUINT16}: ssa.OpLess16U, 1179 opAndType{OLT, TINT32}: ssa.OpLess32, 1180 opAndType{OLT, TUINT32}: ssa.OpLess32U, 1181 opAndType{OLT, TINT64}: ssa.OpLess64, 1182 opAndType{OLT, TUINT64}: ssa.OpLess64U, 1183 opAndType{OLT, TFLOAT64}: ssa.OpLess64F, 1184 opAndType{OLT, TFLOAT32}: ssa.OpLess32F, 1185 1186 opAndType{OGT, TINT8}: ssa.OpGreater8, 1187 opAndType{OGT, TUINT8}: ssa.OpGreater8U, 1188 opAndType{OGT, TINT16}: ssa.OpGreater16, 1189 opAndType{OGT, TUINT16}: ssa.OpGreater16U, 1190 opAndType{OGT, TINT32}: ssa.OpGreater32, 1191 opAndType{OGT, TUINT32}: ssa.OpGreater32U, 1192 opAndType{OGT, TINT64}: ssa.OpGreater64, 1193 opAndType{OGT, TUINT64}: ssa.OpGreater64U, 1194 opAndType{OGT, TFLOAT64}: ssa.OpGreater64F, 1195 opAndType{OGT, TFLOAT32}: ssa.OpGreater32F, 1196 1197 opAndType{OLE, TINT8}: ssa.OpLeq8, 1198 opAndType{OLE, TUINT8}: ssa.OpLeq8U, 1199 opAndType{OLE, TINT16}: ssa.OpLeq16, 1200 opAndType{OLE, TUINT16}: ssa.OpLeq16U, 1201 opAndType{OLE, TINT32}: ssa.OpLeq32, 1202 opAndType{OLE, TUINT32}: ssa.OpLeq32U, 1203 opAndType{OLE, TINT64}: ssa.OpLeq64, 1204 opAndType{OLE, TUINT64}: ssa.OpLeq64U, 1205 opAndType{OLE, TFLOAT64}: ssa.OpLeq64F, 1206 opAndType{OLE, TFLOAT32}: ssa.OpLeq32F, 1207 1208 opAndType{OGE, TINT8}: ssa.OpGeq8, 1209 opAndType{OGE, TUINT8}: ssa.OpGeq8U, 1210 opAndType{OGE, TINT16}: ssa.OpGeq16, 1211 opAndType{OGE, TUINT16}: ssa.OpGeq16U, 1212 
opAndType{OGE, TINT32}: ssa.OpGeq32, 1213 opAndType{OGE, TUINT32}: ssa.OpGeq32U, 1214 opAndType{OGE, TINT64}: ssa.OpGeq64, 1215 opAndType{OGE, TUINT64}: ssa.OpGeq64U, 1216 opAndType{OGE, TFLOAT64}: ssa.OpGeq64F, 1217 opAndType{OGE, TFLOAT32}: ssa.OpGeq32F, 1218 1219 opAndType{OLROT, TUINT8}: ssa.OpLrot8, 1220 opAndType{OLROT, TUINT16}: ssa.OpLrot16, 1221 opAndType{OLROT, TUINT32}: ssa.OpLrot32, 1222 opAndType{OLROT, TUINT64}: ssa.OpLrot64, 1223 1224 opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt, 1225 } 1226 1227 func (s *state) concreteEtype(t *Type) EType { 1228 e := t.Etype 1229 switch e { 1230 default: 1231 return e 1232 case TINT: 1233 if s.config.IntSize == 8 { 1234 return TINT64 1235 } 1236 return TINT32 1237 case TUINT: 1238 if s.config.IntSize == 8 { 1239 return TUINT64 1240 } 1241 return TUINT32 1242 case TUINTPTR: 1243 if s.config.PtrSize == 8 { 1244 return TUINT64 1245 } 1246 return TUINT32 1247 } 1248 } 1249 1250 func (s *state) ssaOp(op Op, t *Type) ssa.Op { 1251 etype := s.concreteEtype(t) 1252 x, ok := opToSSA[opAndType{op, etype}] 1253 if !ok { 1254 s.Unimplementedf("unhandled binary op %s %s", op, etype) 1255 } 1256 return x 1257 } 1258 1259 func floatForComplex(t *Type) *Type { 1260 if t.Size() == 8 { 1261 return Types[TFLOAT32] 1262 } else { 1263 return Types[TFLOAT64] 1264 } 1265 } 1266 1267 type opAndTwoTypes struct { 1268 op Op 1269 etype1 EType 1270 etype2 EType 1271 } 1272 1273 type twoTypes struct { 1274 etype1 EType 1275 etype2 EType 1276 } 1277 1278 type twoOpsAndType struct { 1279 op1 ssa.Op 1280 op2 ssa.Op 1281 intermediateType EType 1282 } 1283 1284 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ 1285 1286 twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32}, 1287 twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32}, 1288 twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32}, 1289 twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, 
ssa.OpCvt64to32F, TINT64}, 1290 1291 twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32}, 1292 twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32}, 1293 twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32}, 1294 twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64}, 1295 1296 twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, 1297 twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, 1298 twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32}, 1299 twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64}, 1300 1301 twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, 1302 twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, 1303 twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32}, 1304 twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64}, 1305 // unsigned 1306 twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32}, 1307 twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32}, 1308 twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned 1309 twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead 1310 1311 twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32}, 1312 twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32}, 1313 twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned 1314 twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, 
branchy code expansion instead 1315 1316 twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, 1317 twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, 1318 twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned 1319 twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead 1320 1321 twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, 1322 twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, 1323 twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned 1324 twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead 1325 1326 // float 1327 twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32}, 1328 twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64}, 1329 twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32}, 1330 twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64}, 1331 } 1332 1333 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{ 1334 opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8, 1335 opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8, 1336 opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16, 1337 opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16, 1338 opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32, 1339 opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32, 1340 opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64, 1341 opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64, 1342 1343 opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8, 1344 opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8, 1345 opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16, 1346 
opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16, 1347 opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32, 1348 opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32, 1349 opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64, 1350 opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64, 1351 1352 opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8, 1353 opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8, 1354 opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16, 1355 opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16, 1356 opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32, 1357 opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32, 1358 opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64, 1359 opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64, 1360 1361 opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8, 1362 opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8, 1363 opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16, 1364 opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16, 1365 opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32, 1366 opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32, 1367 opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64, 1368 opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64, 1369 1370 opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8, 1371 opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8, 1372 opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16, 1373 opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16, 1374 opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32, 1375 opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32, 1376 opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64, 1377 opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64, 1378 1379 opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8, 1380 opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8, 1381 opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16, 1382 opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16, 1383 opAndTwoTypes{ORSH, TINT16, TUINT32}: 
ssa.OpRsh16x32, 1384 opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32, 1385 opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64, 1386 opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64, 1387 1388 opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8, 1389 opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8, 1390 opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16, 1391 opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16, 1392 opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32, 1393 opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32, 1394 opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64, 1395 opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64, 1396 1397 opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8, 1398 opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8, 1399 opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16, 1400 opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16, 1401 opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32, 1402 opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32, 1403 opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64, 1404 opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64, 1405 } 1406 1407 func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op { 1408 etype1 := s.concreteEtype(t) 1409 etype2 := s.concreteEtype(u) 1410 x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}] 1411 if !ok { 1412 s.Unimplementedf("unhandled shift op %s etype=%s/%s", op, etype1, etype2) 1413 } 1414 return x 1415 } 1416 1417 func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op { 1418 etype1 := s.concreteEtype(t) 1419 x, ok := opToSSA[opAndType{op, etype1}] 1420 if !ok { 1421 s.Unimplementedf("unhandled rotate op %s etype=%s", op, etype1) 1422 } 1423 return x 1424 } 1425 1426 // expr converts the expression n to ssa, adds it to s and returns the ssa result. 
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Lineno)
		defer s.popLine()
	}

	// Evaluate any init statements attached to this expression first.
	s.stmtList(n.Ninit)
	switch n.Op {
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Left.Sym})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym)
			aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym}
			return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			// SSA-able variable: use its current value directly.
			return s.variable(n, n.Type)
		}
		// Otherwise load it from memory.
		addr, _ := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr, _ := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			// Integer constant; materialize at the width of n's type.
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			// Complex constant: build from its two float halves.
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}

		default:
			s.Unimplementedf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsInteger() && tt.IsInteger() {
			// Integer <-> integer: copy, truncate, or extend
			// depending on the relative widths.
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				// 10*from-size + to-size encodes the (from, to) byte-width pair.
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %s -> %s", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %s -> %s", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer sign extension %s -> %s", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}

		if ft.IsFloat() || tt.IsFloat() {
			// Float conversions are table-driven; see fpConvOpToSSA.
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if !ok {
				s.Fatalf("weird float conversion %s -> %s", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValue1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValue1(op1, n.Type, x)
				}
				return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
			if ft.IsInteger() {
				// therefore tt is float32 or float64, and ft is also unsigned
				if tt.Size() == 4 {
					return s.uint64Tofloat32(n, x, ft, tt)
				}
				if tt.Size() == 8 {
					return s.uint64Tofloat64(n, x, ft, tt)
				}
				s.Fatalf("weird unsigned integer to float conversion %s -> %s", ft, tt)
			}
			// therefore ft is float32 or float64, and tt is unsigned integer
			if ft.Size() == 4 {
				return s.float32ToUint64(n, x, ft, tt)
			}
			if ft.Size() == 8 {
				return s.float64ToUint64(n, x, ft, tt)
			}
			s.Fatalf("weird float to unsigned integer conversion %s -> %s", ft, tt)
			return nil
		}

		if ft.IsComplex() && tt.IsComplex() {
			// Complex <-> complex: convert each float half separately.
			var op ssa.Op
			if ft.Size() == tt.Size() {
				op = ssa.OpCopy
			} else if ft.Size() == 8 && tt.Size() == 16 {
				op = ssa.OpCvt32Fto64F
			} else if ft.Size() == 16 && tt.Size() == 8 {
				op = ssa.OpCvt64Fto32F
			} else {
				s.Fatalf("weird complex conversion %s -> %s", ft, tt)
			}
			ftp := floatForComplex(ft)
			ttp := floatForComplex(tt)
			return s.newValue2(ssa.OpComplexMake, tt,
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
		}

		s.Unimplementedf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
		return nil

	case ODOTTYPE:
		res, _ := s.dottype(n, false)
		return res

	// binary ops
	case OLT, OEQ, ONE, OLE, OGE, OGT:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Left.Type.IsComplex() {
			// Complex values support only == and !=: compare the
			// real and imaginary parts and AND the results.
			pt := floatForComplex(n.Left.Type)
			op := s.ssaOp(OEQ, pt)
			r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
			i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
			c := s.newValue2(ssa.OpAnd8, Types[TBOOL], r, i)
			switch n.Op {
			case OEQ:
				return c
			case ONE:
				return s.newValue1(ssa.OpNot, Types[TBOOL], c)
			default:
				s.Fatalf("ordered complex compare %s", n.Op)
			}
		}
		return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b)
	case OMUL:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancelation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			// (ar+ai*i) * (br+bi*i) = (ar*br - ai*bi) + (ar*bi + ai*br)*i
			xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
			ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))

			if pt != wt { // Narrow to store back
				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
			}

			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)

	case ODIV:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			// TODO this is not executed because the front-end substitutes a runtime call.
			// That probably ought to change; with modest optimization the widen/narrow
			// conversions could all be elided in larger expression trees.
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			divop := ssa.OpDiv64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancelation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
			xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
			ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))

			// TODO not sure if this is best done in wide precision or narrow
			// Double-rounding might be an issue.
			// Note that the pre-SSA implementation does the entire calculation
			// in wide format, so wide is compatible.
			xreal = s.newValue2(divop, wt, xreal, denom)
			ximag = s.newValue2(divop, wt, ximag, denom)

			if pt != wt { // Narrow to store back
				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
			}
			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		if n.Type.IsFloat() {
			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		} else {
			// do a size-appropriate check for zero
			cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
			s.check(cmp, panicdivide)
			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		}
	case OMOD:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		// do a size-appropriate check for zero
		cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
		s.check(cmp, panicdivide)
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OADD, OSUB:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			// Componentwise add/sub of the real and imaginary parts.
			pt := floatForComplex(n.Type)
			op := s.ssaOp(n.Op, pt)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
				s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OAND, OOR, OHMUL, OXOR:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OLSH, ORSH:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
	case OLROT:
		a := s.expr(n.Left)
		// Rotate distance is a constant; it must lie in [1, bits-1].
		i := n.Right.Int64()
		if i <= 0 || i >= n.Type.Size()*8 {
			s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i)
		}
		return s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a)
	case OANDAND, OOROR:
		// To implement OANDAND (and OOROR), we introduce a
		// new temporary variable to hold the result. The
		// variable is associated with the OANDAND node in the
		// s.vars table (normally variables are only
		// associated with ONAME nodes). We convert
		//     A && B
		// to
		//     var = A
		//     if var {
		//         var = B
		//     }
		// Using var in the subsequent block introduces the
		// necessary phi variable.
		el := s.expr(n.Left)
		s.vars[n] = el

		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.SetControl(el)
		// In theory, we should set b.Likely here based on context.
		// However, gc only gives us likeliness hints
		// in a single place, for plain OIF statements,
		// and passing around context is finnicky, so don't bother for now.

		bRight := s.f.NewBlock(ssa.BlockPlain)
		bResult := s.f.NewBlock(ssa.BlockPlain)
		if n.Op == OANDAND {
			b.AddEdgeTo(bRight)
			b.AddEdgeTo(bResult)
		} else if n.Op == OOROR {
			b.AddEdgeTo(bResult)
			b.AddEdgeTo(bRight)
		}

		s.startBlock(bRight)
		er := s.expr(n.Right)
		s.vars[n] = er

		b = s.endBlock()
		b.AddEdgeTo(bResult)

		s.startBlock(bResult)
		return s.variable(n, Types[TBOOL])
	case OCOMPLEX:
		r := s.expr(n.Left)
		i := s.expr(n.Right)
		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)

	// unary ops
	case OMINUS:
		a := s.expr(n.Left)
		if n.Type.IsComplex() {
			// Negate both the real and imaginary parts.
			tp := floatForComplex(n.Type)
			negop := s.ssaOp(n.Op, tp)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
		}
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case ONOT, OCOM, OSQRT:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case OIMAG, OREAL:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
	case OPLUS:
		// Unary plus is the identity.
		return s.expr(n.Left)

	case OADDR:
		a, _ := s.addr(n.Left, n.Bounded)
		// Note we know the volatile result is false because you can't write &f() in Go.
		return a

	case OINDREG:
		if int(n.Reg) != Thearch.REGSP {
			s.Unimplementedf("OINDREG of non-SP register %s in expr: %v", obj.Rconv(int(n.Reg)), n)
			return nil
		}
		addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())

	case OIND:
		p := s.exprPtr(n.Left, false, n.Lineno)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case ODOT:
		t := n.Left.Type
		if canSSAType(t) {
			// Select the field directly out of the SSA'd struct value.
			v := s.expr(n.Left)
			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
		}
		p, _ := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case ODOTPTR:
		p := s.exprPtr(n.Left, false, n.Lineno)
		p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case OINDEX:
		switch {
		case n.Left.Type.IsString():
			a := s.expr(n.Left)
			i := s.expr(n.Right)
			i = s.extendIndex(i)
			if !n.Bounded {
				len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
				s.boundsCheck(i, len)
			}
			ptrtyp := Ptrto(Types[TUINT8])
			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
			if Isconst(n.Right, CTINT) {
				// Constant index: fold into the pointer offset.
				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
			} else {
				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
			}
			return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
		case n.Left.Type.IsSlice():
			p, _ := s.addr(n, false)
			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
		case n.Left.Type.IsArray():
			// TODO: fix when we can SSA arrays of length 1.
			p, _ := s.addr(n, false)
			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
		default:
			s.Fatalf("bad type for index %v", n.Left.Type)
			return nil
		}

	case OLEN, OCAP:
		switch {
		case n.Left.Type.IsSlice():
			op := ssa.OpSliceLen
			if n.Op == OCAP {
				op = ssa.OpSliceCap
			}
			return s.newValue1(op, Types[TINT], s.expr(n.Left))
		case n.Left.Type.IsString(): // string; not reachable for OCAP
			return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left))
		case n.Left.Type.IsMap(), n.Left.Type.IsChan():
			return s.referenceTypeBuiltin(n, s.expr(n.Left))
		default: // array
			return s.constInt(Types[TINT], n.Left.Type.NumElem())
		}

	case OSPTR:
		a := s.expr(n.Left)
		if n.Left.Type.IsSlice() {
			return s.newValue1(ssa.OpSlicePtr, n.Type, a)
		} else {
			return s.newValue1(ssa.OpStringPtr, n.Type, a)
		}

	case OITAB:
		a := s.expr(n.Left)
		return s.newValue1(ssa.OpITab, n.Type, a)

	case OEFACE:
		tab := s.expr(n.Left)
		data := s.expr(n.Right)
		// The frontend allows putting things like struct{*byte} in
		// the data portion of an eface. But we don't want struct{*byte}
		// as a register type because (among other reasons) the liveness
		// analysis is confused by the "fat" variables that result from
		// such types being spilled.
		// So here we ensure that we are selecting the underlying pointer
		// when we build an eface.
		// TODO: get rid of this now that structs can be SSA'd?
		for !data.Type.IsPtrShaped() {
			switch {
			case data.Type.IsArray():
				data = s.newValue1I(ssa.OpArrayIndex, data.Type.ElemType(), 0, data)
			case data.Type.IsStruct():
				for i := data.Type.NumFields() - 1; i >= 0; i-- {
					f := data.Type.FieldType(i)
					if f.Size() == 0 {
						// eface type could also be struct{p *byte; q [0]int}
						continue
					}
					data = s.newValue1I(ssa.OpStructSelect, f, int64(i), data)
					break
				}
			default:
				s.Fatalf("type being put into an eface isn't a pointer")
			}
		}
		return s.newValue2(ssa.OpIMake, n.Type, tab, data)

	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
		v := s.expr(n.Left)
		var i, j, k *ssa.Value
		low, high, max := n.SliceBounds()
		if low != nil {
			i = s.extendIndex(s.expr(low))
		}
		if high != nil {
			j = s.extendIndex(s.expr(high))
		}
		if max != nil {
			k = s.extendIndex(s.expr(max))
		}
		p, l, c := s.slice(n.Left.Type, v, i, j, k)
		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)

	case OSLICESTR:
		v := s.expr(n.Left)
		var i, j *ssa.Value
		low, high, _ := n.SliceBounds()
		if low != nil {
			i = s.extendIndex(s.expr(low))
		}
		if high != nil {
			j = s.extendIndex(s.expr(high))
		}
		p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
		return s.newValue2(ssa.OpStringMake, n.Type, p, l)

	case OCALLFUNC:
		if isIntrinsicCall1(n) {
			return s.intrinsicCall1(n)
		}
		fallthrough

	case OCALLINTER, OCALLMETH:
		a := s.call(n, callNormal)
		return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())

	case OGETG:
		return s.newValue1(ssa.OpGetG, n.Type, s.mem())

	case OAPPEND:
		return s.append(n, false)

	default:
		s.Unimplementedf("unhandled expr %s", n.Op)
		return nil
	}
}

// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
func (s *state) append(n *Node, inplace bool) *ssa.Value {
	// If inplace is false, process as expression "append(s, e1, e2, e3)":
	//
	// ptr, len, cap := s
	// newlen := len + 3
	// if newlen > cap {
	//     ptr, len, cap = growslice(s, newlen)
	//     newlen = len + 3 // recalculate to avoid a spill
	// }
	// // with write barriers, if needed:
	// *(ptr+len) = e1
	// *(ptr+len+1) = e2
	// *(ptr+len+2) = e3
	// return makeslice(ptr, newlen, cap)
	//
	//
	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
	//
	// a := &s
	// ptr, len, cap := s
	// newlen := len + 3
	// if newlen > cap {
	//    newptr, len, newcap = growslice(ptr, len, cap, newlen)
	//    vardef(a)       // if necessary, advise liveness we are writing a new a
	//    *a.cap = newcap // write before ptr to avoid a spill
	//    *a.ptr = newptr // with write barrier
	// }
	// newlen = len + 3 // recalculate to avoid a spill
	// *a.len = newlen
	// // with write barriers, if needed:
	// *(ptr+len) = e1
	// *(ptr+len+1) = e2
	// *(ptr+len+2) = e3

	et := n.Type.Elem()
	pt := Ptrto(et)

	// Evaluate slice
	sn := n.List.First() // the slice node is the first in the list

	var slice, addr *ssa.Value
	if inplace {
		// In-place append writes back through the slice header's address,
		// so we need both the address and the current header value.
		addr, _ = s.addr(sn, false)
		slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	} else {
		slice = s.expr(sn)
	}

	// Allocate new blocks
	grow := s.f.NewBlock(ssa.BlockPlain)
	assign := s.f.NewBlock(ssa.BlockPlain)

	// Decide if we need to grow
	nargs := int64(n.List.Len() - 1)
	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
	l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
	c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
	nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))

	cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c)
	// Record current values in s.vars so phis are generated at the join
	// point (the assign block) for whatever differs between the two paths.
	s.vars[&ptrVar] = p

	if !inplace {
		s.vars[&newlenVar] = nl
		s.vars[&capVar] = c
	} else {
		s.vars[&lenVar] = l
	}

	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely
	b.SetControl(cmp)
	b.AddEdgeTo(grow)
	b.AddEdgeTo(assign)

	// Call growslice
	s.startBlock(grow)
	taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(n.Type.Elem())}, s.sb)

	// r holds the three results of growslice: new ptr, len, cap.
	r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)

	if inplace {
		if sn.Op == ONAME {
			// Tell liveness we're about to build a new slice
			s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem())
		}
		capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_cap), addr)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem())
		s.insertWBstore(pt, addr, r[0], n.Lineno, 0)
		// load the value we just stored to avoid having to spill it
		s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
		s.vars[&lenVar] = r[1] // avoid a spill in the fast path
	} else {
		s.vars[&ptrVar] = r[0]
		s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs))
		s.vars[&capVar] = r[2]
	}

	b = s.endBlock()
	b.AddEdgeTo(assign)

	// assign new elements to slots
	s.startBlock(assign)

	if inplace {
		l = s.variable(&lenVar, Types[TINT]) // generates phi for len
		nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
		lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem())
	}

	// Evaluate args
	type argRec struct {
		// if store is true, we're appending the value v.  If false, we're appending the
		// value at *v.  If store==false, isVolatile reports whether the source
		// is in the outargs section of the stack frame.
		v          *ssa.Value
		store      bool
		isVolatile bool
	}
	args := make([]argRec, 0, nargs)
	for _, n := range n.List.Slice()[1:] {
		if canSSAType(n.Type) {
			args = append(args, argRec{v: s.expr(n), store: true})
		} else {
			v, isVolatile := s.addr(n, false)
			args = append(args, argRec{v: v, isVolatile: isVolatile})
		}
	}

	p = s.variable(&ptrVar, pt) // generates phi for ptr
	if !inplace {
		nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl
		c = s.variable(&capVar, Types[TINT])     // generates phi for cap
	}
	// p2 points at the first free slot; element i is stored at p2 + i*elemsize.
	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
	// TODO: just one write barrier call for all of these writes?
	// TODO: maybe just one writeBarrier.enabled check?
	for i, arg := range args {
		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
		if arg.store {
			if haspointers(et) {
				s.insertWBstore(et, addr, arg.v, n.Lineno, 0)
			} else {
				s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
			}
		} else {
			if haspointers(et) {
				s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile)
			} else {
				s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
			}
		}
	}

	// Remove the temporaries from s.vars so they don't leak out of this node.
	delete(s.vars, &ptrVar)
	if inplace {
		delete(s.vars, &lenVar)
		return nil
	}
	delete(s.vars, &newlenVar)
	delete(s.vars, &capVar)
	// make result
	return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
}

// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
	if cond.Op == OANDAND {
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, mid, no, max8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Right, yes, no, likely)
		return
		// Note: if likely==1, then both recursive calls pass 1.
		// If likely==-1, then we don't have enough information to decide
		// whether the first branch is likely or not. So we pass 0 for
		// the likeliness of the first branch.
		// TODO: have the frontend give us branch prediction hints for
		// OANDAND and OOROR nodes (if it ever has such info).
	}
	if cond.Op == OOROR {
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, yes, mid, min8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Right, yes, no, likely)
		return
		// Note: if likely==-1, then both recursive calls pass -1.
		// If likely==1, then we don't have enough info to decide
		// the likelihood of the first branch.
	}
	if cond.Op == ONOT {
		// !x inverts the branch targets and the likeliness hint.
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, no, yes, -likely)
		return
	}
	// Fallback: evaluate the condition to a value and branch on it.
	c := s.expr(cond)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(c)
	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
	b.AddEdgeTo(yes)
	b.AddEdgeTo(no)
}

// skipMask selects which parts of a multi-word value (e.g. a slice header)
// an assignment may skip writing.
type skipMask uint8

const (
	skipPtr skipMask = 1 << iota
	skipLen
	skipCap
)

// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage.
// Include a write barrier if wb is true.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask, rightIsVolatile bool) {
	if left.Op == ONAME && isblank(left) {
		// Assignment to _ has no effect.
		return
	}
	t := left.Type
	dowidth(t)
	if s.canSSA(left) {
		if deref {
			s.Fatalf("can SSA LHS %s but not RHS %s", left, right)
		}
		if left.Op == ODOT {
			// We're assigning to a field of an ssa-able value.
			// We need to build a new structure with the new value for the
			// field we're assigning and the old values for the other fields.
			// For instance:
			// type T struct {a, b, c int}
			// var T x
			// x.b = 5
			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}

			// Grab information about the structure type.
			t := left.Left.Type
			nf := t.NumFields()
			idx := fieldIdx(left)

			// Grab old value of structure.
			old := s.expr(left.Left)

			// Make new structure.
			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)

			// Add fields as args.
			for i := 0; i < nf; i++ {
				if i == idx {
					new.AddArg(right)
				} else {
					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
				}
			}

			// Recursively assign the new value we've made to the base of the dot op.
			s.assign(left.Left, new, false, false, line, 0, rightIsVolatile)
			// TODO: do we need to update named values here?
			return
		}
		// Update variable assignment.
		s.vars[left] = right
		s.addNamedValue(left, right)
		return
	}
	// Left is not ssa-able. Compute its address.
	addr, _ := s.addr(left, false)
	if left.Op == ONAME && skip == 0 {
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
	}
	if deref {
		// Treat as a mem->mem move.
		if right == nil {
			s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
			return
		}
		if wb {
			s.insertWBmove(t, addr, right, line, rightIsVolatile)
			return
		}
		s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
		return
	}
	// Treat as a store.
	if wb {
		if skip&skipPtr != 0 {
			// Special case: if we don't write back the pointers, don't bother
			// doing the write barrier check.
			s.storeTypeScalars(t, addr, right, skip)
			return
		}
		s.insertWBstore(t, addr, right, line, skip)
		return
	}
	if skip != 0 {
		if skip&skipPtr == 0 {
			s.storeTypePtrs(t, addr, right)
		}
		s.storeTypeScalars(t, addr, right, skip)
		return
	}
	s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem())
}

// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *Type) *ssa.Value {
	switch {
	case t.IsInteger():
		switch t.Size() {
		case 1:
			return s.constInt8(t, 0)
		case 2:
			return s.constInt16(t, 0)
		case 4:
			return s.constInt32(t, 0)
		case 8:
			return s.constInt64(t, 0)
		default:
			s.Fatalf("bad sized integer type %s", t)
		}
	case t.IsFloat():
		switch t.Size() {
		case 4:
			return s.constFloat32(t, 0)
		case 8:
			return s.constFloat64(t, 0)
		default:
			s.Fatalf("bad sized float type %s", t)
		}
	case t.IsComplex():
		switch t.Size() {
		case 8:
			// complex64: two float32 zeros.
			z := s.constFloat32(Types[TFLOAT32], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
		case 16:
			// complex128: two float64 zeros.
			z := s.constFloat64(Types[TFLOAT64], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
		default:
			s.Fatalf("bad sized complex type %s", t)
		}

	case t.IsString():
		return s.constEmptyString(t)
	case t.IsPtrShaped():
		return s.constNil(t)
	case t.IsBoolean():
		return s.constBool(false)
	case t.IsInterface():
		return s.constInterface(t)
	case t.IsSlice():
		return s.constSlice(t)
	case t.IsStruct():
		// Build a struct whose fields are each recursively zeroed.
		n := t.NumFields()
		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
		for i := 0; i < n; i++ {
			v.AddArg(s.zeroVal(t.FieldType(i).(*Type)))
		}
		return v
	}
	s.Unimplementedf("zero for type %v not implemented", t)
	return nil
}

// callKind distinguishes ordinary calls from those made via defer or go.
type callKind int8

const (
	callNormal callKind = iota
	callDefer
	callGo
)

// isSSAIntrinsic1 returns true if n is a call to a recognized 1-arg intrinsic
// that can be handled by the SSA backend.
// SSA uses this, but so does the front end to see if should not
// inline a function because it is a candidate for intrinsic
// substitution.
func isSSAIntrinsic1(s *Sym) bool {
	// The test below is not quite accurate -- in the event that
	// a function is disabled on a per-function basis, for example
	// because of hash-keyed binary failure search, SSA might be
	// disabled for that function but it would not be noted here,
	// and thus an inlining would not occur (in practice, inlining
	// so far has only been noticed for Bswap32 and the 16-bit count
	// leading/trailing instructions, but heuristics might change
	// in the future or on different architectures).
	if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 {
		return false
	}
	if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/sys" {
		switch s.Name {
		case
			"Ctz64", "Ctz32", "Ctz16",
			"Bswap64", "Bswap32":
			return true
		}
	}
	return false
}

// isIntrinsicCall1 reports whether n is a call whose target symbol is a
// recognized 1-arg intrinsic (see isSSAIntrinsic1).
func isIntrinsicCall1(n *Node) bool {
	if n == nil || n.Left == nil {
		return false
	}
	return isSSAIntrinsic1(n.Left.Sym)
}

// intrinsicFirstArg extracts arg from n.List and eval
func (s *state) intrinsicFirstArg(n *Node) *ssa.Value {
	x := n.List.First()
	if x.Op == OAS {
		// The arg may have been rewritten into an assignment to the
		// arg slot; evaluate the right-hand side instead.
		x = x.Right
	}
	return s.expr(x)
}

// intrinsicCall1 converts a call to a recognized 1-arg intrinsic
// into the intrinsic
func (s *state) intrinsicCall1(n *Node) *ssa.Value {
	var result *ssa.Value
	switch n.Left.Sym.Name {
	case "Ctz64":
		result = s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n))
	case "Ctz32":
		result = s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n))
	case "Ctz16":
		result = s.newValue1(ssa.OpCtz16, Types[TUINT16], s.intrinsicFirstArg(n))
	case "Bswap64":
		result = s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n))
	case "Bswap32":
		result = s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n))
	}
	if result == nil {
		Fatalf("Unknown special call: %v", n.Left.Sym)
	}
	if ssa.IntrinsicsDebug > 0 {
		Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, result.LongString())
	}
	return result
}

// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
func (s *state) call(n *Node, k callKind) *ssa.Value {
	var sym *Sym           // target symbol (if static)
	var closure *ssa.Value // ptr to closure to run (if dynamic)
	var codeptr *ssa.Value // ptr to target code (if dynamic)
	var rcvr *ssa.Value    // receiver to set
	fn := n.Left
	switch n.Op {
	case OCALLFUNC:
		if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC {
			sym = fn.Sym
			break
		}
		closure = s.expr(fn)
	case OCALLMETH:
		if fn.Op != ODOTMETH {
			Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
		}
		if k == callNormal {
			sym = fn.Sym
			break
		}
		// Deferred/go'd method calls go through a function value
		// naming the method, evaluated here as a closure.
		n2 := newname(fn.Sym)
		n2.Class = PFUNC
		n2.Lineno = fn.Lineno
		closure = s.expr(n2)
		// Note: receiver is already assigned in n.List, so we don't
		// want to set it here.
	case OCALLINTER:
		if fn.Op != ODOTINTER {
			Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
		}
		i := s.expr(fn.Left)
		itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
		if k != callNormal {
			s.nilCheck(itab)
		}
		itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
		itab = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], itabidx, itab)
		if k == callNormal {
			codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
		} else {
			closure = itab
		}
		rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i)
	}
	dowidth(fn.Type)
	stksize := fn.Type.ArgWidth() // includes receiver

	// Run all argument assignments. The arg slots have already
	// been offset by the appropriate amount (+2*widthptr for go/defer,
	// +widthptr for interface calls).
	// For OCALLMETH, the receiver is set in these statements.
	s.stmtList(n.List)

	// Set receiver (for interface calls)
	if rcvr != nil {
		argStart := Ctxt.FixedFrameSize()
		if k != callNormal {
			argStart += int64(2 * Widthptr)
		}
		addr := s.entryNewValue1I(ssa.OpOffPtr, Types[TUINTPTR], argStart, s.sp)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem())
	}

	// Defer/go args
	if k != callNormal {
		// Write argsize and closure (args to Newproc/Deferproc).
		argsize := s.constInt32(Types[TUINT32], int32(stksize))
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, s.sp, argsize, s.mem())
		addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), int64(Widthptr), s.sp)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem())
		stksize += 2 * int64(Widthptr)
	}

	// call target
	bNext := s.f.NewBlock(ssa.BlockPlain)
	var call *ssa.Value
	switch {
	case k == callDefer:
		call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem())
	case k == callGo:
		call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem())
	case closure != nil:
		codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem())
		call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
	case codeptr != nil:
		call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
	case sym != nil:
		call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem())
	default:
		Fatalf("bad call type %s %v", n.Op, n)
	}
	call.AuxInt = stksize // Call operations carry the argsize of the callee along with them

	// Finish call block
	s.vars[&memVar] = call
	b := s.endBlock()
	b.Kind = ssa.BlockCall
	b.SetControl(call)
	b.AddEdgeTo(bNext)
	if k == callDefer {
		// Add recover edge to exit code.
		b.Kind = ssa.BlockDefer
		r := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(r)
		s.exit()
		b.AddEdgeTo(r)
		b.Likely = ssa.BranchLikely
	}

	// Start exit block, find address of result.
	s.startBlock(bNext)
	// Keep input pointer args live across calls. This is a bandaid until 1.8.
	for _, n := range s.ptrargs {
		s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
	}
	res := n.Left.Type.Results()
	if res.NumFields() == 0 || k != callNormal {
		// call has no return value. Continue with the next statement.
		return nil
	}
	fp := res.Field(0)
	return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp)
}

// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e EType) int8 {
	switch e {
	case TINT8, TINT16, TINT32, TINT64, TINT:
		return -1
	case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
		return +1
	}
	return 0
}

// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
// This improves the effectiveness of cse by using the same Aux values for the
// same symbols.
func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
	switch sym.(type) {
	default:
		// NOTE(review): "uknown" is a pre-existing typo for "unknown"
		// in this message.
		s.Fatalf("sym %v is of uknown type %T", sym, sym)
	case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
		// these are the only valid types
	}

	// First lookup wins: a later call with a different sym for the same
	// node returns the symbol cached by the first call.
	if lsym, ok := s.varsyms[n]; ok {
		return lsym
	} else {
		s.varsyms[n] = sym
		return sym
	}
}

// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// Also returns a bool reporting whether the returned value is "volatile", that is it
// points to the outargs section and thus the referent will be clobbered by any call.
// The value that the returned Value represents is guaranteed to be non-nil.
// If bounded is true then this address does not require a nil check for its operand
// even if that would otherwise be implied.
2713 func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) { 2714 t := Ptrto(n.Type) 2715 switch n.Op { 2716 case ONAME: 2717 switch n.Class { 2718 case PEXTERN: 2719 // global variable 2720 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Sym}) 2721 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb) 2722 // TODO: Make OpAddr use AuxInt as well as Aux. 2723 if n.Xoffset != 0 { 2724 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 2725 } 2726 return v, false 2727 case PPARAM: 2728 // parameter slot 2729 v := s.decladdrs[n] 2730 if v != nil { 2731 return v, false 2732 } 2733 if n.String() == ".fp" { 2734 // Special arg that points to the frame pointer. 2735 // (Used by the race detector, others?) 2736 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 2737 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false 2738 } 2739 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) 2740 return nil, false 2741 case PAUTO: 2742 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n}) 2743 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 2744 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 
2745 // ensure that we reuse symbols for out parameters so 2746 // that cse works on their addresses 2747 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 2748 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 2749 default: 2750 s.Unimplementedf("variable address class %v not implemented", classnames[n.Class]) 2751 return nil, false 2752 } 2753 case OINDREG: 2754 // indirect off a register 2755 // used for storing/loading arguments/returns to/from callees 2756 if int(n.Reg) != Thearch.REGSP { 2757 s.Unimplementedf("OINDREG of non-SP register %s in addr: %v", obj.Rconv(int(n.Reg)), n) 2758 return nil, false 2759 } 2760 return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true 2761 case OINDEX: 2762 if n.Left.Type.IsSlice() { 2763 a := s.expr(n.Left) 2764 i := s.expr(n.Right) 2765 i = s.extendIndex(i) 2766 len := s.newValue1(ssa.OpSliceLen, Types[TINT], a) 2767 if !n.Bounded { 2768 s.boundsCheck(i, len) 2769 } 2770 p := s.newValue1(ssa.OpSlicePtr, t, a) 2771 return s.newValue2(ssa.OpPtrIndex, t, p, i), false 2772 } else { // array 2773 a, isVolatile := s.addr(n.Left, bounded) 2774 i := s.expr(n.Right) 2775 i = s.extendIndex(i) 2776 len := s.constInt(Types[TINT], n.Left.Type.NumElem()) 2777 if !n.Bounded { 2778 s.boundsCheck(i, len) 2779 } 2780 return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Elem()), a, i), isVolatile 2781 } 2782 case OIND: 2783 return s.exprPtr(n.Left, bounded, n.Lineno), false 2784 case ODOT: 2785 p, isVolatile := s.addr(n.Left, bounded) 2786 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile 2787 case ODOTPTR: 2788 p := s.exprPtr(n.Left, bounded, n.Lineno) 2789 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false 2790 case OCLOSUREVAR: 2791 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 2792 s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8]))), false 2793 case OCONVNOP: 2794 addr, isVolatile := s.addr(n.Left, bounded) 2795 return s.newValue1(ssa.OpCopy, t, addr), isVolatile // ensure that addr 
has the right type 2796 case OCALLFUNC, OCALLINTER, OCALLMETH: 2797 return s.call(n, callNormal), true 2798 2799 default: 2800 s.Unimplementedf("unhandled addr %v", n.Op) 2801 return nil, false 2802 } 2803 } 2804 2805 // canSSA reports whether n is SSA-able. 2806 // n must be an ONAME (or an ODOT sequence with an ONAME base). 2807 func (s *state) canSSA(n *Node) bool { 2808 if Debug['N'] != 0 { 2809 return false 2810 } 2811 for n.Op == ODOT { 2812 n = n.Left 2813 } 2814 if n.Op != ONAME { 2815 return false 2816 } 2817 if n.Addrtaken { 2818 return false 2819 } 2820 if n.isParamHeapCopy() { 2821 return false 2822 } 2823 if n.Class == PAUTOHEAP { 2824 Fatalf("canSSA of PAUTOHEAP %v", n) 2825 } 2826 switch n.Class { 2827 case PEXTERN: 2828 return false 2829 case PPARAMOUT: 2830 if hasdefer { 2831 // TODO: handle this case? Named return values must be 2832 // in memory so that the deferred function can see them. 2833 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 2834 return false 2835 } 2836 if s.cgoUnsafeArgs { 2837 // Cgo effectively takes the address of all result args, 2838 // but the compiler can't see that. 2839 return false 2840 } 2841 } 2842 if n.Class == PPARAM && n.String() == ".this" { 2843 // wrappers generated by genwrapper need to update 2844 // the .this pointer in place. 2845 // TODO: treat as a PPARMOUT? 2846 return false 2847 } 2848 return canSSAType(n.Type) 2849 // TODO: try to make more variables SSAable? 2850 } 2851 2852 // canSSA reports whether variables of type t are SSA-able. 2853 func canSSAType(t *Type) bool { 2854 dowidth(t) 2855 if t.Width > int64(4*Widthptr) { 2856 // 4*Widthptr is an arbitrary constant. We want it 2857 // to be at least 3*Widthptr so slices can be registerized. 2858 // Too big and we'll introduce too much register pressure. 2859 return false 2860 } 2861 switch t.Etype { 2862 case TARRAY: 2863 // We can't do arrays because dynamic indexing is 2864 // not supported on SSA variables. 
2865 // TODO: maybe allow if length is <=1? All indexes 2866 // are constant? Might be good for the arrays 2867 // introduced by the compiler for variadic functions. 2868 return false 2869 case TSTRUCT: 2870 if t.NumFields() > ssa.MaxStruct { 2871 return false 2872 } 2873 for _, t1 := range t.Fields().Slice() { 2874 if !canSSAType(t1.Type) { 2875 return false 2876 } 2877 } 2878 return true 2879 default: 2880 return true 2881 } 2882 } 2883 2884 // exprPtr evaluates n to a pointer and nil-checks it. 2885 func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value { 2886 p := s.expr(n) 2887 if bounded || n.NonNil { 2888 if s.f.Config.Debug_checknil() && lineno > 1 { 2889 s.f.Config.Warnl(lineno, "removed nil check") 2890 } 2891 return p 2892 } 2893 s.nilCheck(p) 2894 return p 2895 } 2896 2897 // nilCheck generates nil pointer checking code. 2898 // Starts a new block on return, unless nil checks are disabled. 2899 // Used only for automatically inserted nil checks, 2900 // not for user code like 'x != nil'. 2901 func (s *state) nilCheck(ptr *ssa.Value) { 2902 if Disable_checknil != 0 { 2903 return 2904 } 2905 chk := s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem()) 2906 b := s.endBlock() 2907 b.Kind = ssa.BlockCheck 2908 b.SetControl(chk) 2909 bNext := s.f.NewBlock(ssa.BlockPlain) 2910 b.AddEdgeTo(bNext) 2911 s.startBlock(bNext) 2912 } 2913 2914 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. 2915 // Starts a new block on return. 2916 func (s *state) boundsCheck(idx, len *ssa.Value) { 2917 if Debug['B'] != 0 { 2918 return 2919 } 2920 // TODO: convert index to full width? 2921 // TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero. 2922 2923 // bounds check 2924 cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len) 2925 s.check(cmp, Panicindex) 2926 } 2927 2928 // sliceBoundsCheck generates slice bounds checking code. 
// sliceBoundsCheck generates slice bounds checking code.
// Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
	if Debug['B'] != 0 {
		return
	}
	// TODO: convert index to full width?
	// TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.

	// bounds check
	cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
	s.check(cmp, panicslice)
}

// If cmp (a bool) is true, panic using the given function.
func (s *state) check(cmp *ssa.Value, fn *Node) {
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely
	bNext := s.f.NewBlock(ssa.BlockPlain)
	line := s.peekLine()
	// Share a single panic block per (fn, line) pair.
	bPanic := s.panics[funcLine{fn, line}]
	if bPanic == nil {
		bPanic = s.f.NewBlock(ssa.BlockPlain)
		s.panics[funcLine{fn, line}] = bPanic
		s.startBlock(bPanic)
		// The panic call takes/returns memory to ensure that the right
		// memory state is observed if the panic happens.
		s.rtcall(fn, false, nil)
	}
	b.AddEdgeTo(bNext)
	b.AddEdgeTo(bPanic)
	s.startBlock(bNext)
}

// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
// If returns is true, the block is marked as a call block. A new block
// is started to load the return values.
func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
	// Write args to the stack
	var off int64 // TODO: arch-dependent starting offset?
	for _, arg := range args {
		t := arg.Type
		off = Rnd(off, t.Alignment())
		ptr := s.sp
		if off != 0 {
			ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
		}
		size := t.Size()
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem())
		off += size
	}
	off = Rnd(off, int64(Widthptr))

	// Issue call
	call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
	s.vars[&memVar] = call

	// Finish block
	b := s.endBlock()
	if !returns {
		b.Kind = ssa.BlockExit
		b.SetControl(call)
		call.AuxInt = off
		if len(results) > 0 {
			Fatalf("panic call can't have results")
		}
		return nil
	}
	b.Kind = ssa.BlockCall
	b.SetControl(call)
	bNext := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bNext)
	s.startBlock(bNext)

	// Keep input pointer args live across calls. This is a bandaid until 1.8.
	for _, n := range s.ptrargs {
		s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
	}

	// Load results
	res := make([]*ssa.Value, len(results))
	for i, t := range results {
		off = Rnd(off, t.Alignment())
		ptr := s.sp
		if off != 0 {
			ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
		}
		res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
		off += t.Size()
	}
	off = Rnd(off, int64(Widthptr))

	// Remember how much callee stack space we needed.
	call.AuxInt = off

	return res
}
// insertWBmove inserts the assignment *left = *right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightIsVolatile bool) {
	// if writeBarrier.enabled {
	//   typedmemmove(&t, left, right)
	// } else {
	//   *left = *right
	// }

	if s.noWB {
		s.Fatalf("write barrier prohibited")
	}
	if s.WBLineno == 0 {
		s.WBLineno = left.Line
	}
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
	flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
	// TODO: select the .enabled field. It is currently first, so not needed for now.
	// Load word, test byte, avoiding partial register write from load byte.
	flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
	flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely
	b.SetControl(flag)
	b.AddEdgeTo(bThen)
	b.AddEdgeTo(bElse)

	s.startBlock(bThen)

	if !rightIsVolatile {
		// Issue typedmemmove call.
		taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
		s.rtcall(typedmemmove, true, nil, taddr, left, right)
	} else {
		// Copy to temp location if the source is volatile (will be clobbered by
		// a function call). Marshaling the args to typedmemmove might clobber the
		// value we're trying to move.
		tmp := temp(t)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
		tmpaddr, _ := s.addr(tmp, true)
		s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), tmpaddr, right, s.mem())
		// Issue typedmemmove call.
		taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
		s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr)
		// Mark temp as dead.
		s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
	}
	s.endBlock().AddEdgeTo(bEnd)

	s.startBlock(bElse)
	s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), left, right, s.mem())
	s.endBlock().AddEdgeTo(bEnd)

	s.startBlock(bEnd)

	if Debug_wb > 0 {
		Warnl(line, "write barrier")
	}
}

// insertWBstore inserts the assignment *left = right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) {
	// store scalar fields
	// if writeBarrier.enabled {
	//   writebarrierptr for pointer fields
	// } else {
	//   store pointer fields
	// }

	if s.noWB {
		s.Fatalf("write barrier prohibited")
	}
	if s.WBLineno == 0 {
		s.WBLineno = left.Line
	}
	// Scalar parts never need a barrier; store them unconditionally first.
	s.storeTypeScalars(t, left, right, skip)

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
	flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
	// TODO: select the .enabled field. It is currently first, so not needed for now.
	// Load word, test byte, avoiding partial register write from load byte.
	flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
	flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely
	b.SetControl(flag)
	b.AddEdgeTo(bThen)
	b.AddEdgeTo(bElse)

	// Issue write barriers for pointer writes.
	s.startBlock(bThen)
	s.storeTypePtrsWB(t, left, right)
	s.endBlock().AddEdgeTo(bEnd)

	// Issue regular stores for pointer writes.
	s.startBlock(bElse)
	s.storeTypePtrs(t, left, right)
	s.endBlock().AddEdgeTo(bEnd)

	s.startBlock(bEnd)

	if Debug_wb > 0 {
		Warnl(line, "write barrier")
	}
}

// do *left = right for all scalar (non-pointer) parts of t.
func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
	switch {
	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem())
	case t.IsPtrShaped():
		// no scalar fields.
	case t.IsString():
		if skip&skipLen != 0 {
			return
		}
		len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
		lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
	case t.IsSlice():
		if skip&skipLen == 0 {
			len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
			lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
		}
		if skip&skipCap == 0 {
			cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
			capAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), 2*s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem())
		}
	case t.IsInterface():
		// itab field doesn't need a write barrier (even though it is a pointer).
		itab := s.newValue1(ssa.OpITab, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem())
	case t.IsStruct():
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypeScalars(ft.(*Type), addr, val, 0)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}

// do *left = right for all pointer parts of t.
func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
	case t.IsString():
		ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
	case t.IsSlice():
		ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
	case t.IsInterface():
		// itab field is treated as a scalar.
		idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
	case t.IsStruct():
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !haspointers(ft.(*Type)) {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrs(ft.(*Type), addr, val)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}

// do *left = right with a write barrier for all pointer parts of t.
func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		s.rtcall(writebarrierptr, true, nil, left, right)
	case t.IsString():
		ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
		s.rtcall(writebarrierptr, true, nil, left, ptr)
	case t.IsSlice():
		ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
		s.rtcall(writebarrierptr, true, nil, left, ptr)
	case t.IsInterface():
		idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
		s.rtcall(writebarrierptr, true, nil, idataAddr, idata)
	case t.IsStruct():
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !haspointers(ft.(*Type)) {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrsWB(ft.(*Type), addr, val)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// t is a slice, ptr to array, or string type.
func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
	var elemtype *Type
	var ptrtype *Type
	var ptr *ssa.Value
	var len *ssa.Value
	var cap *ssa.Value
	zero := s.constInt(Types[TINT], 0)
	switch {
	case t.IsSlice():
		elemtype = t.Elem()
		ptrtype = Ptrto(elemtype)
		ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
		len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
		cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
	case t.IsString():
		elemtype = Types[TUINT8]
		ptrtype = Ptrto(elemtype)
		ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
		len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
		cap = len
	case t.IsPtr():
		if !t.Elem().IsArray() {
			s.Fatalf("bad ptr to array in slice %v\n", t)
		}
		elemtype = t.Elem().Elem()
		ptrtype = Ptrto(elemtype)
		s.nilCheck(v)
		ptr = v
		len = s.constInt(Types[TINT], t.Elem().NumElem())
		cap = len
	default:
		s.Fatalf("bad type in slice %v\n", t)
	}

	// Set default values
	if i == nil {
		i = zero
	}
	if j == nil {
		j = len
	}
	if k == nil {
		k = cap
	}

	// Panic if slice indices are not in bounds.
	s.sliceBoundsCheck(i, j)
	if j != k {
		s.sliceBoundsCheck(j, k)
	}
	if k != cap {
		s.sliceBoundsCheck(k, cap)
	}

	// Generate the following code assuming that indexes are in bounds.
	// The conditional is to make sure that we don't generate a slice
	// that points to the next object in memory.
	// rlen = j-i
	// rcap = k-i
	// delta = i*elemsize
	// if rcap == 0 {
	//    delta = 0
	// }
	// rptr = p+delta
	// result = (SliceMake rptr rlen rcap)
	subOp := s.ssaOp(OSUB, Types[TINT])
	eqOp := s.ssaOp(OEQ, Types[TINT])
	mulOp := s.ssaOp(OMUL, Types[TINT])
	rlen := s.newValue2(subOp, Types[TINT], j, i)
	var rcap *ssa.Value
	switch {
	case t.IsString():
		// Capacity of the result is unimportant. However, we use
		// rcap to test if we've generated a zero-length slice.
		// Use length of strings for that.
		rcap = rlen
	case j == k:
		rcap = rlen
	default:
		rcap = s.newValue2(subOp, Types[TINT], k, i)
	}

	// delta = # of elements to offset pointer by.
	s.vars[&deltaVar] = i

	// Generate code to set delta=0 if the resulting capacity is zero.
	// Skipped when i is a known zero constant (delta is already zero).
	if !((i.Op == ssa.OpConst64 && i.AuxInt == 0) ||
		(i.Op == ssa.OpConst32 && int32(i.AuxInt) == 0)) {
		cmp := s.newValue2(eqOp, Types[TBOOL], rcap, zero)

		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.Likely = ssa.BranchUnlikely
		b.SetControl(cmp)

		// Generate block which zeros the delta variable.
		nz := s.f.NewBlock(ssa.BlockPlain)
		b.AddEdgeTo(nz)
		s.startBlock(nz)
		s.vars[&deltaVar] = zero
		s.endBlock()

		// All done.
		merge := s.f.NewBlock(ssa.BlockPlain)
		b.AddEdgeTo(merge)
		nz.AddEdgeTo(merge)
		s.startBlock(merge)

		// TODO: use conditional moves somehow?
	}

	// Compute rptr = ptr + delta * elemsize
	rptr := s.newValue2(ssa.OpAddPtr, ptrtype, ptr, s.newValue2(mulOp, Types[TINT], s.variable(&deltaVar, Types[TINT]), s.constInt(Types[TINT], elemtype.Width)))
	delete(s.vars, &deltaVar)
	return rptr, rlen, rcap
}

// u2fcvtTab is a table of the opcodes needed to convert an
// unsigned integer to a float at a particular pair of widths.
type u2fcvtTab struct {
	geq, cvt2F, and, rsh, or, add ssa.Op
	one                           func(*state, ssa.Type, int64) *ssa.Value
}

var u64_f64 u2fcvtTab = u2fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to64F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd64F,
	one:   (*state).constInt64,
}

var u64_f32 u2fcvtTab = u2fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to32F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd32F,
	one:   (*state).constInt64,
}

// Excess generality on a machine with 64-bit integer registers.
// Not used on AMD64.
var u32_f32 u2fcvtTab = u2fcvtTab{
	geq:   ssa.OpGeq32,
	cvt2F: ssa.OpCvt32to32F,
	and:   ssa.OpAnd32,
	rsh:   ssa.OpRsh32Ux32,
	or:    ssa.OpOr32,
	add:   ssa.OpAdd32F,
	one: func(s *state, t ssa.Type, x int64) *ssa.Value {
		return s.constInt32(t, int32(x))
	},
}

func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uintTofloat(&u64_f64, n, x, ft, tt)
}

func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uintTofloat(&u64_f32, n, x, ft, tt)
}

func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// if x >= 0 {
	//    result = (floatY) x
	// } else {
	//    y = uintX(x) ; y = x & 1
	//    z = uintX(x) ; z = z >> 1
	//    z = z >> 1
	//    z = z | y
	//    result = floatY(z)
	//    result = result + result
	// }
	//
	// Code borrowed from old code generator.
	// What's going on: large 64-bit "unsigned" looks like
	// negative number to hardware's integer-to-float
	// conversion. However, because the mantissa is only
	// 63 bits, we don't need the LSB, so instead we do an
	// unsigned right shift (divide by two), convert, and
	// double. However, before we do that, we need to be
	// sure that we do not lose a "1" if that made the
	// difference in the resulting rounding. Therefore, we
	// preserve it, and OR (not ADD) it back in. The case
	// that matters is when the eleven discarded bits are
	// equal to 10000000001; that rounds up, and the 1 cannot
	// be lost else it would round down if the LSB of the
	// candidate mantissa is 0.
	cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2F, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	one := cvttab.one(s, ft, 1)
	y := s.newValue2(cvttab.and, ft, x, one)
	z := s.newValue2(cvttab.rsh, ft, x, one)
	z = s.newValue2(cvttab.or, ft, z, y)
	a := s.newValue1(cvttab.cvt2F, tt, z)
	a1 := s.newValue2(cvttab.add, tt, a, a)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
		s.Fatalf("node must be a map or a channel")
	}
	// if n == nil {
	//   return 0
	// } else {
	//   // len
	//   return *((*int)n)
	//   // cap
	//   return *(((*int)n)+1)
	// }
	lenType := n.Type
	nilValue := s.constNil(Types[TUINTPTR])
	cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchUnlikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	// length/capacity of a nil map/chan is zero
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	s.vars[n] = s.zeroVal(lenType)
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	if n.Op == OLEN {
		// length is stored in the first word for map/chan
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
	} else if n.Op == OCAP {
		// capacity is stored in the second word for chan
		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
	} else {
		s.Fatalf("op must be OLEN or OCAP")
	}
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, lenType)
}

// f2uCvtTab is a table of the opcodes needed to convert a
// float to an unsigned integer at a particular pair of widths.
type f2uCvtTab struct {
	ltf, cvt2U, subf ssa.Op
	value            func(*state, ssa.Type, float64) *ssa.Value
}

var f32_u64 f2uCvtTab = f2uCvtTab{
	ltf:   ssa.OpLess32F,
	cvt2U: ssa.OpCvt32Fto64,
	subf:  ssa.OpSub32F,
	value: (*state).constFloat32,
}

var f64_u64 f2uCvtTab = f2uCvtTab{
	ltf:   ssa.OpLess64F,
	cvt2U: ssa.OpCvt64Fto64,
	subf:  ssa.OpSub64F,
	value: (*state).constFloat64,
}

func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f32_u64, n, x, ft, tt)
}
func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f64_u64, n, x, ft, tt)
}

func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// if x < 9223372036854775808.0 {
	// 	result = uintY(x)
	// } else {
	// 	y = x - 9223372036854775808.0
	// 	z = uintY(y)
	// 	result = z | -9223372036854775808
	// }
	twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0)
	cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2U, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	y := s.newValue2(cvttab.subf, ft, x, twoToThe63)
	y = s.newValue1(cvttab.cvt2U, tt, y)
	z := s.constInt64(tt, -9223372036854775808)
	a1 := s.newValue2(ssa.OpOr64, tt, y, z)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}

// ifaceType returns the value for the word containing the type.
// n is the node for the interface expression.
// v is the corresponding value.
func (s *state) ifaceType(n *Node, v *ssa.Value) *ssa.Value {
	byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)

	if n.Type.IsEmptyInterface() {
		// Have *eface. The type is the first word in the struct.
		return s.newValue1(ssa.OpITab, byteptr, v)
	}

	// Have *iface.
	// The first word in the struct is the *itab.
	// If the *itab is nil, return 0.
	// Otherwise, the second word in the *itab is the type.

	tab := s.newValue1(ssa.OpITab, byteptr, v)
	s.vars[&typVar] = tab
	isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(isnonnil)
	b.Likely = ssa.BranchLikely

	bLoad := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	b.AddEdgeTo(bLoad)
	b.AddEdgeTo(bEnd)
	bLoad.AddEdgeTo(bEnd)

	s.startBlock(bLoad)
	off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab)
	s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
	s.endBlock()

	s.startBlock(bEnd)
	typ := s.variable(&typVar, byteptr)
	delete(s.vars, &typVar)
	return typ
}

// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
	iface := s.expr(n.Left)
	typ := s.ifaceType(n.Left, iface)  // actual concrete type
	target := s.expr(typename(n.Type)) // target type
	if !isdirectiface(n.Type) {
		// walk rewrites ODOTTYPE/OAS2DOTTYPE into runtime calls except for this case.
		Fatalf("dottype needs a direct iface type %s", n.Type)
	}

	if Debug_typeassert > 0 {
		Warnl(n.Lineno, "type assertion inlined")
	}

	// TODO:  If we have a nonempty interface and its itab field is nil,
	// then this test is redundant and ifaceType should just branch directly to bFail.
	cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cond)
	b.Likely = ssa.BranchLikely

	byteptr := Ptrto(Types[TUINT8])

	bOk := s.f.NewBlock(ssa.BlockPlain)
	bFail := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bOk)
	b.AddEdgeTo(bFail)

	if !commaok {
		// on failure, panic by calling panicdottype
		s.startBlock(bFail)
		taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: typenamesym(n.Left.Type)}, s.sb)
		s.rtcall(panicdottype, false, nil, typ, target, taddr)

		// on success, return idata field
		s.startBlock(bOk)
		return s.newValue1(ssa.OpIData, n.Type, iface), nil
	}

	// commaok is the more complicated case because we have
	// a control flow merge point.
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	// type assertion succeeded
	s.startBlock(bOk)
	s.vars[&idataVar] = s.newValue1(ssa.OpIData, n.Type, iface)
	s.vars[&okVar] = s.constBool(true)
	s.endBlock()
	bOk.AddEdgeTo(bEnd)

	// type assertion failed
	s.startBlock(bFail)
	s.vars[&idataVar] = s.constNil(byteptr)
	s.vars[&okVar] = s.constBool(false)
	s.endBlock()
	bFail.AddEdgeTo(bEnd)

	// merge point
	s.startBlock(bEnd)
	res = s.variable(&idataVar, byteptr)
	resok = s.variable(&okVar, Types[TBOOL])
	delete(s.vars, &idataVar)
	delete(s.vars, &okVar)
	return res, resok
}
// checkgoto checks that a goto from from to to does not
// jump into a block or jump over variable declarations.
// It is a copy of checkgoto in the pre-SSA backend,
// modified only for line number handling.
// TODO: document how this works and why it is designed the way it is.
func (s *state) checkgoto(from *Node, to *Node) {
	if from.Sym == to.Sym {
		return
	}

	nf := 0
	for fs := from.Sym; fs != nil; fs = fs.Link {
		nf++
	}
	nt := 0
	for fs := to.Sym; fs != nil; fs = fs.Link {
		nt++
	}
	fs := from.Sym
	for ; nf > nt; nf-- {
		fs = fs.Link
	}
	if fs != to.Sym {
		// decide what to complain about.
		// prefer to complain about 'into block' over declarations,
		// so scan backward to find most recent block or else dcl.
		var block *Sym

		var dcl *Sym
		ts := to.Sym
		for ; nt > nf; nt-- {
			if ts.Pkg == nil {
				block = ts
			} else {
				dcl = ts
			}
			ts = ts.Link
		}

		for ts != fs {
			if ts.Pkg == nil {
				block = ts
			} else {
				dcl = ts
			}
			ts = ts.Link
			fs = fs.Link
		}

		lno := from.Left.Lineno
		if block != nil {
			yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
		} else {
			yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
		}
	}
}

// variable returns the value of a variable at the current location.
func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
	v := s.vars[name]
	if v == nil {
		// Not yet defined in the current block; leave a forward
		// reference to be resolved by linkForwardReferences.
		v = s.newValue0A(ssa.OpFwdRef, t, name)
		s.fwdRefs = append(s.fwdRefs, v)
		s.vars[name] = v
		s.addNamedValue(name, v)
	}
	return v
}

// mem returns the current memory state value.
func (s *state) mem() *ssa.Value {
	return s.variable(&memVar, ssa.TypeMem)
}

func (s *state) linkForwardReferences(dm *sparseDefState) {

	// Build SSA graph. Each variable on its first use in a basic block
	// leaves a FwdRef in that block representing the incoming value
	// of that variable. This function links that ref up with possible definitions,
	// inserting Phi values as needed. This is essentially the algorithm
	// described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
	// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
	// Differences:
	//   - We use FwdRef nodes to postpone phi building until the CFG is
	//     completely built. That way we can avoid the notion of "sealed"
	//     blocks.
	//   - Phi optimization is a separate pass (in ../ssa/phielim.go).
	for len(s.fwdRefs) > 0 {
		v := s.fwdRefs[len(s.fwdRefs)-1]
		s.fwdRefs = s.fwdRefs[:len(s.fwdRefs)-1]
		s.resolveFwdRef(v, dm)
	}
}

// resolveFwdRef modifies v to be the variable's value at the start of its block.
// v must be a FwdRef op.
func (s *state) resolveFwdRef(v *ssa.Value, dm *sparseDefState) {
	b := v.Block
	name := v.Aux.(*Node)
	v.Aux = nil
	if b == s.f.Entry {
		// Live variable at start of function.
		if s.canSSA(name) {
			if strings.HasPrefix(name.Sym.Name, "autotmp_") {
				// It's likely that this is an uninitialized variable in the entry block.
				s.Fatalf("Treating auto as if it were arg, func %s, node %v, value %v", b.Func.Name, name, v)
			}
			v.Op = ssa.OpArg
			v.Aux = name
			return
		}
		// Not SSAable. Load it.
		addr := s.decladdrs[name]
		if addr == nil {
			// TODO: closure args reach here.
			s.Unimplementedf("unhandled closure arg %s at entry to function %s", name, b.Func.Name)
		}
		if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok {
			s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name)
		}
		v.Op = ssa.OpLoad
		v.AddArgs(addr, s.startmem)
		return
	}
	if len(b.Preds) == 0 {
		// This block is dead; we have no predecessors and we're not the entry block.
		// It doesn't matter what we use here as long as it is well-formed.
		v.Op = ssa.OpUnknown
		return
	}
	// Find variable value on each predecessor.
	var argstore [4]*ssa.Value
	args := argstore[:0]
	for _, e := range b.Preds {
		p := e.Block()
		p = dm.FindBetterDefiningBlock(name, p) // try sparse improvement on p
		args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line))
	}

	// Decide if we need a phi or not. We need a phi if there
	// are two different args (which are both not v).
	var w *ssa.Value
	for _, a := range args {
		if a == v {
			continue // self-reference
		}
		if a == w {
			continue // already have this witness
		}
		if w != nil {
			// two witnesses, need a phi value
			v.Op = ssa.OpPhi
			v.AddArgs(args...)
			return
		}
		w = a // save witness
	}
	if w == nil {
		s.Fatalf("no witness for reachable phi %s", v)
	}
	// One witness. Make v a copy of w.
	v.Op = ssa.OpCopy
	v.AddArg(w)
}

// lookupVarOutgoing finds the variable's value at the end of block b.
func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int32) *ssa.Value {
	for {
		if v, ok := s.defvars[b.ID][name]; ok {
			return v
		}
		// The variable is not defined by b and we haven't looked it up yet.
		// If b has exactly one predecessor, loop to look it up there.
		// Otherwise, give up and insert a new FwdRef and resolve it later.
		if len(b.Preds) != 1 {
			break
		}
		b = b.Preds[0].Block()
	}
	// Generate a FwdRef for the variable and return that.
	v := b.NewValue0A(line, ssa.OpFwdRef, t, name)
	s.fwdRefs = append(s.fwdRefs, v)
	s.defvars[b.ID][name] = v
	s.addNamedValue(name, v)
	return v
}

// addNamedValue records v as a value of the source-level variable n
// in f.NamedValues (unless n is one of the kinds deliberately not tracked).
func (s *state) addNamedValue(n *Node, v *ssa.Value) {
	if n.Class == Pxxx {
		// Don't track our dummy nodes (&memVar etc.).
		return
	}
	if strings.HasPrefix(n.Sym.Name, "autotmp_") {
		// Don't track autotmp_ variables.
		return
	}
	if n.Class == PPARAMOUT {
		// Don't track named output values. This prevents return values
		// from being assigned too early. See #14591 and #14762. TODO: allow this.
		return
	}
	if n.Class == PAUTO && n.Xoffset != 0 {
		s.Fatalf("AUTO var with offset %s %d", n, n.Xoffset)
	}
	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
	values, ok := s.f.NamedValues[loc]
	if !ok {
		s.f.Names = append(s.f.Names, loc)
	}
	s.f.NamedValues[loc] = append(values, v)
}

// Branch is an unresolved branch.
type Branch struct {
	P *obj.Prog  // branch instruction
	B *ssa.Block // target
}

// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
	// Branches remembers all the branch instructions we've seen
	// and where they would like to go.
	Branches []Branch

	// bstart remembers where each block starts (indexed by block ID)
	bstart []*obj.Prog
}

// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
	return Pc
}

// SetLineno sets the current source line number.
func (s *SSAGenState) SetLineno(l int32) {
	lineno = l
}

// genssa appends entries to ptxt for each instruction in f.
// gcargs and gclocals are filled in with pointer maps for the frame.
func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
	var s SSAGenState

	e := f.Config.Frontend().(*ssaExport)
	// We're about to emit a bunch of Progs.
	// Since the only way to get here is to explicitly request it,
	// just fail on unimplemented instead of trying to unwind our mess.
	e.mustImplement = true

	// Remember where each block starts.
	s.bstart = make([]*obj.Prog, f.NumBlocks())

	var valueProgs map[*obj.Prog]*ssa.Value
	var blockProgs map[*obj.Prog]*ssa.Block
	var logProgs = e.log
	if logProgs {
		valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
		blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
		f.Logf("genssa %s\n", f.Name)
		blockProgs[Pc] = f.Blocks[0]
	}

	// Emit basic blocks
	for i, b := range f.Blocks {
		s.bstart[b.ID] = Pc
		// Emit values in block
		Thearch.SSAMarkMoves(&s, b)
		for _, v := range b.Values {
			x := Pc
			Thearch.SSAGenValue(&s, v)
			if logProgs {
				// Attribute every Prog emitted for v back to v.
				for ; x != Pc; x = x.Link {
					valueProgs[x] = v
				}
			}
		}
		// Emit control flow instructions for block
		var next *ssa.Block
		if i < len(f.Blocks)-1 && (Debug['N'] == 0 || b.Kind == ssa.BlockCall) {
			// If -N, leave next==nil so every block with successors
			// ends in a JMP (except call blocks - plive doesn't like
			// select{send,recv} followed by a JMP call). Helps keep
			// line numbers for otherwise empty blocks.
3990 next = f.Blocks[i+1] 3991 } 3992 x := Pc 3993 Thearch.SSAGenBlock(&s, b, next) 3994 if logProgs { 3995 for ; x != Pc; x = x.Link { 3996 blockProgs[x] = b 3997 } 3998 } 3999 } 4000 4001 // Resolve branches 4002 for _, br := range s.Branches { 4003 br.P.To.Val = s.bstart[br.B.ID] 4004 } 4005 4006 if logProgs { 4007 for p := ptxt; p != nil; p = p.Link { 4008 var s string 4009 if v, ok := valueProgs[p]; ok { 4010 s = v.String() 4011 } else if b, ok := blockProgs[p]; ok { 4012 s = b.String() 4013 } else { 4014 s = " " // most value and branch strings are 2-3 characters long 4015 } 4016 f.Logf("%s\t%s\n", s, p) 4017 } 4018 if f.Config.HTML != nil { 4019 saved := ptxt.Ctxt.LineHist.PrintFilenameOnly 4020 ptxt.Ctxt.LineHist.PrintFilenameOnly = true 4021 var buf bytes.Buffer 4022 buf.WriteString("<code>") 4023 buf.WriteString("<dl class=\"ssa-gen\">") 4024 for p := ptxt; p != nil; p = p.Link { 4025 buf.WriteString("<dt class=\"ssa-prog-src\">") 4026 if v, ok := valueProgs[p]; ok { 4027 buf.WriteString(v.HTML()) 4028 } else if b, ok := blockProgs[p]; ok { 4029 buf.WriteString(b.HTML()) 4030 } 4031 buf.WriteString("</dt>") 4032 buf.WriteString("<dd class=\"ssa-prog\">") 4033 buf.WriteString(html.EscapeString(p.String())) 4034 buf.WriteString("</dd>") 4035 buf.WriteString("</li>") 4036 } 4037 buf.WriteString("</dl>") 4038 buf.WriteString("</code>") 4039 f.Config.HTML.WriteColumn("genssa", buf.String()) 4040 ptxt.Ctxt.LineHist.PrintFilenameOnly = saved 4041 } 4042 } 4043 4044 // Emit static data 4045 if f.StaticData != nil { 4046 for _, n := range f.StaticData.([]*Node) { 4047 if !gen_as_init(n, false) { 4048 Fatalf("non-static data marked as static: %v\n\n", n) 4049 } 4050 } 4051 } 4052 4053 // Allocate stack frame 4054 allocauto(ptxt) 4055 4056 // Generate gc bitmaps. 4057 liveness(Curfn, ptxt, gcargs, gclocals) 4058 4059 // Add frame prologue. Zero ambiguously live variables. 
4060 Thearch.Defframe(ptxt) 4061 if Debug['f'] != 0 { 4062 frame(0) 4063 } 4064 4065 // Remove leftover instrumentation from the instruction stream. 4066 removevardef(ptxt) 4067 4068 f.Config.HTML.Close() 4069 } 4070 4071 // movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset 4072 func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) { 4073 p := Prog(as) 4074 // TODO: use zero register on archs that support it. 4075 p.From.Type = obj.TYPE_CONST 4076 p.From.Offset = 0 4077 p.To.Type = obj.TYPE_MEM 4078 p.To.Reg = regnum 4079 p.To.Offset = offset 4080 offset += width 4081 nleft = nbytes - width 4082 return nleft, offset 4083 } 4084 4085 type FloatingEQNEJump struct { 4086 Jump obj.As 4087 Index int 4088 } 4089 4090 func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch { 4091 p := Prog(jumps.Jump) 4092 p.To.Type = obj.TYPE_BRANCH 4093 to := jumps.Index 4094 branches = append(branches, Branch{p, b.Succs[to].Block()}) 4095 if to == 1 { 4096 likely = -likely 4097 } 4098 // liblink reorders the instruction stream as it sees fit. 4099 // Pass along what we know so liblink can make use of it. 4100 // TODO: Once we've fully switched to SSA, 4101 // make liblink leave our output alone. 
4102 switch likely { 4103 case ssa.BranchUnlikely: 4104 p.From.Type = obj.TYPE_CONST 4105 p.From.Offset = 0 4106 case ssa.BranchLikely: 4107 p.From.Type = obj.TYPE_CONST 4108 p.From.Offset = 1 4109 } 4110 return branches 4111 } 4112 4113 func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) { 4114 likely := b.Likely 4115 switch next { 4116 case b.Succs[0].Block(): 4117 s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches) 4118 s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches) 4119 case b.Succs[1].Block(): 4120 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4121 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4122 default: 4123 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4124 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4125 q := Prog(obj.AJMP) 4126 q.To.Type = obj.TYPE_BRANCH 4127 s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) 4128 } 4129 } 4130 4131 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a. 4132 func AddAux(a *obj.Addr, v *ssa.Value) { 4133 AddAux2(a, v, v.AuxInt) 4134 } 4135 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { 4136 if a.Type != obj.TYPE_MEM { 4137 v.Fatalf("bad AddAux addr %v", a) 4138 } 4139 // add integer offset 4140 a.Offset += offset 4141 4142 // If no additional symbol offset, we're done. 4143 if v.Aux == nil { 4144 return 4145 } 4146 // Add symbol's offset from its base register. 4147 switch sym := v.Aux.(type) { 4148 case *ssa.ExternSymbol: 4149 a.Name = obj.NAME_EXTERN 4150 switch s := sym.Sym.(type) { 4151 case *Sym: 4152 a.Sym = Linksym(s) 4153 case *obj.LSym: 4154 a.Sym = s 4155 default: 4156 v.Fatalf("ExternSymbol.Sym is %T", s) 4157 } 4158 case *ssa.ArgSymbol: 4159 n := sym.Node.(*Node) 4160 a.Name = obj.NAME_PARAM 4161 a.Node = n 4162 a.Sym = Linksym(n.Orig.Sym) 4163 a.Offset += n.Xoffset // TODO: why do I have to add this here? I don't for auto variables. 
4164 case *ssa.AutoSymbol: 4165 n := sym.Node.(*Node) 4166 a.Name = obj.NAME_AUTO 4167 a.Node = n 4168 a.Sym = Linksym(n.Sym) 4169 default: 4170 v.Fatalf("aux in %s not implemented %#v", v, v.Aux) 4171 } 4172 } 4173 4174 // extendIndex extends v to a full int width. 4175 func (s *state) extendIndex(v *ssa.Value) *ssa.Value { 4176 size := v.Type.Size() 4177 if size == s.config.IntSize { 4178 return v 4179 } 4180 if size > s.config.IntSize { 4181 // TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test 4182 // the high word and branch to out-of-bounds failure if it is not 0. 4183 s.Unimplementedf("64->32 index truncation not implemented") 4184 return v 4185 } 4186 4187 // Extend value to the required size 4188 var op ssa.Op 4189 if v.Type.IsSigned() { 4190 switch 10*size + s.config.IntSize { 4191 case 14: 4192 op = ssa.OpSignExt8to32 4193 case 18: 4194 op = ssa.OpSignExt8to64 4195 case 24: 4196 op = ssa.OpSignExt16to32 4197 case 28: 4198 op = ssa.OpSignExt16to64 4199 case 48: 4200 op = ssa.OpSignExt32to64 4201 default: 4202 s.Fatalf("bad signed index extension %s", v.Type) 4203 } 4204 } else { 4205 switch 10*size + s.config.IntSize { 4206 case 14: 4207 op = ssa.OpZeroExt8to32 4208 case 18: 4209 op = ssa.OpZeroExt8to64 4210 case 24: 4211 op = ssa.OpZeroExt16to32 4212 case 28: 4213 op = ssa.OpZeroExt16to64 4214 case 48: 4215 op = ssa.OpZeroExt32to64 4216 default: 4217 s.Fatalf("bad unsigned index extension %s", v.Type) 4218 } 4219 } 4220 return s.newValue1(op, Types[TINT], v) 4221 } 4222 4223 // SSARegNum returns the register (in cmd/internal/obj numbering) to 4224 // which v has been allocated. Panics if v is not assigned to a 4225 // register. 4226 // TODO: Make this panic again once it stops happening routinely. 
4227 func SSARegNum(v *ssa.Value) int16 { 4228 reg := v.Block.Func.RegAlloc[v.ID] 4229 if reg == nil { 4230 v.Unimplementedf("nil regnum for value: %s\n%s\n", v.LongString(), v.Block.Func) 4231 return 0 4232 } 4233 return Thearch.SSARegToReg[reg.(*ssa.Register).Num] 4234 } 4235 4236 // AutoVar returns a *Node and int64 representing the auto variable and offset within it 4237 // where v should be spilled. 4238 func AutoVar(v *ssa.Value) (*Node, int64) { 4239 loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) 4240 if v.Type.Size() > loc.Type.Size() { 4241 v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) 4242 } 4243 return loc.N.(*Node), loc.Off 4244 } 4245 4246 // fieldIdx finds the index of the field referred to by the ODOT node n. 4247 func fieldIdx(n *Node) int { 4248 t := n.Left.Type 4249 f := n.Sym 4250 if !t.IsStruct() { 4251 panic("ODOT's LHS is not a struct") 4252 } 4253 4254 var i int 4255 for _, t1 := range t.Fields().Slice() { 4256 if t1.Sym != f { 4257 i++ 4258 continue 4259 } 4260 if t1.Offset != n.Xoffset { 4261 panic("field offset doesn't match") 4262 } 4263 return i 4264 } 4265 panic(fmt.Sprintf("can't find field in expr %s\n", n)) 4266 4267 // TODO: keep the result of this function somewhere in the ODOT Node 4268 // so we don't have to recompute it each time we need it. 4269 } 4270 4271 // ssaExport exports a bunch of compiler services for the ssa backend. 
type ssaExport struct {
	log           bool // print log messages via Logf
	unimplemented bool // an unimplemented feature was hit; output is unreliable
	mustImplement bool // failure to implement is fatal rather than a soft fallback
}

// Type accessors: hand the gc type descriptors to the ssa package.
func (s *ssaExport) TypeBool() ssa.Type    { return Types[TBOOL] }
func (s *ssaExport) TypeInt8() ssa.Type    { return Types[TINT8] }
func (s *ssaExport) TypeInt16() ssa.Type   { return Types[TINT16] }
func (s *ssaExport) TypeInt32() ssa.Type   { return Types[TINT32] }
func (s *ssaExport) TypeInt64() ssa.Type   { return Types[TINT64] }
func (s *ssaExport) TypeUInt8() ssa.Type   { return Types[TUINT8] }
func (s *ssaExport) TypeUInt16() ssa.Type  { return Types[TUINT16] }
func (s *ssaExport) TypeUInt32() ssa.Type  { return Types[TUINT32] }
func (s *ssaExport) TypeUInt64() ssa.Type  { return Types[TUINT64] }
func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
func (s *ssaExport) TypeInt() ssa.Type     { return Types[TINT] }
func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
func (s *ssaExport) TypeString() ssa.Type  { return Types[TSTRING] }
func (s *ssaExport) TypeBytePtr() ssa.Type { return Ptrto(Types[TUINT8]) }

// StringData returns a symbol (a *Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (*ssaExport) StringData(s string) interface{} {
	// TODO: is idealstring correct? It might not matter...
	_, data := stringsym(s)
	return &ssa.ExternSymbol{Typ: idealstring, Sym: data}
}

// Auto allocates a new temporary of type t for the ssa backend.
func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode {
	n := temp(t.(*Type))   // Note: adds new auto to Curfn.Func.Dcl list
	e.mustImplement = true // This modifies the input to SSA, so we want to make sure we succeed from here!
	return n
}

// SplitString returns the slots holding the pointer and length halves of the
// string slot name. For an unaddressed PAUTO it manufactures two fresh autos
// (name.ptr, name.len); otherwise it returns offsets into the original slot.
func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := Ptrto(Types[TUINT8])
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this string up into two separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}

// SplitInterface returns the slots holding the itab/type and data words of the
// interface slot name, either as fresh autos (unaddressed PAUTO) or as offsets
// into the original slot.
func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	t := Ptrto(Types[TUINT8])
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this interface up into two separate variables.
		f := ".itab"
		if n.Type.IsEmptyInterface() {
			// Empty interfaces hold a *type rather than an *itab.
			f = ".type"
		}
		c := e.namedAuto(n.Sym.Name+f, t)
		d := e.namedAuto(n.Sym.Name+".data", t)
		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}

// SplitSlice returns the slots holding the pointer, length, and capacity words
// of the slice slot name, either as fresh autos (unaddressed PAUTO) or as
// offsets into the original slot.
func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := Ptrto(name.Type.ElemType().(*Type))
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this slice up into three separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		c := e.namedAuto(n.Sym.Name+".cap", lenType)
		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0}
	}
	// Return the three parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}

// SplitComplex returns the slots holding the real and imaginary halves of the
// complex slot name. Each half is a float of half the complex type's size.
func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	s := name.Type.Size() / 2
	var t *Type
	if s == 8 {
		t = Types[TFLOAT64]
	} else {
		t = Types[TFLOAT32]
	}
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this complex up into two separate variables.
		c := e.namedAuto(n.Sym.Name+".real", t)
		d := e.namedAuto(n.Sym.Name+".imag", t)
		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}

// SplitStruct returns the slot for field i of the struct slot name, either as
// a fresh auto (unaddressed PAUTO) or at the field's offset within the slot.
func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
	n := name.N.(*Node)
	st := name.Type
	ft := st.FieldType(i)
	if n.Class == PAUTO && !n.Addrtaken {
		// Note: the _ field may appear several times.  But
		// have no fear, identically-named but distinct Autos are
		// ok, albeit maybe confusing for a debugger.
		x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft)
		return ssa.LocalSlot{N: x, Type: ft, Off: 0}
	}
	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}

// namedAuto returns a new AUTO variable with the given name and type.
func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode {
	t := typ.(*Type)
	// Manufacture the ONAME directly rather than going through declare():
	// build the symbol and node by hand, then append to the current
	// function's declaration list.
	s := &Sym{Name: name, Pkg: autopkg}
	n := Nod(ONAME, nil, nil)
	s.Def = n
	s.Def.Used = true
	n.Sym = s
	n.Type = t
	n.Class = PAUTO
	n.Addable = true
	n.Ullman = 1
	n.Esc = EscNever
	n.Xoffset = 0
	n.Name.Curfn = Curfn
	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)

	dowidth(t)
	// This modifies the input to SSA, so from here on failure must be fatal.
	e.mustImplement = true

	return n
}

// CanSSA reports whether values of type t can be represented in SSA form.
func (e *ssaExport) CanSSA(t ssa.Type) bool {
	return canSSAType(t.(*Type))
}

// Line returns the string form of line number line.
func (e *ssaExport) Line(line int32) string {
	return linestr(line)
}

// Logf logs a message from the compiler.
func (e *ssaExport) Logf(msg string, args ...interface{}) {
	// If e was marked as unimplemented, anything could happen. Ignore.
	if e.log && !e.unimplemented {
		fmt.Printf(msg, args...)
	}
}

// Log reports whether logging is enabled.
func (e *ssaExport) Log() bool {
	return e.log
}

// Fatalf reports a compiler error and exits.
func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) {
	// If e was marked as unimplemented, anything could happen. Ignore.
	if !e.unimplemented {
		lineno = line
		Fatalf(msg, args...)
	}
}

// Unimplementedf reports that the function cannot be compiled.
// It will be removed once SSA work is complete.
func (e *ssaExport) Unimplementedf(line int32, msg string, args ...interface{}) {
	if e.mustImplement {
		lineno = line
		Fatalf(msg, args...)
	}
	const alwaysLog = false // enable to calculate top unimplemented features
	if !e.unimplemented && (e.log || alwaysLog) {
		// first implementation failure, print explanation
		fmt.Printf("SSA unimplemented: "+msg+"\n", args...)
	}
	e.unimplemented = true
}

// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) {
	Warnl(line, fmt_, args...)
}

// Debug_checknil reports whether nil-check logging (-d nil) is enabled.
func (e *ssaExport) Debug_checknil() bool {
	return Debug_checknil != 0
}

// Typ returns n's type as an ssa.Type.
func (n *Node) Typ() ssa.Type {
	return n.Type
}