// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"fmt"
	"html"
	"os"
	"strings"

	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/sys"
)

// ssaEnabled is the master switch for the SSA back end.
var ssaEnabled = true

var ssaConfig *ssa.Config
var ssaExp ssaExport

// initssa resets the exporter's per-function flags and returns the
// process-wide SSA configuration, creating it on first use.
func initssa() *ssa.Config {
	ssaExp.unimplemented = false
	ssaExp.mustImplement = true
	if ssaConfig == nil {
		// Optimization is enabled unless -N was given.
		ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
	}
	return ssaConfig
}

// shouldssa reports whether function fn should be compiled with the SSA
// back end, based on the target architecture and the GOSSAFUNC,
// GOSSAPKG, and GOSSAHASH environment variables.
func shouldssa(fn *Node) bool {
	switch Thearch.LinkArch.Name {
	default:
		// Only available for testing.
		if os.Getenv("SSATEST") == "" {
			return false
		}
		// Generally available.
	case "amd64":
	}
	if !ssaEnabled {
		return false
	}

	// Environment variable control of SSA CG
	// 1. IF GOSSAFUNC == current function name THEN
	//       compile this function with SSA and log output to ssa.html

	// 2. IF GOSSAHASH == "" THEN
	//       compile this function (and everything else) with SSA

	// 3. IF GOSSAHASH == "n" or "N"
	//       IF GOSSAPKG == current package name THEN
	//          compile this function (and everything in this package) with SSA
	//       ELSE
	//          use the old back end for this function.
	//       This is for compatibility with existing test harness and should go away.

	// 4. IF GOSSAHASH is a suffix of the binary-rendered SHA1 hash of the function name THEN
	//       compile this function with SSA
	//    ELSE
	//       compile this function with the old back end.

	// Plan is for 3 to be removed when the tests are revised.
	// SSA is now default, and is disabled by setting
	// GOSSAHASH to n or N, or selectively with strings of
	// 0 and 1.

	name := fn.Func.Nname.Sym.Name

	funcname := os.Getenv("GOSSAFUNC")
	if funcname != "" {
		// If GOSSAFUNC is set, compile only that function.
		return name == funcname
	}

	pkg := os.Getenv("GOSSAPKG")
	if pkg != "" {
		// If GOSSAPKG is set, compile only that package.
		return localpkg.Name == pkg
	}

	return initssa().DebugHashMatch("GOSSAHASH", name)
}

// buildssa builds an SSA function.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Lineno)
	defer s.popLine()

	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	if fn.Func.Pragma&Nowritebarrier != 0 {
		s.noWB = true
	}
	defer func() {
		// Record the position of the first write barrier, if any,
		// so later phases can report //go:nowritebarrier violations.
		if s.WBLineno != 0 {
			fn.Func.WBLineno = s.WBLineno
		}
	}()
	// TODO(khr): build config just once at the start of the compiler binary

	ssaExp.log = printssa

	s.config = initssa()
	s.f = s.config.NewFunc()
	s.f.Name = name
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}

	if name == os.Getenv("GOSSAFUNC") {
		// TODO: tempfile? it is handy to have the location
		// of this file be stable, so you can just reload in the browser.
		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}
	defer func() {
		// When not dumping, close the writer here; when dumping
		// (printssa), it stays open so later phases can append.
		if !printssa {
			s.config.HTML.Close()
		}
	}()

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
			if n.Class == PPARAM && s.canSSA(n) && n.Type.IsPtrShaped() {
				s.ptrargs = append(s.ptrargs, n)
				n.SetNotLiveAtEnd(true) // SSA takes care of this explicitly
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Unimplementedf("local variable with class %s unimplemented", classnames[n.Class])
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmts(fn.Func.Enter)
	s.stmts(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	// Check that we used all labels
	for name, lab := range s.labels {
		if !lab.used() && !lab.reported {
			yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
			lab.reported = true
		}
		if lab.used() && !lab.defined() && !lab.reported {
			yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
			lab.reported = true
		}
	}

	// Check any forward gotos. Non-forward gotos have already been checked.
	for _, n := range s.fwdGotos {
		lab := s.labels[n.Left.Sym.Name]
		// If the label is undefined, we have already have printed an error.
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		}
	}

	if nerrors > 0 {
		s.f.Free()
		return nil
	}

	prelinkNumvars := s.f.NumValues()
	sparseDefState := s.locatePotentialPhiFunctions(fn)

	// Link up variable uses to variable definitions
	s.linkForwardReferences(sparseDefState)

	if ssa.BuildStats > 0 {
		s.f.LogStat("build", s.f.NumBlocks(), "blocks", prelinkNumvars, "vars_before",
			s.f.NumValues(), "vars_after", prelinkNumvars*s.f.NumBlocks(), "ssa_phi_loc_cutoff_score")
	}

	// Don't carry reference this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	return s.f
}

// state holds the per-function working state used while converting a
// function's AST to SSA form.
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// gotos that jump forward; required for deferred checkgoto calls
	fwdGotos []*Node
	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	vars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []int32

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of FwdRef values.
	fwdRefs []*ssa.Value

	// list of PPARAMOUT (return) variables.
	returns []*Node

	// list of PPARAM SSA-able pointer-shaped args. We ensure these are live
	// throughout the function to help users avoid premature finalizers.
	ptrargs []*Node

	cgoUnsafeArgs bool
	noWB          bool
	WBLineno      int32 // line number of first write barrier. 0=no write barriers
}

// funcLine is a (function, line) pair, used as the key when
// deduplicating generated panic calls.
type funcLine struct {
	f    *Node
	line int32
}

// ssaLabel tracks everything known about one Go label: its blocks and
// the AST nodes that define and use it (for error reporting).
type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
	defNode        *Node      // label definition Node (OLABEL)
	// Label use Node (OGOTO, OBREAK, OCONTINUE).
	// Used only for error detection and reporting.
	// There might be multiple uses, but we only need to track one.
	useNode  *Node
	reported bool // reported indicates whether an error has already been reported for this label
}

// defined reports whether the label has a definition (OLABEL node).
func (l *ssaLabel) defined() bool { return l.defNode != nil }

// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
func (l *ssaLabel) used() bool { return l.useNode != nil }

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

// The following methods delegate logging and diagnostics to the SSA
// configuration, attaching the current source line where one is required.
func (s *state) Logf(msg string, args ...interface{}) { s.config.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.config.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(s.peekLine(), msg, args...) }
func (s *state) Unimplementedf(msg string, args ...interface{}) {
	s.config.Unimplementedf(s.peekLine(), msg, args...)
}
func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) }
func (s *state) Debug_checknil() bool                              { return s.config.Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
	idataVar  = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}}
	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
	deltaVar  = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}}
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	// Fresh variable-assignment map for the new block.
	s.vars = map[*Node]*ssa.Value{}
}

// endBlock marks the end of generating code for the current block.
368 // Returns the (former) current block. Returns nil if there is no current 369 // block, i.e. if no code flows to the current execution point. 370 func (s *state) endBlock() *ssa.Block { 371 b := s.curBlock 372 if b == nil { 373 return nil 374 } 375 for len(s.defvars) <= int(b.ID) { 376 s.defvars = append(s.defvars, nil) 377 } 378 s.defvars[b.ID] = s.vars 379 s.curBlock = nil 380 s.vars = nil 381 b.Line = s.peekLine() 382 return b 383 } 384 385 // pushLine pushes a line number on the line number stack. 386 func (s *state) pushLine(line int32) { 387 s.line = append(s.line, line) 388 } 389 390 // popLine pops the top of the line number stack. 391 func (s *state) popLine() { 392 s.line = s.line[:len(s.line)-1] 393 } 394 395 // peekLine peek the top of the line number stack. 396 func (s *state) peekLine() int32 { 397 return s.line[len(s.line)-1] 398 } 399 400 func (s *state) Error(msg string, args ...interface{}) { 401 yyerrorl(s.peekLine(), msg, args...) 402 } 403 404 // newValue0 adds a new value with no arguments to the current block. 405 func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value { 406 return s.curBlock.NewValue0(s.peekLine(), op, t) 407 } 408 409 // newValue0A adds a new value with no arguments and an aux value to the current block. 410 func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value { 411 return s.curBlock.NewValue0A(s.peekLine(), op, t, aux) 412 } 413 414 // newValue0I adds a new value with no arguments and an auxint value to the current block. 415 func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value { 416 return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint) 417 } 418 419 // newValue1 adds a new value with one argument to the current block. 420 func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value { 421 return s.curBlock.NewValue1(s.peekLine(), op, t, arg) 422 } 423 424 // newValue1A adds a new value with one argument and an aux value to the current block. 
425 func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value { 426 return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg) 427 } 428 429 // newValue1I adds a new value with one argument and an auxint value to the current block. 430 func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value { 431 return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg) 432 } 433 434 // newValue2 adds a new value with two arguments to the current block. 435 func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value { 436 return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1) 437 } 438 439 // newValue2I adds a new value with two arguments and an auxint value to the current block. 440 func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value { 441 return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1) 442 } 443 444 // newValue3 adds a new value with three arguments to the current block. 445 func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value { 446 return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2) 447 } 448 449 // newValue3I adds a new value with three arguments and an auxint value to the current block. 450 func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value { 451 return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2) 452 } 453 454 // entryNewValue0 adds a new value with no arguments to the entry block. 455 func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value { 456 return s.f.Entry.NewValue0(s.peekLine(), op, t) 457 } 458 459 // entryNewValue0A adds a new value with no arguments and an aux value to the entry block. 
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekLine(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekLine(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekLine(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekLine(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekLine(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekLine(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekLine(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekLine(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekLine(), t, c)
}

// constInt adds a const of the target's native int width, checking that
// c fits when the target int is 32 bits.
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}

// stmts converts the statement list a to SSA and adds it to s.
func (s *state) stmts(a Nodes) {
	for _, x := range a.Slice() {
		s.stmt(x)
	}
}

// stmtList converts the statement list l to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Lineno)
	defer s.popLine()

	// If s.curBlock is nil, then we're about to generate dead code.
	// We can't just short-circuit here, though,
	// because we check labels and gotos as part of SSA generation.
	// Provide a block for the dead code so that we don't have
	// to add special cases everywhere else.
	if s.curBlock == nil {
		dead := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(dead)
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC, OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC &&
			(compiling_runtime && n.Left.Sym.Name == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (n.Left.Sym.Name == "gopanic" || n.Left.Sym.Name == "selectgo" || n.Left.Sym.Name == "block")) {
			// Calls that never return end the block with BlockExit.
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
			// TODO: never rewrite OPANIC to OCALLFUNC in the
			// first place. Need to wait until all backends
			// go through SSA.
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno, 0, false)
		s.assign(n.List.Second(), resok, false, false, n.Lineno, 0, false)
		return

	case ODCL:
		if n.Left.Class == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym

		if isblanksym(sym) {
			// Empty identifier is valid but useless.
			// See issues 11589, 11593.
			return
		}

		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.Name.Defn; ctl != nil {
			switch ctl.Op {
			case OFOR, OSWITCH, OSELECT:
				s.labeledNodes[ctl] = lab
			}
		}

		if !lab.defined() {
			lab.defNode = n
		} else {
			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
			lab.reported = true
		}
		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// go to that label (we pretend "label:" is preceded by "goto label")
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		if !lab.used() {
			lab.useNode = n
		}

		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		} else {
			// Forward goto; checked later in buildssa once the
			// label's definition has been seen.
			s.fwdGotos = append(s.fwdGotos, n)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

	case OAS, OASWB:
		// Check whether we can generate static data rather than code.
		// If so, ignore n and defer data generation until codegen.
		// Failure to do this causes writes to readonly symbols.
		if gen_as_init(n, true) {
			var data []*Node
			if s.f.StaticData != nil {
				data = s.f.StaticData.([]*Node)
			}
			s.f.StaticData = append(data, n)
			return
		}

		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//   VARDEF x
			//   COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		var t *Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) {
					s.append(rhs, true)
					return
				}
			}
		}
		var r *ssa.Value
		var isVolatile bool
		needwb := n.Op == OASWB && rhs != nil
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r, isVolatile = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}
		if rhs != nil && rhs.Op == OAPPEND {
			// The frontend gets rid of the write barrier to enable the special OAPPEND
			// handling above, but since this is not a special case, we need it.
			// TODO: just add a ptr graying to the end of growslice?
			// TODO: check whether we need to provide special handling and a write barrier
			// for ODOTTYPE and ORECV also.
			// They get similar wb-removal treatment in walk.go:OAS.
			needwb = true
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//    tmp = len(*p)
			//    (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, needwb, deref, n.Lineno, skip, isVolatile)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}

		s.startBlock(bThen)
		s.stmts(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Left.Sym

	case OCONTINUE, OBREAK:
		var op string
		var to *ssa.Block
		switch n.Op {
		case OCONTINUE:
			op = "continue"
			to = s.continueTo
		case OBREAK:
			op = "break"
			to = s.breakTo
		}
		if n.Left == nil {
			// plain break/continue
			if to == nil {
				s.Error("%s is not in a loop", op)
				return
			}
			// nothing to do; "to" is already the correct target
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			if !lab.used() {
				lab.useNode = n.Left
			}
			if !lab.defined() {
				s.Error("%s label not defined: %v", op, sym)
				lab.reported = true
				return
			}
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
			if to == nil {
				// Valid label but not usable with a break/continue here, e.g.:
				// for {
				// 	continue abc
				// }
				// abc:
				// for {}
				s.Error("invalid %s label %v", op, sym)
				lab.reported = true
				return
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)

	case OFOR:
		// OFOR: for Ninit; Left; Right { Nbody }
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test
		b := s.endBlock()
		b.AddEdgeTo(bCond)

		// generate code to test condition
		s.startBlock(bCond)
		if n.Left != nil {
			s.condBranch(n.Left, bBody, bEnd, 1)
		} else {
			// No condition: an unconditional edge into the body.
			b := s.endBlock()
			b.Kind = ssa.BlockPlain
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmts(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}
		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmts(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// OSWITCH never falls through (s.curBlock == nil here).
		// OSELECT does not fall through if we're calling selectgo.
		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
		// In those latter cases, go to the code after the select.
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken {
			s.Fatalf("VARLIVE variable %s must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Unimplementedf("unhandled stmt %s", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmts(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Keep input pointer args live until the return. This is a bandaid
	// fix for 1.7 for what will become in 1.8 explicit runtime.KeepAlive calls.
	// For <= 1.7 we guarantee that pointer input arguments live to the end of
	// the function to prevent premature (from the user's point of view)
	// execution of finalizers. See issue 15277.
	// TODO: remove for 1.8?
	for _, n := range s.ptrargs {
		s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}

// opAndType pairs an AST operator with an operand type; it is the key
// of the opToSSA lookup table below.
type opAndType struct {
	op    Op
	etype EType
}

// opToSSA maps (AST op, operand type) pairs to the corresponding SSA op.
var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:
1054 opAndType{OCOM, TUINT64}: ssa.OpCom64, 1055 1056 opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag, 1057 opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag, 1058 opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal, 1059 opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal, 1060 1061 opAndType{OMUL, TINT8}: ssa.OpMul8, 1062 opAndType{OMUL, TUINT8}: ssa.OpMul8, 1063 opAndType{OMUL, TINT16}: ssa.OpMul16, 1064 opAndType{OMUL, TUINT16}: ssa.OpMul16, 1065 opAndType{OMUL, TINT32}: ssa.OpMul32, 1066 opAndType{OMUL, TUINT32}: ssa.OpMul32, 1067 opAndType{OMUL, TINT64}: ssa.OpMul64, 1068 opAndType{OMUL, TUINT64}: ssa.OpMul64, 1069 opAndType{OMUL, TFLOAT32}: ssa.OpMul32F, 1070 opAndType{OMUL, TFLOAT64}: ssa.OpMul64F, 1071 1072 opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F, 1073 opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F, 1074 1075 opAndType{OHMUL, TINT8}: ssa.OpHmul8, 1076 opAndType{OHMUL, TUINT8}: ssa.OpHmul8u, 1077 opAndType{OHMUL, TINT16}: ssa.OpHmul16, 1078 opAndType{OHMUL, TUINT16}: ssa.OpHmul16u, 1079 opAndType{OHMUL, TINT32}: ssa.OpHmul32, 1080 opAndType{OHMUL, TUINT32}: ssa.OpHmul32u, 1081 1082 opAndType{ODIV, TINT8}: ssa.OpDiv8, 1083 opAndType{ODIV, TUINT8}: ssa.OpDiv8u, 1084 opAndType{ODIV, TINT16}: ssa.OpDiv16, 1085 opAndType{ODIV, TUINT16}: ssa.OpDiv16u, 1086 opAndType{ODIV, TINT32}: ssa.OpDiv32, 1087 opAndType{ODIV, TUINT32}: ssa.OpDiv32u, 1088 opAndType{ODIV, TINT64}: ssa.OpDiv64, 1089 opAndType{ODIV, TUINT64}: ssa.OpDiv64u, 1090 1091 opAndType{OMOD, TINT8}: ssa.OpMod8, 1092 opAndType{OMOD, TUINT8}: ssa.OpMod8u, 1093 opAndType{OMOD, TINT16}: ssa.OpMod16, 1094 opAndType{OMOD, TUINT16}: ssa.OpMod16u, 1095 opAndType{OMOD, TINT32}: ssa.OpMod32, 1096 opAndType{OMOD, TUINT32}: ssa.OpMod32u, 1097 opAndType{OMOD, TINT64}: ssa.OpMod64, 1098 opAndType{OMOD, TUINT64}: ssa.OpMod64u, 1099 1100 opAndType{OAND, TINT8}: ssa.OpAnd8, 1101 opAndType{OAND, TUINT8}: ssa.OpAnd8, 1102 opAndType{OAND, TINT16}: ssa.OpAnd16, 1103 opAndType{OAND, TUINT16}: ssa.OpAnd16, 1104 opAndType{OAND, TINT32}: 
ssa.OpAnd32, 1105 opAndType{OAND, TUINT32}: ssa.OpAnd32, 1106 opAndType{OAND, TINT64}: ssa.OpAnd64, 1107 opAndType{OAND, TUINT64}: ssa.OpAnd64, 1108 1109 opAndType{OOR, TINT8}: ssa.OpOr8, 1110 opAndType{OOR, TUINT8}: ssa.OpOr8, 1111 opAndType{OOR, TINT16}: ssa.OpOr16, 1112 opAndType{OOR, TUINT16}: ssa.OpOr16, 1113 opAndType{OOR, TINT32}: ssa.OpOr32, 1114 opAndType{OOR, TUINT32}: ssa.OpOr32, 1115 opAndType{OOR, TINT64}: ssa.OpOr64, 1116 opAndType{OOR, TUINT64}: ssa.OpOr64, 1117 1118 opAndType{OXOR, TINT8}: ssa.OpXor8, 1119 opAndType{OXOR, TUINT8}: ssa.OpXor8, 1120 opAndType{OXOR, TINT16}: ssa.OpXor16, 1121 opAndType{OXOR, TUINT16}: ssa.OpXor16, 1122 opAndType{OXOR, TINT32}: ssa.OpXor32, 1123 opAndType{OXOR, TUINT32}: ssa.OpXor32, 1124 opAndType{OXOR, TINT64}: ssa.OpXor64, 1125 opAndType{OXOR, TUINT64}: ssa.OpXor64, 1126 1127 opAndType{OEQ, TBOOL}: ssa.OpEqB, 1128 opAndType{OEQ, TINT8}: ssa.OpEq8, 1129 opAndType{OEQ, TUINT8}: ssa.OpEq8, 1130 opAndType{OEQ, TINT16}: ssa.OpEq16, 1131 opAndType{OEQ, TUINT16}: ssa.OpEq16, 1132 opAndType{OEQ, TINT32}: ssa.OpEq32, 1133 opAndType{OEQ, TUINT32}: ssa.OpEq32, 1134 opAndType{OEQ, TINT64}: ssa.OpEq64, 1135 opAndType{OEQ, TUINT64}: ssa.OpEq64, 1136 opAndType{OEQ, TINTER}: ssa.OpEqInter, 1137 opAndType{OEQ, TSLICE}: ssa.OpEqSlice, 1138 opAndType{OEQ, TFUNC}: ssa.OpEqPtr, 1139 opAndType{OEQ, TMAP}: ssa.OpEqPtr, 1140 opAndType{OEQ, TCHAN}: ssa.OpEqPtr, 1141 opAndType{OEQ, TPTR64}: ssa.OpEqPtr, 1142 opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr, 1143 opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr, 1144 opAndType{OEQ, TFLOAT64}: ssa.OpEq64F, 1145 opAndType{OEQ, TFLOAT32}: ssa.OpEq32F, 1146 1147 opAndType{ONE, TBOOL}: ssa.OpNeqB, 1148 opAndType{ONE, TINT8}: ssa.OpNeq8, 1149 opAndType{ONE, TUINT8}: ssa.OpNeq8, 1150 opAndType{ONE, TINT16}: ssa.OpNeq16, 1151 opAndType{ONE, TUINT16}: ssa.OpNeq16, 1152 opAndType{ONE, TINT32}: ssa.OpNeq32, 1153 opAndType{ONE, TUINT32}: ssa.OpNeq32, 1154 opAndType{ONE, TINT64}: ssa.OpNeq64, 1155 opAndType{ONE, TUINT64}: 
ssa.OpNeq64, 1156 opAndType{ONE, TINTER}: ssa.OpNeqInter, 1157 opAndType{ONE, TSLICE}: ssa.OpNeqSlice, 1158 opAndType{ONE, TFUNC}: ssa.OpNeqPtr, 1159 opAndType{ONE, TMAP}: ssa.OpNeqPtr, 1160 opAndType{ONE, TCHAN}: ssa.OpNeqPtr, 1161 opAndType{ONE, TPTR64}: ssa.OpNeqPtr, 1162 opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr, 1163 opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr, 1164 opAndType{ONE, TFLOAT64}: ssa.OpNeq64F, 1165 opAndType{ONE, TFLOAT32}: ssa.OpNeq32F, 1166 1167 opAndType{OLT, TINT8}: ssa.OpLess8, 1168 opAndType{OLT, TUINT8}: ssa.OpLess8U, 1169 opAndType{OLT, TINT16}: ssa.OpLess16, 1170 opAndType{OLT, TUINT16}: ssa.OpLess16U, 1171 opAndType{OLT, TINT32}: ssa.OpLess32, 1172 opAndType{OLT, TUINT32}: ssa.OpLess32U, 1173 opAndType{OLT, TINT64}: ssa.OpLess64, 1174 opAndType{OLT, TUINT64}: ssa.OpLess64U, 1175 opAndType{OLT, TFLOAT64}: ssa.OpLess64F, 1176 opAndType{OLT, TFLOAT32}: ssa.OpLess32F, 1177 1178 opAndType{OGT, TINT8}: ssa.OpGreater8, 1179 opAndType{OGT, TUINT8}: ssa.OpGreater8U, 1180 opAndType{OGT, TINT16}: ssa.OpGreater16, 1181 opAndType{OGT, TUINT16}: ssa.OpGreater16U, 1182 opAndType{OGT, TINT32}: ssa.OpGreater32, 1183 opAndType{OGT, TUINT32}: ssa.OpGreater32U, 1184 opAndType{OGT, TINT64}: ssa.OpGreater64, 1185 opAndType{OGT, TUINT64}: ssa.OpGreater64U, 1186 opAndType{OGT, TFLOAT64}: ssa.OpGreater64F, 1187 opAndType{OGT, TFLOAT32}: ssa.OpGreater32F, 1188 1189 opAndType{OLE, TINT8}: ssa.OpLeq8, 1190 opAndType{OLE, TUINT8}: ssa.OpLeq8U, 1191 opAndType{OLE, TINT16}: ssa.OpLeq16, 1192 opAndType{OLE, TUINT16}: ssa.OpLeq16U, 1193 opAndType{OLE, TINT32}: ssa.OpLeq32, 1194 opAndType{OLE, TUINT32}: ssa.OpLeq32U, 1195 opAndType{OLE, TINT64}: ssa.OpLeq64, 1196 opAndType{OLE, TUINT64}: ssa.OpLeq64U, 1197 opAndType{OLE, TFLOAT64}: ssa.OpLeq64F, 1198 opAndType{OLE, TFLOAT32}: ssa.OpLeq32F, 1199 1200 opAndType{OGE, TINT8}: ssa.OpGeq8, 1201 opAndType{OGE, TUINT8}: ssa.OpGeq8U, 1202 opAndType{OGE, TINT16}: ssa.OpGeq16, 1203 opAndType{OGE, TUINT16}: ssa.OpGeq16U, 1204 
opAndType{OGE, TINT32}: ssa.OpGeq32, 1205 opAndType{OGE, TUINT32}: ssa.OpGeq32U, 1206 opAndType{OGE, TINT64}: ssa.OpGeq64, 1207 opAndType{OGE, TUINT64}: ssa.OpGeq64U, 1208 opAndType{OGE, TFLOAT64}: ssa.OpGeq64F, 1209 opAndType{OGE, TFLOAT32}: ssa.OpGeq32F, 1210 1211 opAndType{OLROT, TUINT8}: ssa.OpLrot8, 1212 opAndType{OLROT, TUINT16}: ssa.OpLrot16, 1213 opAndType{OLROT, TUINT32}: ssa.OpLrot32, 1214 opAndType{OLROT, TUINT64}: ssa.OpLrot64, 1215 1216 opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt, 1217 } 1218 1219 func (s *state) concreteEtype(t *Type) EType { 1220 e := t.Etype 1221 switch e { 1222 default: 1223 return e 1224 case TINT: 1225 if s.config.IntSize == 8 { 1226 return TINT64 1227 } 1228 return TINT32 1229 case TUINT: 1230 if s.config.IntSize == 8 { 1231 return TUINT64 1232 } 1233 return TUINT32 1234 case TUINTPTR: 1235 if s.config.PtrSize == 8 { 1236 return TUINT64 1237 } 1238 return TUINT32 1239 } 1240 } 1241 1242 func (s *state) ssaOp(op Op, t *Type) ssa.Op { 1243 etype := s.concreteEtype(t) 1244 x, ok := opToSSA[opAndType{op, etype}] 1245 if !ok { 1246 s.Unimplementedf("unhandled binary op %s %s", op, etype) 1247 } 1248 return x 1249 } 1250 1251 func floatForComplex(t *Type) *Type { 1252 if t.Size() == 8 { 1253 return Types[TFLOAT32] 1254 } else { 1255 return Types[TFLOAT64] 1256 } 1257 } 1258 1259 type opAndTwoTypes struct { 1260 op Op 1261 etype1 EType 1262 etype2 EType 1263 } 1264 1265 type twoTypes struct { 1266 etype1 EType 1267 etype2 EType 1268 } 1269 1270 type twoOpsAndType struct { 1271 op1 ssa.Op 1272 op2 ssa.Op 1273 intermediateType EType 1274 } 1275 1276 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ 1277 1278 twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32}, 1279 twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32}, 1280 twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32}, 1281 twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, 
ssa.OpCvt64to32F, TINT64}, 1282 1283 twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32}, 1284 twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32}, 1285 twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32}, 1286 twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64}, 1287 1288 twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, 1289 twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, 1290 twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32}, 1291 twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64}, 1292 1293 twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, 1294 twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, 1295 twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32}, 1296 twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64}, 1297 // unsigned 1298 twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32}, 1299 twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32}, 1300 twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned 1301 twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead 1302 1303 twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32}, 1304 twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32}, 1305 twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned 1306 twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, 
branchy code expansion instead 1307 1308 twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32}, 1309 twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32}, 1310 twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned 1311 twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead 1312 1313 twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32}, 1314 twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32}, 1315 twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned 1316 twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead 1317 1318 // float 1319 twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32}, 1320 twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64}, 1321 twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32}, 1322 twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64}, 1323 } 1324 1325 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{ 1326 opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8, 1327 opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8, 1328 opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16, 1329 opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16, 1330 opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32, 1331 opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32, 1332 opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64, 1333 opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64, 1334 1335 opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8, 1336 opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8, 1337 opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16, 1338 
opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16, 1339 opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32, 1340 opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32, 1341 opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64, 1342 opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64, 1343 1344 opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8, 1345 opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8, 1346 opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16, 1347 opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16, 1348 opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32, 1349 opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32, 1350 opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64, 1351 opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64, 1352 1353 opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8, 1354 opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8, 1355 opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16, 1356 opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16, 1357 opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32, 1358 opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32, 1359 opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64, 1360 opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64, 1361 1362 opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8, 1363 opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8, 1364 opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16, 1365 opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16, 1366 opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32, 1367 opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32, 1368 opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64, 1369 opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64, 1370 1371 opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8, 1372 opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8, 1373 opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16, 1374 opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16, 1375 opAndTwoTypes{ORSH, TINT16, TUINT32}: 
ssa.OpRsh16x32, 1376 opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32, 1377 opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64, 1378 opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64, 1379 1380 opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8, 1381 opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8, 1382 opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16, 1383 opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16, 1384 opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32, 1385 opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32, 1386 opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64, 1387 opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64, 1388 1389 opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8, 1390 opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8, 1391 opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16, 1392 opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16, 1393 opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32, 1394 opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32, 1395 opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64, 1396 opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64, 1397 } 1398 1399 func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op { 1400 etype1 := s.concreteEtype(t) 1401 etype2 := s.concreteEtype(u) 1402 x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}] 1403 if !ok { 1404 s.Unimplementedf("unhandled shift op %s etype=%s/%s", op, etype1, etype2) 1405 } 1406 return x 1407 } 1408 1409 func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op { 1410 etype1 := s.concreteEtype(t) 1411 x, ok := opToSSA[opAndType{op, etype1}] 1412 if !ok { 1413 s.Unimplementedf("unhandled rotate op %s etype=%s", op, etype1) 1414 } 1415 return x 1416 } 1417 1418 // expr converts the expression n to ssa, adds it to s and returns the ssa result. 
1419 func (s *state) expr(n *Node) *ssa.Value { 1420 if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) { 1421 // ONAMEs and named OLITERALs have the line number 1422 // of the decl, not the use. See issue 14742. 1423 s.pushLine(n.Lineno) 1424 defer s.popLine() 1425 } 1426 1427 s.stmtList(n.Ninit) 1428 switch n.Op { 1429 case OCFUNC: 1430 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Left.Sym}) 1431 return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb) 1432 case ONAME: 1433 if n.Class == PFUNC { 1434 // "value" of a function is the address of the function's closure 1435 sym := funcsym(n.Sym) 1436 aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym} 1437 return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb) 1438 } 1439 if s.canSSA(n) { 1440 return s.variable(n, n.Type) 1441 } 1442 addr, _ := s.addr(n, false) 1443 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1444 case OCLOSUREVAR: 1445 addr, _ := s.addr(n, false) 1446 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1447 case OLITERAL: 1448 switch u := n.Val().U.(type) { 1449 case *Mpint: 1450 i := u.Int64() 1451 switch n.Type.Size() { 1452 case 1: 1453 return s.constInt8(n.Type, int8(i)) 1454 case 2: 1455 return s.constInt16(n.Type, int16(i)) 1456 case 4: 1457 return s.constInt32(n.Type, int32(i)) 1458 case 8: 1459 return s.constInt64(n.Type, i) 1460 default: 1461 s.Fatalf("bad integer size %d", n.Type.Size()) 1462 return nil 1463 } 1464 case string: 1465 if u == "" { 1466 return s.constEmptyString(n.Type) 1467 } 1468 return s.entryNewValue0A(ssa.OpConstString, n.Type, u) 1469 case bool: 1470 return s.constBool(u) 1471 case *NilVal: 1472 t := n.Type 1473 switch { 1474 case t.IsSlice(): 1475 return s.constSlice(t) 1476 case t.IsInterface(): 1477 return s.constInterface(t) 1478 default: 1479 return s.constNil(t) 1480 } 1481 case *Mpflt: 1482 switch n.Type.Size() { 1483 case 4: 1484 return s.constFloat32(n.Type, u.Float32()) 1485 case 8: 1486 return s.constFloat64(n.Type, 
u.Float64()) 1487 default: 1488 s.Fatalf("bad float size %d", n.Type.Size()) 1489 return nil 1490 } 1491 case *Mpcplx: 1492 r := &u.Real 1493 i := &u.Imag 1494 switch n.Type.Size() { 1495 case 8: 1496 pt := Types[TFLOAT32] 1497 return s.newValue2(ssa.OpComplexMake, n.Type, 1498 s.constFloat32(pt, r.Float32()), 1499 s.constFloat32(pt, i.Float32())) 1500 case 16: 1501 pt := Types[TFLOAT64] 1502 return s.newValue2(ssa.OpComplexMake, n.Type, 1503 s.constFloat64(pt, r.Float64()), 1504 s.constFloat64(pt, i.Float64())) 1505 default: 1506 s.Fatalf("bad float size %d", n.Type.Size()) 1507 return nil 1508 } 1509 1510 default: 1511 s.Unimplementedf("unhandled OLITERAL %v", n.Val().Ctype()) 1512 return nil 1513 } 1514 case OCONVNOP: 1515 to := n.Type 1516 from := n.Left.Type 1517 1518 // Assume everything will work out, so set up our return value. 1519 // Anything interesting that happens from here is a fatal. 1520 x := s.expr(n.Left) 1521 1522 // Special case for not confusing GC and liveness. 1523 // We don't want pointers accidentally classified 1524 // as not-pointers or vice-versa because of copy 1525 // elision. 
1526 if to.IsPtrShaped() != from.IsPtrShaped() { 1527 return s.newValue2(ssa.OpConvert, to, x, s.mem()) 1528 } 1529 1530 v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type 1531 1532 // CONVNOP closure 1533 if to.Etype == TFUNC && from.IsPtrShaped() { 1534 return v 1535 } 1536 1537 // named <--> unnamed type or typed <--> untyped const 1538 if from.Etype == to.Etype { 1539 return v 1540 } 1541 1542 // unsafe.Pointer <--> *T 1543 if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() { 1544 return v 1545 } 1546 1547 dowidth(from) 1548 dowidth(to) 1549 if from.Width != to.Width { 1550 s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width) 1551 return nil 1552 } 1553 if etypesign(from.Etype) != etypesign(to.Etype) { 1554 s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype) 1555 return nil 1556 } 1557 1558 if instrumenting { 1559 // These appear to be fine, but they fail the 1560 // integer constraint below, so okay them here. 
1561 // Sample non-integer conversion: map[string]string -> *uint8 1562 return v 1563 } 1564 1565 if etypesign(from.Etype) == 0 { 1566 s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to) 1567 return nil 1568 } 1569 1570 // integer, same width, same sign 1571 return v 1572 1573 case OCONV: 1574 x := s.expr(n.Left) 1575 ft := n.Left.Type // from type 1576 tt := n.Type // to type 1577 if ft.IsInteger() && tt.IsInteger() { 1578 var op ssa.Op 1579 if tt.Size() == ft.Size() { 1580 op = ssa.OpCopy 1581 } else if tt.Size() < ft.Size() { 1582 // truncation 1583 switch 10*ft.Size() + tt.Size() { 1584 case 21: 1585 op = ssa.OpTrunc16to8 1586 case 41: 1587 op = ssa.OpTrunc32to8 1588 case 42: 1589 op = ssa.OpTrunc32to16 1590 case 81: 1591 op = ssa.OpTrunc64to8 1592 case 82: 1593 op = ssa.OpTrunc64to16 1594 case 84: 1595 op = ssa.OpTrunc64to32 1596 default: 1597 s.Fatalf("weird integer truncation %s -> %s", ft, tt) 1598 } 1599 } else if ft.IsSigned() { 1600 // sign extension 1601 switch 10*ft.Size() + tt.Size() { 1602 case 12: 1603 op = ssa.OpSignExt8to16 1604 case 14: 1605 op = ssa.OpSignExt8to32 1606 case 18: 1607 op = ssa.OpSignExt8to64 1608 case 24: 1609 op = ssa.OpSignExt16to32 1610 case 28: 1611 op = ssa.OpSignExt16to64 1612 case 48: 1613 op = ssa.OpSignExt32to64 1614 default: 1615 s.Fatalf("bad integer sign extension %s -> %s", ft, tt) 1616 } 1617 } else { 1618 // zero extension 1619 switch 10*ft.Size() + tt.Size() { 1620 case 12: 1621 op = ssa.OpZeroExt8to16 1622 case 14: 1623 op = ssa.OpZeroExt8to32 1624 case 18: 1625 op = ssa.OpZeroExt8to64 1626 case 24: 1627 op = ssa.OpZeroExt16to32 1628 case 28: 1629 op = ssa.OpZeroExt16to64 1630 case 48: 1631 op = ssa.OpZeroExt32to64 1632 default: 1633 s.Fatalf("weird integer sign extension %s -> %s", ft, tt) 1634 } 1635 } 1636 return s.newValue1(op, n.Type, x) 1637 } 1638 1639 if ft.IsFloat() || tt.IsFloat() { 1640 conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}] 1641 if !ok { 1642 
s.Fatalf("weird float conversion %s -> %s", ft, tt) 1643 } 1644 op1, op2, it := conv.op1, conv.op2, conv.intermediateType 1645 1646 if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid { 1647 // normal case, not tripping over unsigned 64 1648 if op1 == ssa.OpCopy { 1649 if op2 == ssa.OpCopy { 1650 return x 1651 } 1652 return s.newValue1(op2, n.Type, x) 1653 } 1654 if op2 == ssa.OpCopy { 1655 return s.newValue1(op1, n.Type, x) 1656 } 1657 return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x)) 1658 } 1659 // Tricky 64-bit unsigned cases. 1660 if ft.IsInteger() { 1661 // therefore tt is float32 or float64, and ft is also unsigned 1662 if tt.Size() == 4 { 1663 return s.uint64Tofloat32(n, x, ft, tt) 1664 } 1665 if tt.Size() == 8 { 1666 return s.uint64Tofloat64(n, x, ft, tt) 1667 } 1668 s.Fatalf("weird unsigned integer to float conversion %s -> %s", ft, tt) 1669 } 1670 // therefore ft is float32 or float64, and tt is unsigned integer 1671 if ft.Size() == 4 { 1672 return s.float32ToUint64(n, x, ft, tt) 1673 } 1674 if ft.Size() == 8 { 1675 return s.float64ToUint64(n, x, ft, tt) 1676 } 1677 s.Fatalf("weird float to unsigned integer conversion %s -> %s", ft, tt) 1678 return nil 1679 } 1680 1681 if ft.IsComplex() && tt.IsComplex() { 1682 var op ssa.Op 1683 if ft.Size() == tt.Size() { 1684 op = ssa.OpCopy 1685 } else if ft.Size() == 8 && tt.Size() == 16 { 1686 op = ssa.OpCvt32Fto64F 1687 } else if ft.Size() == 16 && tt.Size() == 8 { 1688 op = ssa.OpCvt64Fto32F 1689 } else { 1690 s.Fatalf("weird complex conversion %s -> %s", ft, tt) 1691 } 1692 ftp := floatForComplex(ft) 1693 ttp := floatForComplex(tt) 1694 return s.newValue2(ssa.OpComplexMake, tt, 1695 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), 1696 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) 1697 } 1698 1699 s.Unimplementedf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) 1700 return nil 1701 1702 case ODOTTYPE: 1703 res, _ := s.dottype(n, false) 1704 return res 1705 1706 
// binary ops 1707 case OLT, OEQ, ONE, OLE, OGE, OGT: 1708 a := s.expr(n.Left) 1709 b := s.expr(n.Right) 1710 if n.Left.Type.IsComplex() { 1711 pt := floatForComplex(n.Left.Type) 1712 op := s.ssaOp(OEQ, pt) 1713 r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) 1714 i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) 1715 c := s.newValue2(ssa.OpAnd8, Types[TBOOL], r, i) 1716 switch n.Op { 1717 case OEQ: 1718 return c 1719 case ONE: 1720 return s.newValue1(ssa.OpNot, Types[TBOOL], c) 1721 default: 1722 s.Fatalf("ordered complex compare %s", n.Op) 1723 } 1724 } 1725 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b) 1726 case OMUL: 1727 a := s.expr(n.Left) 1728 b := s.expr(n.Right) 1729 if n.Type.IsComplex() { 1730 mulop := ssa.OpMul64F 1731 addop := ssa.OpAdd64F 1732 subop := ssa.OpSub64F 1733 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1734 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancellation error 1735 1736 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1737 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1738 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1739 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1740 1741 if pt != wt { // Widen for calculation 1742 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1743 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1744 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1745 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1746 } 1747 1748 xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1749 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal)) 1750 1751 if pt != wt { // Narrow to store back 1752 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1753 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1754 } 1755 1756 return 
s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1757 } 1758 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1759 1760 case ODIV: 1761 a := s.expr(n.Left) 1762 b := s.expr(n.Right) 1763 if n.Type.IsComplex() { 1764 // TODO this is not executed because the front-end substitutes a runtime call. 1765 // That probably ought to change; with modest optimization the widen/narrow 1766 // conversions could all be elided in larger expression trees. 1767 mulop := ssa.OpMul64F 1768 addop := ssa.OpAdd64F 1769 subop := ssa.OpSub64F 1770 divop := ssa.OpDiv64F 1771 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1772 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancellation error 1773 1774 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1775 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1776 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1777 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1778 1779 if pt != wt { // Widen for calculation 1780 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1781 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1782 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1783 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1784 } 1785 1786 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag)) 1787 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1788 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag)) 1789 1790 // TODO not sure if this is best done in wide precision or narrow 1791 // Double-rounding might be an issue. 1792 // Note that the pre-SSA implementation does the entire calculation 1793 // in wide format, so wide is compatible. 
1794 xreal = s.newValue2(divop, wt, xreal, denom) 1795 ximag = s.newValue2(divop, wt, ximag, denom) 1796 1797 if pt != wt { // Narrow to store back 1798 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1799 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1800 } 1801 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1802 } 1803 if n.Type.IsFloat() { 1804 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1805 } else { 1806 // do a size-appropriate check for zero 1807 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type)) 1808 s.check(cmp, panicdivide) 1809 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1810 } 1811 case OMOD: 1812 a := s.expr(n.Left) 1813 b := s.expr(n.Right) 1814 // do a size-appropriate check for zero 1815 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type)) 1816 s.check(cmp, panicdivide) 1817 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1818 case OADD, OSUB: 1819 a := s.expr(n.Left) 1820 b := s.expr(n.Right) 1821 if n.Type.IsComplex() { 1822 pt := floatForComplex(n.Type) 1823 op := s.ssaOp(n.Op, pt) 1824 return s.newValue2(ssa.OpComplexMake, n.Type, 1825 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1826 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1827 } 1828 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1829 case OAND, OOR, OHMUL, OXOR: 1830 a := s.expr(n.Left) 1831 b := s.expr(n.Right) 1832 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1833 case OLSH, ORSH: 1834 a := s.expr(n.Left) 1835 b := s.expr(n.Right) 1836 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1837 case OLROT: 1838 a := s.expr(n.Left) 1839 i := n.Right.Int64() 1840 if i <= 0 || i >= n.Type.Size()*8 { 1841 s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i) 1842 } 1843 return 
s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a) 1844 case OANDAND, OOROR: 1845 // To implement OANDAND (and OOROR), we introduce a 1846 // new temporary variable to hold the result. The 1847 // variable is associated with the OANDAND node in the 1848 // s.vars table (normally variables are only 1849 // associated with ONAME nodes). We convert 1850 // A && B 1851 // to 1852 // var = A 1853 // if var { 1854 // var = B 1855 // } 1856 // Using var in the subsequent block introduces the 1857 // necessary phi variable. 1858 el := s.expr(n.Left) 1859 s.vars[n] = el 1860 1861 b := s.endBlock() 1862 b.Kind = ssa.BlockIf 1863 b.SetControl(el) 1864 // In theory, we should set b.Likely here based on context. 1865 // However, gc only gives us likeliness hints 1866 // in a single place, for plain OIF statements, 1867 // and passing around context is finnicky, so don't bother for now. 1868 1869 bRight := s.f.NewBlock(ssa.BlockPlain) 1870 bResult := s.f.NewBlock(ssa.BlockPlain) 1871 if n.Op == OANDAND { 1872 b.AddEdgeTo(bRight) 1873 b.AddEdgeTo(bResult) 1874 } else if n.Op == OOROR { 1875 b.AddEdgeTo(bResult) 1876 b.AddEdgeTo(bRight) 1877 } 1878 1879 s.startBlock(bRight) 1880 er := s.expr(n.Right) 1881 s.vars[n] = er 1882 1883 b = s.endBlock() 1884 b.AddEdgeTo(bResult) 1885 1886 s.startBlock(bResult) 1887 return s.variable(n, Types[TBOOL]) 1888 case OCOMPLEX: 1889 r := s.expr(n.Left) 1890 i := s.expr(n.Right) 1891 return s.newValue2(ssa.OpComplexMake, n.Type, r, i) 1892 1893 // unary ops 1894 case OMINUS: 1895 a := s.expr(n.Left) 1896 if n.Type.IsComplex() { 1897 tp := floatForComplex(n.Type) 1898 negop := s.ssaOp(n.Op, tp) 1899 return s.newValue2(ssa.OpComplexMake, n.Type, 1900 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), 1901 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) 1902 } 1903 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1904 case ONOT, OCOM, OSQRT: 1905 a := s.expr(n.Left) 1906 return s.newValue1(s.ssaOp(n.Op, n.Type), 
a.Type, a) 1907 case OIMAG, OREAL: 1908 a := s.expr(n.Left) 1909 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a) 1910 case OPLUS: 1911 return s.expr(n.Left) 1912 1913 case OADDR: 1914 a, _ := s.addr(n.Left, n.Bounded) 1915 // Note we know the volatile result is false because you can't write &f() in Go. 1916 return a 1917 1918 case OINDREG: 1919 if int(n.Reg) != Thearch.REGSP { 1920 s.Unimplementedf("OINDREG of non-SP register %s in expr: %v", obj.Rconv(int(n.Reg)), n) 1921 return nil 1922 } 1923 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp) 1924 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1925 1926 case OIND: 1927 p := s.exprPtr(n.Left, false, n.Lineno) 1928 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1929 1930 case ODOT: 1931 t := n.Left.Type 1932 if canSSAType(t) { 1933 v := s.expr(n.Left) 1934 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v) 1935 } 1936 p, _ := s.addr(n, false) 1937 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1938 1939 case ODOTPTR: 1940 p := s.exprPtr(n.Left, false, n.Lineno) 1941 p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p) 1942 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1943 1944 case OINDEX: 1945 switch { 1946 case n.Left.Type.IsString(): 1947 a := s.expr(n.Left) 1948 i := s.expr(n.Right) 1949 i = s.extendIndex(i) 1950 if !n.Bounded { 1951 len := s.newValue1(ssa.OpStringLen, Types[TINT], a) 1952 s.boundsCheck(i, len) 1953 } 1954 ptrtyp := Ptrto(Types[TUINT8]) 1955 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 1956 if Isconst(n.Right, CTINT) { 1957 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 1958 } else { 1959 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 1960 } 1961 return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem()) 1962 case n.Left.Type.IsSlice(): 1963 p, _ := s.addr(n, false) 1964 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1965 case n.Left.Type.IsArray(): 1966 // TODO: fix when we can SSA 
arrays of length 1. 1967 p, _ := s.addr(n, false) 1968 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1969 default: 1970 s.Fatalf("bad type for index %v", n.Left.Type) 1971 return nil 1972 } 1973 1974 case OLEN, OCAP: 1975 switch { 1976 case n.Left.Type.IsSlice(): 1977 op := ssa.OpSliceLen 1978 if n.Op == OCAP { 1979 op = ssa.OpSliceCap 1980 } 1981 return s.newValue1(op, Types[TINT], s.expr(n.Left)) 1982 case n.Left.Type.IsString(): // string; not reachable for OCAP 1983 return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left)) 1984 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 1985 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 1986 default: // array 1987 return s.constInt(Types[TINT], n.Left.Type.NumElem()) 1988 } 1989 1990 case OSPTR: 1991 a := s.expr(n.Left) 1992 if n.Left.Type.IsSlice() { 1993 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 1994 } else { 1995 return s.newValue1(ssa.OpStringPtr, n.Type, a) 1996 } 1997 1998 case OITAB: 1999 a := s.expr(n.Left) 2000 return s.newValue1(ssa.OpITab, n.Type, a) 2001 2002 case OEFACE: 2003 tab := s.expr(n.Left) 2004 data := s.expr(n.Right) 2005 // The frontend allows putting things like struct{*byte} in 2006 // the data portion of an eface. But we don't want struct{*byte} 2007 // as a register type because (among other reasons) the liveness 2008 // analysis is confused by the "fat" variables that result from 2009 // such types being spilled. 2010 // So here we ensure that we are selecting the underlying pointer 2011 // when we build an eface. 2012 // TODO: get rid of this now that structs can be SSA'd? 
2013 for !data.Type.IsPtrShaped() { 2014 switch { 2015 case data.Type.IsArray(): 2016 data = s.newValue1I(ssa.OpArrayIndex, data.Type.ElemType(), 0, data) 2017 case data.Type.IsStruct(): 2018 for i := data.Type.NumFields() - 1; i >= 0; i-- { 2019 f := data.Type.FieldType(i) 2020 if f.Size() == 0 { 2021 // eface type could also be struct{p *byte; q [0]int} 2022 continue 2023 } 2024 data = s.newValue1I(ssa.OpStructSelect, f, int64(i), data) 2025 break 2026 } 2027 default: 2028 s.Fatalf("type being put into an eface isn't a pointer") 2029 } 2030 } 2031 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2032 2033 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2034 v := s.expr(n.Left) 2035 var i, j, k *ssa.Value 2036 low, high, max := n.SliceBounds() 2037 if low != nil { 2038 i = s.extendIndex(s.expr(low)) 2039 } 2040 if high != nil { 2041 j = s.extendIndex(s.expr(high)) 2042 } 2043 if max != nil { 2044 k = s.extendIndex(s.expr(max)) 2045 } 2046 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2047 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2048 2049 case OSLICESTR: 2050 v := s.expr(n.Left) 2051 var i, j *ssa.Value 2052 low, high, _ := n.SliceBounds() 2053 if low != nil { 2054 i = s.extendIndex(s.expr(low)) 2055 } 2056 if high != nil { 2057 j = s.extendIndex(s.expr(high)) 2058 } 2059 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 2060 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2061 2062 case OCALLFUNC: 2063 if isIntrinsicCall1(n) { 2064 return s.intrinsicCall1(n) 2065 } 2066 fallthrough 2067 2068 case OCALLINTER, OCALLMETH: 2069 a := s.call(n, callNormal) 2070 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2071 2072 case OGETG: 2073 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2074 2075 case OAPPEND: 2076 return s.append(n, false) 2077 2078 default: 2079 s.Unimplementedf("unhandled expr %s", n.Op) 2080 return nil 2081 } 2082 } 2083 2084 // append converts an OAPPEND node to SSA. 
2085 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2086 // adds it to s, and returns the Value. 2087 // If inplace is true, it writes the result of the OAPPEND expression n 2088 // back to the slice being appended to, and returns nil. 2089 // inplace MUST be set to false if the slice can be SSA'd. 2090 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2091 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2092 // 2093 // ptr, len, cap := s 2094 // newlen := len + 3 2095 // if newlen > cap { 2096 // ptr, len, cap = growslice(s, newlen) 2097 // newlen = len + 3 // recalculate to avoid a spill 2098 // } 2099 // // with write barriers, if needed: 2100 // *(ptr+len) = e1 2101 // *(ptr+len+1) = e2 2102 // *(ptr+len+2) = e3 2103 // return makeslice(ptr, newlen, cap) 2104 // 2105 // 2106 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2107 // 2108 // a := &s 2109 // ptr, len, cap := s 2110 // newlen := len + 3 2111 // if newlen > cap { 2112 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2113 // vardef(a) // if necessary, advise liveness we are writing a new a 2114 // *a.cap = newcap // write before ptr to avoid a spill 2115 // *a.ptr = newptr // with write barrier 2116 // } 2117 // newlen = len + 3 // recalculate to avoid a spill 2118 // *a.len = newlen 2119 // // with write barriers, if needed: 2120 // *(ptr+len) = e1 2121 // *(ptr+len+1) = e2 2122 // *(ptr+len+2) = e3 2123 2124 et := n.Type.Elem() 2125 pt := Ptrto(et) 2126 2127 // Evaluate slice 2128 sn := n.List.First() // the slice node is the first in the list 2129 2130 var slice, addr *ssa.Value 2131 if inplace { 2132 addr, _ = s.addr(sn, false) 2133 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2134 } else { 2135 slice = s.expr(sn) 2136 } 2137 2138 // Allocate new blocks 2139 grow := s.f.NewBlock(ssa.BlockPlain) 2140 assign := s.f.NewBlock(ssa.BlockPlain) 2141 2142 // Decide if we need to grow 2143 nargs := 
int64(n.List.Len() - 1) 2144 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2145 l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice) 2146 c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice) 2147 nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2148 2149 cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c) 2150 s.vars[&ptrVar] = p 2151 2152 if !inplace { 2153 s.vars[&newlenVar] = nl 2154 s.vars[&capVar] = c 2155 } else { 2156 s.vars[&lenVar] = l 2157 } 2158 2159 b := s.endBlock() 2160 b.Kind = ssa.BlockIf 2161 b.Likely = ssa.BranchUnlikely 2162 b.SetControl(cmp) 2163 b.AddEdgeTo(grow) 2164 b.AddEdgeTo(assign) 2165 2166 // Call growslice 2167 s.startBlock(grow) 2168 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(n.Type.Elem())}, s.sb) 2169 2170 r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) 2171 2172 if inplace { 2173 if sn.Op == ONAME { 2174 // Tell liveness we're about to build a new slice 2175 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem()) 2176 } 2177 capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_cap), addr) 2178 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem()) 2179 s.insertWBstore(pt, addr, r[0], n.Lineno, 0) 2180 // load the value we just stored to avoid having to spill it 2181 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2182 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2183 } else { 2184 s.vars[&ptrVar] = r[0] 2185 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs)) 2186 s.vars[&capVar] = r[2] 2187 } 2188 2189 b = s.endBlock() 2190 b.AddEdgeTo(assign) 2191 2192 // assign new elements to slots 2193 s.startBlock(assign) 2194 2195 if inplace { 2196 l = s.variable(&lenVar, Types[TINT]) // generates phi for len 2197 nl = 
s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2198 lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr) 2199 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem()) 2200 } 2201 2202 // Evaluate args 2203 type argRec struct { 2204 // if store is true, we're appending the value v. If false, we're appending the 2205 // value at *v. If store==false, isVolatile reports whether the source 2206 // is in the outargs section of the stack frame. 2207 v *ssa.Value 2208 store bool 2209 isVolatile bool 2210 } 2211 args := make([]argRec, 0, nargs) 2212 for _, n := range n.List.Slice()[1:] { 2213 if canSSAType(n.Type) { 2214 args = append(args, argRec{v: s.expr(n), store: true}) 2215 } else { 2216 v, isVolatile := s.addr(n, false) 2217 args = append(args, argRec{v: v, isVolatile: isVolatile}) 2218 } 2219 } 2220 2221 p = s.variable(&ptrVar, pt) // generates phi for ptr 2222 if !inplace { 2223 nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl 2224 c = s.variable(&capVar, Types[TINT]) // generates phi for cap 2225 } 2226 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2227 // TODO: just one write barrier call for all of these writes? 2228 // TODO: maybe just one writeBarrier.enabled check? 
2229 for i, arg := range args { 2230 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i))) 2231 if arg.store { 2232 if haspointers(et) { 2233 s.insertWBstore(et, addr, arg.v, n.Lineno, 0) 2234 } else { 2235 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem()) 2236 } 2237 } else { 2238 if haspointers(et) { 2239 s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile) 2240 } else { 2241 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem()) 2242 } 2243 } 2244 } 2245 2246 delete(s.vars, &ptrVar) 2247 if inplace { 2248 delete(s.vars, &lenVar) 2249 return nil 2250 } 2251 delete(s.vars, &newlenVar) 2252 delete(s.vars, &capVar) 2253 // make result 2254 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2255 } 2256 2257 // condBranch evaluates the boolean expression cond and branches to yes 2258 // if cond is true and no if cond is false. 2259 // This function is intended to handle && and || better than just calling 2260 // s.expr(cond) and branching on the result. 2261 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { 2262 if cond.Op == OANDAND { 2263 mid := s.f.NewBlock(ssa.BlockPlain) 2264 s.stmtList(cond.Ninit) 2265 s.condBranch(cond.Left, mid, no, max8(likely, 0)) 2266 s.startBlock(mid) 2267 s.condBranch(cond.Right, yes, no, likely) 2268 return 2269 // Note: if likely==1, then both recursive calls pass 1. 2270 // If likely==-1, then we don't have enough information to decide 2271 // whether the first branch is likely or not. So we pass 0 for 2272 // the likeliness of the first branch. 2273 // TODO: have the frontend give us branch prediction hints for 2274 // OANDAND and OOROR nodes (if it ever has such info). 
2275 } 2276 if cond.Op == OOROR { 2277 mid := s.f.NewBlock(ssa.BlockPlain) 2278 s.stmtList(cond.Ninit) 2279 s.condBranch(cond.Left, yes, mid, min8(likely, 0)) 2280 s.startBlock(mid) 2281 s.condBranch(cond.Right, yes, no, likely) 2282 return 2283 // Note: if likely==-1, then both recursive calls pass -1. 2284 // If likely==1, then we don't have enough info to decide 2285 // the likelihood of the first branch. 2286 } 2287 if cond.Op == ONOT { 2288 s.stmtList(cond.Ninit) 2289 s.condBranch(cond.Left, no, yes, -likely) 2290 return 2291 } 2292 c := s.expr(cond) 2293 b := s.endBlock() 2294 b.Kind = ssa.BlockIf 2295 b.SetControl(c) 2296 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness 2297 b.AddEdgeTo(yes) 2298 b.AddEdgeTo(no) 2299 } 2300 2301 type skipMask uint8 2302 2303 const ( 2304 skipPtr skipMask = 1 << iota 2305 skipLen 2306 skipCap 2307 ) 2308 2309 // assign does left = right. 2310 // Right has already been evaluated to ssa, left has not. 2311 // If deref is true, then we do left = *right instead (and right has already been nil-checked). 2312 // If deref is true and right == nil, just do left = 0. 2313 // If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage. 2314 // Include a write barrier if wb is true. 2315 // skip indicates assignments (at the top level) that can be avoided. 2316 func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask, rightIsVolatile bool) { 2317 if left.Op == ONAME && isblank(left) { 2318 return 2319 } 2320 t := left.Type 2321 dowidth(t) 2322 if s.canSSA(left) { 2323 if deref { 2324 s.Fatalf("can SSA LHS %s but not RHS %s", left, right) 2325 } 2326 if left.Op == ODOT { 2327 // We're assigning to a field of an ssa-able value. 2328 // We need to build a new structure with the new value for the 2329 // field we're assigning and the old values for the other fields. 
2330 // For instance: 2331 // type T struct {a, b, c int} 2332 // var T x 2333 // x.b = 5 2334 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c} 2335 2336 // Grab information about the structure type. 2337 t := left.Left.Type 2338 nf := t.NumFields() 2339 idx := fieldIdx(left) 2340 2341 // Grab old value of structure. 2342 old := s.expr(left.Left) 2343 2344 // Make new structure. 2345 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t) 2346 2347 // Add fields as args. 2348 for i := 0; i < nf; i++ { 2349 if i == idx { 2350 new.AddArg(right) 2351 } else { 2352 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old)) 2353 } 2354 } 2355 2356 // Recursively assign the new value we've made to the base of the dot op. 2357 s.assign(left.Left, new, false, false, line, 0, rightIsVolatile) 2358 // TODO: do we need to update named values here? 2359 return 2360 } 2361 // Update variable assignment. 2362 s.vars[left] = right 2363 s.addNamedValue(left, right) 2364 return 2365 } 2366 // Left is not ssa-able. Compute its address. 2367 addr, _ := s.addr(left, false) 2368 if left.Op == ONAME && skip == 0 { 2369 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem()) 2370 } 2371 if deref { 2372 // Treat as a mem->mem move. 2373 if right == nil { 2374 s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem()) 2375 return 2376 } 2377 if wb { 2378 s.insertWBmove(t, addr, right, line, rightIsVolatile) 2379 return 2380 } 2381 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem()) 2382 return 2383 } 2384 // Treat as a store. 2385 if wb { 2386 if skip&skipPtr != 0 { 2387 // Special case: if we don't write back the pointers, don't bother 2388 // doing the write barrier check. 
2389 s.storeTypeScalars(t, addr, right, skip) 2390 return 2391 } 2392 s.insertWBstore(t, addr, right, line, skip) 2393 return 2394 } 2395 if skip != 0 { 2396 if skip&skipPtr == 0 { 2397 s.storeTypePtrs(t, addr, right) 2398 } 2399 s.storeTypeScalars(t, addr, right, skip) 2400 return 2401 } 2402 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem()) 2403 } 2404 2405 // zeroVal returns the zero value for type t. 2406 func (s *state) zeroVal(t *Type) *ssa.Value { 2407 switch { 2408 case t.IsInteger(): 2409 switch t.Size() { 2410 case 1: 2411 return s.constInt8(t, 0) 2412 case 2: 2413 return s.constInt16(t, 0) 2414 case 4: 2415 return s.constInt32(t, 0) 2416 case 8: 2417 return s.constInt64(t, 0) 2418 default: 2419 s.Fatalf("bad sized integer type %s", t) 2420 } 2421 case t.IsFloat(): 2422 switch t.Size() { 2423 case 4: 2424 return s.constFloat32(t, 0) 2425 case 8: 2426 return s.constFloat64(t, 0) 2427 default: 2428 s.Fatalf("bad sized float type %s", t) 2429 } 2430 case t.IsComplex(): 2431 switch t.Size() { 2432 case 8: 2433 z := s.constFloat32(Types[TFLOAT32], 0) 2434 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2435 case 16: 2436 z := s.constFloat64(Types[TFLOAT64], 0) 2437 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2438 default: 2439 s.Fatalf("bad sized complex type %s", t) 2440 } 2441 2442 case t.IsString(): 2443 return s.constEmptyString(t) 2444 case t.IsPtrShaped(): 2445 return s.constNil(t) 2446 case t.IsBoolean(): 2447 return s.constBool(false) 2448 case t.IsInterface(): 2449 return s.constInterface(t) 2450 case t.IsSlice(): 2451 return s.constSlice(t) 2452 case t.IsStruct(): 2453 n := t.NumFields() 2454 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2455 for i := 0; i < n; i++ { 2456 v.AddArg(s.zeroVal(t.FieldType(i).(*Type))) 2457 } 2458 return v 2459 } 2460 s.Unimplementedf("zero for type %v not implemented", t) 2461 return nil 2462 } 2463 2464 type callKind int8 2465 2466 const ( 2467 callNormal 
callKind = iota 2468 callDefer 2469 callGo 2470 ) 2471 2472 // isSSAIntrinsic1 returns true if n is a call to a recognized 1-arg intrinsic 2473 // that can be handled by the SSA backend. 2474 // SSA uses this, but so does the front end to see if should not 2475 // inline a function because it is a candidate for intrinsic 2476 // substitution. 2477 func isSSAIntrinsic1(s *Sym) bool { 2478 // The test below is not quite accurate -- in the event that 2479 // a function is disabled on a per-function basis, for example 2480 // because of hash-keyed binary failure search, SSA might be 2481 // disabled for that function but it would not be noted here, 2482 // and thus an inlining would not occur (in practice, inlining 2483 // so far has only been noticed for Bswap32 and the 16-bit count 2484 // leading/trailing instructions, but heuristics might change 2485 // in the future or on different architectures). 2486 if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 { 2487 return false 2488 } 2489 if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/sys" { 2490 switch s.Name { 2491 case 2492 "Ctz64", "Ctz32", "Ctz16", 2493 "Bswap64", "Bswap32": 2494 return true 2495 } 2496 } 2497 return false 2498 } 2499 2500 func isIntrinsicCall1(n *Node) bool { 2501 if n == nil || n.Left == nil { 2502 return false 2503 } 2504 return isSSAIntrinsic1(n.Left.Sym) 2505 } 2506 2507 // intrinsicFirstArg extracts arg from n.List and eval 2508 func (s *state) intrinsicFirstArg(n *Node) *ssa.Value { 2509 x := n.List.First() 2510 if x.Op == OAS { 2511 x = x.Right 2512 } 2513 return s.expr(x) 2514 } 2515 2516 // intrinsicCall1 converts a call to a recognized 1-arg intrinsic 2517 // into the intrinsic 2518 func (s *state) intrinsicCall1(n *Node) *ssa.Value { 2519 var result *ssa.Value 2520 switch n.Left.Sym.Name { 2521 case "Ctz64": 2522 result = s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n)) 2523 case "Ctz32": 2524 result = 
s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n)) 2525 case "Ctz16": 2526 result = s.newValue1(ssa.OpCtz16, Types[TUINT16], s.intrinsicFirstArg(n)) 2527 case "Bswap64": 2528 result = s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n)) 2529 case "Bswap32": 2530 result = s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n)) 2531 } 2532 if result == nil { 2533 Fatalf("Unknown special call: %v", n.Left.Sym) 2534 } 2535 if ssa.IntrinsicsDebug > 0 { 2536 Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, result.LongString()) 2537 } 2538 return result 2539 } 2540 2541 // Calls the function n using the specified call type. 2542 // Returns the address of the return value (or nil if none). 2543 func (s *state) call(n *Node, k callKind) *ssa.Value { 2544 var sym *Sym // target symbol (if static) 2545 var closure *ssa.Value // ptr to closure to run (if dynamic) 2546 var codeptr *ssa.Value // ptr to target code (if dynamic) 2547 var rcvr *ssa.Value // receiver to set 2548 fn := n.Left 2549 switch n.Op { 2550 case OCALLFUNC: 2551 if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC { 2552 sym = fn.Sym 2553 break 2554 } 2555 closure = s.expr(fn) 2556 case OCALLMETH: 2557 if fn.Op != ODOTMETH { 2558 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 2559 } 2560 if k == callNormal { 2561 sym = fn.Sym 2562 break 2563 } 2564 n2 := newname(fn.Sym) 2565 n2.Class = PFUNC 2566 n2.Lineno = fn.Lineno 2567 closure = s.expr(n2) 2568 // Note: receiver is already assigned in n.List, so we don't 2569 // want to set it here. 
2570 case OCALLINTER: 2571 if fn.Op != ODOTINTER { 2572 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 2573 } 2574 i := s.expr(fn.Left) 2575 itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i) 2576 if k != callNormal { 2577 s.nilCheck(itab) 2578 } 2579 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab 2580 itab = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], itabidx, itab) 2581 if k == callNormal { 2582 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem()) 2583 } else { 2584 closure = itab 2585 } 2586 rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i) 2587 } 2588 dowidth(fn.Type) 2589 stksize := fn.Type.ArgWidth() // includes receiver 2590 2591 // Run all argument assignments. The arg slots have already 2592 // been offset by the appropriate amount (+2*widthptr for go/defer, 2593 // +widthptr for interface calls). 2594 // For OCALLMETH, the receiver is set in these statements. 2595 s.stmtList(n.List) 2596 2597 // Set receiver (for interface calls) 2598 if rcvr != nil { 2599 argStart := Ctxt.FixedFrameSize() 2600 if k != callNormal { 2601 argStart += int64(2 * Widthptr) 2602 } 2603 addr := s.entryNewValue1I(ssa.OpOffPtr, Types[TUINTPTR], argStart, s.sp) 2604 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem()) 2605 } 2606 2607 // Defer/go args 2608 if k != callNormal { 2609 // Write argsize and closure (args to Newproc/Deferproc). 
2610 argsize := s.constInt32(Types[TUINT32], int32(stksize)) 2611 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, s.sp, argsize, s.mem()) 2612 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), int64(Widthptr), s.sp) 2613 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem()) 2614 stksize += 2 * int64(Widthptr) 2615 } 2616 2617 // call target 2618 bNext := s.f.NewBlock(ssa.BlockPlain) 2619 var call *ssa.Value 2620 switch { 2621 case k == callDefer: 2622 call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem()) 2623 case k == callGo: 2624 call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem()) 2625 case closure != nil: 2626 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem()) 2627 call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem()) 2628 case codeptr != nil: 2629 call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem()) 2630 case sym != nil: 2631 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem()) 2632 default: 2633 Fatalf("bad call type %s %v", n.Op, n) 2634 } 2635 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 2636 2637 // Finish call block 2638 s.vars[&memVar] = call 2639 b := s.endBlock() 2640 b.Kind = ssa.BlockCall 2641 b.SetControl(call) 2642 b.AddEdgeTo(bNext) 2643 if k == callDefer { 2644 // Add recover edge to exit code. 2645 b.Kind = ssa.BlockDefer 2646 r := s.f.NewBlock(ssa.BlockPlain) 2647 s.startBlock(r) 2648 s.exit() 2649 b.AddEdgeTo(r) 2650 b.Likely = ssa.BranchLikely 2651 } 2652 2653 // Start exit block, find address of result. 2654 s.startBlock(bNext) 2655 // Keep input pointer args live across calls. This is a bandaid until 1.8. 
2656 for _, n := range s.ptrargs { 2657 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem()) 2658 } 2659 res := n.Left.Type.Results() 2660 if res.NumFields() == 0 || k != callNormal { 2661 // call has no return value. Continue with the next statement. 2662 return nil 2663 } 2664 fp := res.Field(0) 2665 return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp) 2666 } 2667 2668 // etypesign returns the signed-ness of e, for integer/pointer etypes. 2669 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 2670 func etypesign(e EType) int8 { 2671 switch e { 2672 case TINT8, TINT16, TINT32, TINT64, TINT: 2673 return -1 2674 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 2675 return +1 2676 } 2677 return 0 2678 } 2679 2680 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node. 2681 // This improves the effectiveness of cse by using the same Aux values for the 2682 // same symbols. 2683 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} { 2684 switch sym.(type) { 2685 default: 2686 s.Fatalf("sym %v is of uknown type %T", sym, sym) 2687 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol: 2688 // these are the only valid types 2689 } 2690 2691 if lsym, ok := s.varsyms[n]; ok { 2692 return lsym 2693 } else { 2694 s.varsyms[n] = sym 2695 return sym 2696 } 2697 } 2698 2699 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 2700 // Also returns a bool reporting whether the returned value is "volatile", that is it 2701 // points to the outargs section and thus the referent will be clobbered by any call. 2702 // The value that the returned Value represents is guaranteed to be non-nil. 2703 // If bounded is true then this address does not require a nil check for its operand 2704 // even if that would otherwise be implied. 
2705 func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) { 2706 t := Ptrto(n.Type) 2707 switch n.Op { 2708 case ONAME: 2709 switch n.Class { 2710 case PEXTERN: 2711 // global variable 2712 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Sym}) 2713 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb) 2714 // TODO: Make OpAddr use AuxInt as well as Aux. 2715 if n.Xoffset != 0 { 2716 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 2717 } 2718 return v, false 2719 case PPARAM: 2720 // parameter slot 2721 v := s.decladdrs[n] 2722 if v != nil { 2723 return v, false 2724 } 2725 if n.String() == ".fp" { 2726 // Special arg that points to the frame pointer. 2727 // (Used by the race detector, others?) 2728 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 2729 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false 2730 } 2731 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) 2732 return nil, false 2733 case PAUTO: 2734 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n}) 2735 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 2736 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 
2737 // ensure that we reuse symbols for out parameters so 2738 // that cse works on their addresses 2739 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 2740 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 2741 default: 2742 s.Unimplementedf("variable address class %v not implemented", classnames[n.Class]) 2743 return nil, false 2744 } 2745 case OINDREG: 2746 // indirect off a register 2747 // used for storing/loading arguments/returns to/from callees 2748 if int(n.Reg) != Thearch.REGSP { 2749 s.Unimplementedf("OINDREG of non-SP register %s in addr: %v", obj.Rconv(int(n.Reg)), n) 2750 return nil, false 2751 } 2752 return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true 2753 case OINDEX: 2754 if n.Left.Type.IsSlice() { 2755 a := s.expr(n.Left) 2756 i := s.expr(n.Right) 2757 i = s.extendIndex(i) 2758 len := s.newValue1(ssa.OpSliceLen, Types[TINT], a) 2759 if !n.Bounded { 2760 s.boundsCheck(i, len) 2761 } 2762 p := s.newValue1(ssa.OpSlicePtr, t, a) 2763 return s.newValue2(ssa.OpPtrIndex, t, p, i), false 2764 } else { // array 2765 a, isVolatile := s.addr(n.Left, bounded) 2766 i := s.expr(n.Right) 2767 i = s.extendIndex(i) 2768 len := s.constInt(Types[TINT], n.Left.Type.NumElem()) 2769 if !n.Bounded { 2770 s.boundsCheck(i, len) 2771 } 2772 return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Elem()), a, i), isVolatile 2773 } 2774 case OIND: 2775 return s.exprPtr(n.Left, bounded, n.Lineno), false 2776 case ODOT: 2777 p, isVolatile := s.addr(n.Left, bounded) 2778 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile 2779 case ODOTPTR: 2780 p := s.exprPtr(n.Left, bounded, n.Lineno) 2781 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false 2782 case OCLOSUREVAR: 2783 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 2784 s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8]))), false 2785 case OCONVNOP: 2786 addr, isVolatile := s.addr(n.Left, bounded) 2787 return s.newValue1(ssa.OpCopy, t, addr), isVolatile // ensure that addr 
has the right type 2788 case OCALLFUNC, OCALLINTER, OCALLMETH: 2789 return s.call(n, callNormal), true 2790 2791 default: 2792 s.Unimplementedf("unhandled addr %v", n.Op) 2793 return nil, false 2794 } 2795 } 2796 2797 // canSSA reports whether n is SSA-able. 2798 // n must be an ONAME (or an ODOT sequence with an ONAME base). 2799 func (s *state) canSSA(n *Node) bool { 2800 if Debug['N'] != 0 { 2801 return false 2802 } 2803 for n.Op == ODOT { 2804 n = n.Left 2805 } 2806 if n.Op != ONAME { 2807 return false 2808 } 2809 if n.Addrtaken { 2810 return false 2811 } 2812 if n.isParamHeapCopy() { 2813 return false 2814 } 2815 if n.Class == PAUTOHEAP { 2816 Fatalf("canSSA of PAUTOHEAP %v", n) 2817 } 2818 switch n.Class { 2819 case PEXTERN: 2820 return false 2821 case PPARAMOUT: 2822 if hasdefer { 2823 // TODO: handle this case? Named return values must be 2824 // in memory so that the deferred function can see them. 2825 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 2826 return false 2827 } 2828 if s.cgoUnsafeArgs { 2829 // Cgo effectively takes the address of all result args, 2830 // but the compiler can't see that. 2831 return false 2832 } 2833 } 2834 if n.Class == PPARAM && n.String() == ".this" { 2835 // wrappers generated by genwrapper need to update 2836 // the .this pointer in place. 2837 // TODO: treat as a PPARMOUT? 2838 return false 2839 } 2840 return canSSAType(n.Type) 2841 // TODO: try to make more variables SSAable? 2842 } 2843 2844 // canSSA reports whether variables of type t are SSA-able. 2845 func canSSAType(t *Type) bool { 2846 dowidth(t) 2847 if t.Width > int64(4*Widthptr) { 2848 // 4*Widthptr is an arbitrary constant. We want it 2849 // to be at least 3*Widthptr so slices can be registerized. 2850 // Too big and we'll introduce too much register pressure. 2851 return false 2852 } 2853 switch t.Etype { 2854 case TARRAY: 2855 // We can't do arrays because dynamic indexing is 2856 // not supported on SSA variables. 
2857 // TODO: maybe allow if length is <=1? All indexes 2858 // are constant? Might be good for the arrays 2859 // introduced by the compiler for variadic functions. 2860 return false 2861 case TSTRUCT: 2862 if t.NumFields() > ssa.MaxStruct { 2863 return false 2864 } 2865 for _, t1 := range t.Fields().Slice() { 2866 if !canSSAType(t1.Type) { 2867 return false 2868 } 2869 } 2870 return true 2871 default: 2872 return true 2873 } 2874 } 2875 2876 // exprPtr evaluates n to a pointer and nil-checks it. 2877 func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value { 2878 p := s.expr(n) 2879 if bounded || n.NonNil { 2880 if s.f.Config.Debug_checknil() && lineno > 1 { 2881 s.f.Config.Warnl(lineno, "removed nil check") 2882 } 2883 return p 2884 } 2885 s.nilCheck(p) 2886 return p 2887 } 2888 2889 // nilCheck generates nil pointer checking code. 2890 // Starts a new block on return, unless nil checks are disabled. 2891 // Used only for automatically inserted nil checks, 2892 // not for user code like 'x != nil'. 2893 func (s *state) nilCheck(ptr *ssa.Value) { 2894 if Disable_checknil != 0 { 2895 return 2896 } 2897 chk := s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem()) 2898 b := s.endBlock() 2899 b.Kind = ssa.BlockCheck 2900 b.SetControl(chk) 2901 bNext := s.f.NewBlock(ssa.BlockPlain) 2902 b.AddEdgeTo(bNext) 2903 s.startBlock(bNext) 2904 } 2905 2906 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. 2907 // Starts a new block on return. 2908 func (s *state) boundsCheck(idx, len *ssa.Value) { 2909 if Debug['B'] != 0 { 2910 return 2911 } 2912 // TODO: convert index to full width? 2913 // TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero. 2914 2915 // bounds check 2916 cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len) 2917 s.check(cmp, Panicindex) 2918 } 2919 2920 // sliceBoundsCheck generates slice bounds checking code. 
// Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
	if Debug['B'] != 0 {
		// -B disables bounds checking.
		return
	}
	// TODO: convert index to full width?
	// TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.

	// bounds check
	cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
	s.check(cmp, panicslice)
}

// check generates code: if cmp (a bool) is true, panic by calling fn.
// Panic blocks are memoized per (fn, line) in s.panics, so repeated
// checks on the same source line share a single panic call.
func (s *state) check(cmp *ssa.Value, fn *Node) {
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely
	bNext := s.f.NewBlock(ssa.BlockPlain)
	line := s.peekLine()
	bPanic := s.panics[funcLine{fn, line}]
	if bPanic == nil {
		bPanic = s.f.NewBlock(ssa.BlockPlain)
		s.panics[funcLine{fn, line}] = bPanic
		s.startBlock(bPanic)
		// The panic call takes/returns memory to ensure that the right
		// memory state is observed if the panic happens.
		s.rtcall(fn, false, nil)
	}
	b.AddEdgeTo(bNext)
	b.AddEdgeTo(bPanic)
	s.startBlock(bNext)
}

// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
// If returns is true, the block is marked as a call block. A new block
// is started to load the return values.
func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
	// Write args to the stack
	var off int64 // TODO: arch-dependent starting offset?
	// Store each argument at its aligned offset in the out-args area.
	for _, arg := range args {
		t := arg.Type
		off = Rnd(off, t.Alignment())
		ptr := s.sp
		if off != 0 {
			ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
		}
		size := t.Size()
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem())
		off += size
	}
	off = Rnd(off, int64(Widthptr))

	// Issue call
	call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
	s.vars[&memVar] = call

	// Finish block
	b := s.endBlock()
	if !returns {
		b.Kind = ssa.BlockExit
		b.SetControl(call)
		call.AuxInt = off
		if len(results) > 0 {
			Fatalf("panic call can't have results")
		}
		return nil
	}
	b.Kind = ssa.BlockCall
	b.SetControl(call)
	bNext := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bNext)
	s.startBlock(bNext)

	// Keep input pointer args live across calls. This is a bandaid until 1.8.
	for _, n := range s.ptrargs {
		s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
	}

	// Load results from their aligned offsets following the arguments.
	res := make([]*ssa.Value, len(results))
	for i, t := range results {
		off = Rnd(off, t.Alignment())
		ptr := s.sp
		if off != 0 {
			ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
		}
		res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
		off += t.Size()
	}
	off = Rnd(off, int64(Widthptr))

	// Remember how much callee stack space we needed.
	call.AuxInt = off

	return res
}

// insertWBmove inserts the assignment *left = *right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightIsVolatile bool) {
	// Emits:
	// if writeBarrier.enabled {
	//   typedmemmove(&t, left, right)
	// } else {
	//   *left = *right
	// }

	if s.noWB {
		// Function was marked //go:nowritebarrier; emitting one is a fatal error.
		s.Fatalf("write barrier prohibited")
	}
	if s.WBLineno == 0 {
		// Record the first line at which a write barrier was emitted.
		s.WBLineno = left.Line
	}
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
	flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
	// TODO: select the .enabled field. It is currently first, so not needed for now.
	// Load word, test byte, avoiding partial register write from load byte.
	flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
	flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely
	b.SetControl(flag)
	b.AddEdgeTo(bThen)
	b.AddEdgeTo(bElse)

	s.startBlock(bThen)

	if !rightIsVolatile {
		// Issue typedmemmove call.
		taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
		s.rtcall(typedmemmove, true, nil, taddr, left, right)
	} else {
		// Copy to temp location if the source is volatile (will be clobbered by
		// a function call). Marshaling the args to typedmemmove might clobber the
		// value we're trying to move.
		tmp := temp(t)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
		tmpaddr, _ := s.addr(tmp, true)
		s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), tmpaddr, right, s.mem())
		// Issue typedmemmove call.
		taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
		s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr)
		// Mark temp as dead.
		s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
	}
	s.endBlock().AddEdgeTo(bEnd)

	// else: barrier disabled, plain memory move.
	s.startBlock(bElse)
	s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), left, right, s.mem())
	s.endBlock().AddEdgeTo(bEnd)

	s.startBlock(bEnd)

	if Debug_wb > 0 {
		Warnl(line, "write barrier")
	}
}

// insertWBstore inserts the assignment *left = right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) {
	// Emits:
	// store scalar fields
	// if writeBarrier.enabled {
	//   writebarrierptr for pointer fields
	// } else {
	//   store pointer fields
	// }

	if s.noWB {
		// Function was marked //go:nowritebarrier; emitting one is a fatal error.
		s.Fatalf("write barrier prohibited")
	}
	if s.WBLineno == 0 {
		// Record the first line at which a write barrier was emitted.
		s.WBLineno = left.Line
	}
	s.storeTypeScalars(t, left, right, skip)

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
	flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
	// TODO: select the .enabled field. It is currently first, so not needed for now.
	// Load word, test byte, avoiding partial register write from load byte.
	flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
	flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely
	b.SetControl(flag)
	b.AddEdgeTo(bThen)
	b.AddEdgeTo(bElse)

	// Issue write barriers for pointer writes.
	s.startBlock(bThen)
	s.storeTypePtrsWB(t, left, right)
	s.endBlock().AddEdgeTo(bEnd)

	// Issue regular stores for pointer writes.
	s.startBlock(bElse)
	s.storeTypePtrs(t, left, right)
	s.endBlock().AddEdgeTo(bEnd)

	s.startBlock(bEnd)

	if Debug_wb > 0 {
		Warnl(line, "write barrier")
	}
}

// storeTypeScalars does *left = right for all scalar (non-pointer) parts of t.
// skip selects string/slice len and cap fields that are known unchanged
// and may be left unwritten.
func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
	switch {
	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem())
	case t.IsPtrShaped():
		// no scalar fields.
	case t.IsString():
		if skip&skipLen != 0 {
			return
		}
		len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
		lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
	case t.IsSlice():
		if skip&skipLen == 0 {
			len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
			lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
		}
		if skip&skipCap == 0 {
			cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
			capAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), 2*s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem())
		}
	case t.IsInterface():
		// itab field doesn't need a write barrier (even though it is a pointer).
		itab := s.newValue1(ssa.OpITab, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem())
	case t.IsStruct():
		// Recurse into each field at its offset.
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypeScalars(ft.(*Type), addr, val, 0)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}

// storeTypePtrs does *left = right for all pointer parts of t,
// using plain stores (no write barrier).
func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
	case t.IsString():
		ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
	case t.IsSlice():
		ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
	case t.IsInterface():
		// itab field is treated as a scalar.
		idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
	case t.IsStruct():
		// Recurse into pointer-containing fields only.
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !haspointers(ft.(*Type)) {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrs(ft.(*Type), addr, val)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}

// storeTypePtrsWB does *left = right with a write barrier for all pointer
// parts of t, by calling the runtime's writebarrierptr for each one.
func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		s.rtcall(writebarrierptr, true, nil, left, right)
	case t.IsString():
		ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
		s.rtcall(writebarrierptr, true, nil, left, ptr)
	case t.IsSlice():
		ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
		s.rtcall(writebarrierptr, true, nil, left, ptr)
	case t.IsInterface():
		idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
		s.rtcall(writebarrierptr, true, nil, idataAddr, idata)
	case t.IsStruct():
		// Recurse into pointer-containing fields only.
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !haspointers(ft.(*Type)) {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrsWB(ft.(*Type), addr, val)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}

// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// t is a slice, ptr to array, or string type.
func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
	var elemtype *Type
	var ptrtype *Type
	var ptr *ssa.Value
	var len *ssa.Value
	var cap *ssa.Value
	zero := s.constInt(Types[TINT], 0)
	// Extract ptr/len/cap of the operand, depending on its kind.
	switch {
	case t.IsSlice():
		elemtype = t.Elem()
		ptrtype = Ptrto(elemtype)
		ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
		len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
		cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
	case t.IsString():
		elemtype = Types[TUINT8]
		ptrtype = Ptrto(elemtype)
		ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
		len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
		cap = len
	case t.IsPtr():
		if !t.Elem().IsArray() {
			s.Fatalf("bad ptr to array in slice %v\n", t)
		}
		elemtype = t.Elem().Elem()
		ptrtype = Ptrto(elemtype)
		s.nilCheck(v)
		ptr = v
		len = s.constInt(Types[TINT], t.Elem().NumElem())
		cap = len
	default:
		s.Fatalf("bad type in slice %v\n", t)
	}

	// Set default values
	if i == nil {
		i = zero
	}
	if j == nil {
		j = len
	}
	if k == nil {
		k = cap
	}

	// Panic if slice indices are not in bounds.
	s.sliceBoundsCheck(i, j)
	if j != k {
		s.sliceBoundsCheck(j, k)
	}
	if k != cap {
		s.sliceBoundsCheck(k, cap)
	}

	// Generate the following code assuming that indexes are in bounds.
	// The conditional is to make sure that we don't generate a slice
	// that points to the next object in memory.
	// rlen = j-i
	// rcap = k-i
	// delta = i*elemsize
	// if rcap == 0 {
	//    delta = 0
	// }
	// rptr = p+delta
	// result = (SliceMake rptr rlen rcap)
	subOp := s.ssaOp(OSUB, Types[TINT])
	eqOp := s.ssaOp(OEQ, Types[TINT])
	mulOp := s.ssaOp(OMUL, Types[TINT])
	rlen := s.newValue2(subOp, Types[TINT], j, i)
	var rcap *ssa.Value
	switch {
	case t.IsString():
		// Capacity of the result is unimportant. However, we use
		// rcap to test if we've generated a zero-length slice.
		// Use length of strings for that.
		rcap = rlen
	case j == k:
		rcap = rlen
	default:
		rcap = s.newValue2(subOp, Types[TINT], k, i)
	}

	// delta = # of elements to offset pointer by.
	s.vars[&deltaVar] = i

	// Generate code to set delta=0 if the resulting capacity is zero.
	// Skipped when i is a known zero constant.
	if !((i.Op == ssa.OpConst64 && i.AuxInt == 0) ||
		(i.Op == ssa.OpConst32 && int32(i.AuxInt) == 0)) {
		cmp := s.newValue2(eqOp, Types[TBOOL], rcap, zero)

		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.Likely = ssa.BranchUnlikely
		b.SetControl(cmp)

		// Generate block which zeros the delta variable.
		nz := s.f.NewBlock(ssa.BlockPlain)
		b.AddEdgeTo(nz)
		s.startBlock(nz)
		s.vars[&deltaVar] = zero
		s.endBlock()

		// All done.
		merge := s.f.NewBlock(ssa.BlockPlain)
		b.AddEdgeTo(merge)
		nz.AddEdgeTo(merge)
		s.startBlock(merge)

		// TODO: use conditional moves somehow?
	}

	// Compute rptr = ptr + delta * elemsize
	rptr := s.newValue2(ssa.OpAddPtr, ptrtype, ptr, s.newValue2(mulOp, Types[TINT], s.variable(&deltaVar, Types[TINT]), s.constInt(Types[TINT], elemtype.Width)))
	delete(s.vars, &deltaVar)
	return rptr, rlen, rcap
}

// u2fcvtTab parameterizes unsigned-integer-to-float conversion by the
// ops needed at each integer/float width.
type u2fcvtTab struct {
	geq, cvt2F, and, rsh, or, add ssa.Op
	one                           func(*state, ssa.Type, int64) *ssa.Value
}

var u64_f64 u2fcvtTab = u2fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to64F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd64F,
	one:   (*state).constInt64,
}

var u64_f32 u2fcvtTab = u2fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to32F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd32F,
	one:   (*state).constInt64,
}

// Excess generality on a machine with 64-bit integer registers.
// Not used on AMD64.
var u32_f32 u2fcvtTab = u2fcvtTab{
	geq:   ssa.OpGeq32,
	cvt2F: ssa.OpCvt32to32F,
	and:   ssa.OpAnd32,
	rsh:   ssa.OpRsh32Ux32,
	or:    ssa.OpOr32,
	add:   ssa.OpAdd32F,
	one: func(s *state, t ssa.Type, x int64) *ssa.Value {
		return s.constInt32(t, int32(x))
	},
}

// uint64Tofloat64 converts the uint64 value x (for AST node n) to float64.
func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uintTofloat(&u64_f64, n, x, ft, tt)
}

// uint64Tofloat32 converts the uint64 value x (for AST node n) to float32.
func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uintTofloat(&u64_f32, n, x, ft, tt)
}

// uintTofloat emits the branchy unsigned-to-float conversion described below,
// using the ops from cvttab. ft is the source type, tt the destination type.
func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// if x >= 0 {
	//    result = (floatY) x
	// } else {
	//    y = uintX(x) ; y = x & 1
	//    z = uintX(x) ; z = z >> 1
	//    z = z | y
	//    result = floatY(z)
	//    result = result + result
	// }
	//
	// Code borrowed from old code generator.
	// What's going on: large 64-bit "unsigned" looks like
	// negative number to hardware's integer-to-float
	// conversion. However, because the mantissa is only
	// 63 bits, we don't need the LSB, so instead we do an
	// unsigned right shift (divide by two), convert, and
	// double. However, before we do that, we need to be
	// sure that we do not lose a "1" if that made the
	// difference in the resulting rounding. Therefore, we
	// preserve it, and OR (not ADD) it back in. The case
	// that matters is when the eleven discarded bits are
	// equal to 10000000001; that rounds up, and the 1 cannot
	// be lost else it would round down if the LSB of the
	// candidate mantissa is 0.
	cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	// then: value fits; direct conversion.
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2F, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	// else: halve (preserving the LSB), convert, then double.
	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	one := cvttab.one(s, ft, 1)
	y := s.newValue2(cvttab.and, ft, x, one)
	z := s.newValue2(cvttab.rsh, ft, x, one)
	z = s.newValue2(cvttab.or, ft, z, y)
	a := s.newValue1(cvttab.cvt2F, tt, z)
	a1 := s.newValue2(cvttab.add, tt, a, a)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}

// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
		s.Fatalf("node must be a map or a channel")
	}
	// Emits:
	// if n == nil {
	//   return 0
	// } else {
	//   // len
	//   return *((*int)n)
	//   // cap
	//   return *(((*int)n)+1)
	// }
	lenType := n.Type
	nilValue := s.constNil(Types[TUINTPTR])
	cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchUnlikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	// length/capacity of a nil map/chan is zero
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	s.vars[n] = s.zeroVal(lenType)
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	if n.Op == OLEN {
		// length is stored in the first word for map/chan
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
	} else if n.Op == OCAP {
		// capacity is stored in the second word for chan
		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
	} else {
		s.Fatalf("op must be OLEN or OCAP")
	}
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, lenType)
}

// f2uCvtTab parameterizes float-to-unsigned-integer conversion by the
// ops needed at each float width.
type f2uCvtTab struct {
	ltf, cvt2U, subf ssa.Op
	value            func(*state, ssa.Type, float64) *ssa.Value
}

var f32_u64 f2uCvtTab = f2uCvtTab{
	ltf:   ssa.OpLess32F,
	cvt2U: ssa.OpCvt32Fto64,
	subf:  ssa.OpSub32F,
	value: (*state).constFloat32,
}

var f64_u64 f2uCvtTab = f2uCvtTab{
	ltf:   ssa.OpLess64F,
	cvt2U: ssa.OpCvt64Fto64,
	subf:  ssa.OpSub64F,
	value: (*state).constFloat64,
}

// float32ToUint64 converts the float32 value x (for AST node n) to uint64.
func (s *state)
float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f32_u64, n, x, ft, tt)
}

// float64ToUint64 converts the float64 value x (for AST node n) to uint64.
func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f64_u64, n, x, ft, tt)
}

// floatToUint emits the branchy float-to-uint64 conversion described below,
// using the ops from cvttab. ft is the source type, tt the destination type.
func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// if x < 9223372036854775808.0 {
	//    result = uintY(x)
	// } else {
	//    y = x - 9223372036854775808.0
	//    z = uintY(y)
	//    result = z | -9223372036854775808
	// }
	twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0)
	cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	// then: value fits in the signed range; direct conversion.
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2U, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)

	// else: subtract 2^63, convert, then set the high bit.
	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	y := s.newValue2(cvttab.subf, ft, x, twoToThe63)
	y = s.newValue1(cvttab.cvt2U, tt, y)
	z := s.constInt64(tt, -9223372036854775808)
	a1 := s.newValue2(ssa.OpOr64, tt, y, z)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)

	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}

// ifaceType returns the value for the word containing the type.
// n is the node for the interface expression.
// v is the corresponding value.
func (s *state) ifaceType(n *Node, v *ssa.Value) *ssa.Value {
	byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)

	if n.Type.IsEmptyInterface() {
		// Have *eface. The type is the first word in the struct.
		return s.newValue1(ssa.OpITab, byteptr, v)
	}

	// Have *iface.
	// The first word in the struct is the *itab.
	// If the *itab is nil, return 0.
	// Otherwise, the second word in the *itab is the type.

	tab := s.newValue1(ssa.OpITab, byteptr, v)
	s.vars[&typVar] = tab
	isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(isnonnil)
	b.Likely = ssa.BranchLikely

	bLoad := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	b.AddEdgeTo(bLoad)
	b.AddEdgeTo(bEnd)
	bLoad.AddEdgeTo(bEnd)

	// Non-nil itab: load the type word out of it.
	s.startBlock(bLoad)
	off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab)
	s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
	s.endBlock()

	s.startBlock(bEnd)
	typ := s.variable(&typVar, byteptr)
	delete(s.vars, &typVar)
	return typ
}

// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
	iface := s.expr(n.Left)
	typ := s.ifaceType(n.Left, iface)  // actual concrete type
	target := s.expr(typename(n.Type)) // target type
	if !isdirectiface(n.Type) {
		// walk rewrites ODOTTYPE/OAS2DOTTYPE into runtime calls except for this case.
		Fatalf("dottype needs a direct iface type %s", n.Type)
	}

	if Debug_typeassert > 0 {
		Warnl(n.Lineno, "type assertion inlined")
	}

	// TODO: If we have a nonempty interface and its itab field is nil,
	// then this test is redundant and ifaceType should just branch directly to bFail.
	cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cond)
	b.Likely = ssa.BranchLikely

	byteptr := Ptrto(Types[TUINT8])

	bOk := s.f.NewBlock(ssa.BlockPlain)
	bFail := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bOk)
	b.AddEdgeTo(bFail)

	if !commaok {
		// on failure, panic by calling panicdottype
		s.startBlock(bFail)
		taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: typenamesym(n.Left.Type)}, s.sb)
		s.rtcall(panicdottype, false, nil, typ, target, taddr)

		// on success, return idata field
		s.startBlock(bOk)
		return s.newValue1(ssa.OpIData, n.Type, iface), nil
	}

	// commaok is the more complicated case because we have
	// a control flow merge point.
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	// type assertion succeeded
	s.startBlock(bOk)
	s.vars[&idataVar] = s.newValue1(ssa.OpIData, n.Type, iface)
	s.vars[&okVar] = s.constBool(true)
	s.endBlock()
	bOk.AddEdgeTo(bEnd)

	// type assertion failed
	s.startBlock(bFail)
	s.vars[&idataVar] = s.constNil(byteptr)
	s.vars[&okVar] = s.constBool(false)
	s.endBlock()
	bFail.AddEdgeTo(bEnd)

	// merge point
	s.startBlock(bEnd)
	res = s.variable(&idataVar, byteptr)
	resok = s.variable(&okVar, Types[TBOOL])
	delete(s.vars, &idataVar)
	delete(s.vars, &okVar)
	return res, resok
}

// checkgoto checks that a goto from from to to does not
// jump into a block or jump over variable declarations.
// It is a copy of checkgoto in the pre-SSA backend,
// modified only for line number handling.
// TODO: document how this works and why it is designed the way it is.
func (s *state) checkgoto(from *Node, to *Node) {
	if from.Sym == to.Sym {
		return
	}

	// Measure the depth (via the Link chain) of each label's symbol.
	nf := 0
	for fs := from.Sym; fs != nil; fs = fs.Link {
		nf++
	}
	nt := 0
	for fs := to.Sym; fs != nil; fs = fs.Link {
		nt++
	}
	// Walk from's chain up to to's depth; if they don't meet at the
	// same symbol, the goto crosses a scope boundary.
	fs := from.Sym
	for ; nf > nt; nf-- {
		fs = fs.Link
	}
	if fs != to.Sym {
		// decide what to complain about.
		// prefer to complain about 'into block' over declarations,
		// so scan backward to find most recent block or else dcl.
		var block *Sym

		var dcl *Sym
		ts := to.Sym
		for ; nt > nf; nt-- {
			if ts.Pkg == nil {
				block = ts
			} else {
				dcl = ts
			}
			ts = ts.Link
		}

		for ts != fs {
			if ts.Pkg == nil {
				block = ts
			} else {
				dcl = ts
			}
			ts = ts.Link
			fs = fs.Link
		}

		lno := from.Left.Lineno
		if block != nil {
			yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
		} else {
			yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
		}
	}
}

// variable returns the value of a variable at the current location.
func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
	v := s.vars[name]
	if v == nil {
		// No definition seen yet in this block; record a forward
		// reference to be resolved once the CFG is complete.
		v = s.newValue0A(ssa.OpFwdRef, t, name)
		s.fwdRefs = append(s.fwdRefs, v)
		s.vars[name] = v
		s.addNamedValue(name, v)
	}
	return v
}

// mem returns the current memory pseudo-variable.
func (s *state) mem() *ssa.Value {
	return s.variable(&memVar, ssa.TypeMem)
}

// linkForwardReferences resolves all pending FwdRef values.
func (s *state) linkForwardReferences(dm *sparseDefState) {

	// Build SSA graph. Each variable on its first use in a basic block
	// leaves a FwdRef in that block representing the incoming value
	// of that variable. This function links that ref up with possible definitions,
	// inserting Phi values as needed.
	// This is essentially the algorithm
	// described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
	// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
	// Differences:
	//  - We use FwdRef nodes to postpone phi building until the CFG is
	//    completely built. That way we can avoid the notion of "sealed"
	//    blocks.
	//  - Phi optimization is a separate pass (in ../ssa/phielim.go).
	for len(s.fwdRefs) > 0 {
		// Pop and resolve; resolving may push new FwdRefs.
		v := s.fwdRefs[len(s.fwdRefs)-1]
		s.fwdRefs = s.fwdRefs[:len(s.fwdRefs)-1]
		s.resolveFwdRef(v, dm)
	}
}

// resolveFwdRef modifies v to be the variable's value at the start of its block.
// v must be a FwdRef op.
func (s *state) resolveFwdRef(v *ssa.Value, dm *sparseDefState) {
	b := v.Block
	name := v.Aux.(*Node)
	v.Aux = nil
	if b == s.f.Entry {
		// Live variable at start of function.
		if s.canSSA(name) {
			if strings.HasPrefix(name.Sym.Name, "autotmp_") {
				// It's likely that this is an uninitialized variable in the entry block.
				s.Fatalf("Treating auto as if it were arg, func %s, node %v, value %v", b.Func.Name, name, v)
			}
			v.Op = ssa.OpArg
			v.Aux = name
			return
		}
		// Not SSAable. Load it.
		addr := s.decladdrs[name]
		if addr == nil {
			// TODO: closure args reach here.
			s.Unimplementedf("unhandled closure arg %s at entry to function %s", name, b.Func.Name)
		}
		if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok {
			s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name)
		}
		v.Op = ssa.OpLoad
		v.AddArgs(addr, s.startmem)
		return
	}
	if len(b.Preds) == 0 {
		// This block is dead; we have no predecessors and we're not the entry block.
		// It doesn't matter what we use here as long as it is well-formed.
		v.Op = ssa.OpUnknown
		return
	}
	// Find variable value on each predecessor.
	var argstore [4]*ssa.Value // stack storage for the common small-predecessor case
	args := argstore[:0]
	for _, e := range b.Preds {
		p := e.Block()
		p = dm.FindBetterDefiningBlock(name, p) // try sparse improvement on p
		args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line))
	}

	// Decide if we need a phi or not. We need a phi if there
	// are two different args (which are both not v).
	var w *ssa.Value
	for _, a := range args {
		if a == v {
			continue // self-reference
		}
		if a == w {
			continue // already have this witness
		}
		if w != nil {
			// two witnesses, need a phi value
			v.Op = ssa.OpPhi
			v.AddArgs(args...)
			return
		}
		w = a // save witness
	}
	if w == nil {
		s.Fatalf("no witness for reachable phi %s", v)
	}
	// One witness. Make v a copy of w.
	v.Op = ssa.OpCopy
	v.AddArg(w)
}

// lookupVarOutgoing finds the variable's value at the end of block b.
func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int32) *ssa.Value {
	for {
		if v, ok := s.defvars[b.ID][name]; ok {
			return v
		}
		// The variable is not defined by b and we haven't looked it up yet.
		// If b has exactly one predecessor, loop to look it up there.
		// Otherwise, give up and insert a new FwdRef and resolve it later.
		if len(b.Preds) != 1 {
			break
		}
		b = b.Preds[0].Block()
	}
	// Generate a FwdRef for the variable and return that.
	v := b.NewValue0A(line, ssa.OpFwdRef, t, name)
	s.fwdRefs = append(s.fwdRefs, v)
	s.defvars[b.ID][name] = v
	s.addNamedValue(name, v)
	return v
}

// addNamedValue records v as a value of the named variable n in
// f.NamedValues. Several classes of names are deliberately not tracked.
func (s *state) addNamedValue(n *Node, v *ssa.Value) {
	if n.Class == Pxxx {
		// Don't track our dummy nodes (&memVar etc.).
		return
	}
	if strings.HasPrefix(n.Sym.Name, "autotmp_") {
		// Don't track autotmp_ variables.
		return
	}
	if n.Class == PPARAMOUT {
		// Don't track named output values. This prevents return values
		// from being assigned too early. See #14591 and #14762. TODO: allow this.
		return
	}
	if n.Class == PAUTO && n.Xoffset != 0 {
		s.Fatalf("AUTO var with offset %s %d", n, n.Xoffset)
	}
	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
	values, ok := s.f.NamedValues[loc]
	if !ok {
		// First value recorded for this slot; remember the slot itself.
		s.f.Names = append(s.f.Names, loc)
	}
	s.f.NamedValues[loc] = append(values, v)
}

// Branch is an unresolved branch.
type Branch struct {
	P *obj.Prog  // branch instruction
	B *ssa.Block // target
}

// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
	// Branches remembers all the branch instructions we've seen
	// and where they would like to go.
	Branches []Branch

	// bstart remembers where each block starts (indexed by block ID)
	bstart []*obj.Prog
}

// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
	return Pc
}

// SetLineno sets the current source line number.
func (s *SSAGenState) SetLineno(l int32) {
	lineno = l
}

// genssa appends entries to ptxt for each instruction in f.
// gcargs and gclocals are filled in with pointer maps for the frame.
func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
	var s SSAGenState

	e := f.Config.Frontend().(*ssaExport)
	// We're about to emit a bunch of Progs.
	// Since the only way to get here is to explicitly request it,
	// just fail on unimplemented instead of trying to unwind our mess.
	e.mustImplement = true

	// Remember where each block starts.
	s.bstart = make([]*obj.Prog, f.NumBlocks())

	// When logging, map each emitted Prog back to the value/block that
	// produced it, for the annotated listing printed after generation.
	var valueProgs map[*obj.Prog]*ssa.Value
	var blockProgs map[*obj.Prog]*ssa.Block
	var logProgs = e.log
	if logProgs {
		valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
		blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
		f.Logf("genssa %s\n", f.Name)
		blockProgs[Pc] = f.Blocks[0]
	}

	// Emit basic blocks
	for i, b := range f.Blocks {
		s.bstart[b.ID] = Pc
		// Emit values in block
		Thearch.SSAMarkMoves(&s, b)
		for _, v := range b.Values {
			x := Pc
			Thearch.SSAGenValue(&s, v)
			if logProgs {
				// Everything emitted between the saved x and the
				// current Pc came from v.
				for ; x != Pc; x = x.Link {
					valueProgs[x] = v
				}
			}
		}
		// Emit control flow instructions for block
		var next *ssa.Block
		if i < len(f.Blocks)-1 && (Debug['N'] == 0 || b.Kind == ssa.BlockCall) {
			// If -N, leave next==nil so every block with successors
			// ends in a JMP (except call blocks - plive doesn't like
			// select{send,recv} followed by a JMP call). Helps keep
			// line numbers for otherwise empty blocks.
3982 next = f.Blocks[i+1] 3983 } 3984 x := Pc 3985 Thearch.SSAGenBlock(&s, b, next) 3986 if logProgs { 3987 for ; x != Pc; x = x.Link { 3988 blockProgs[x] = b 3989 } 3990 } 3991 } 3992 3993 // Resolve branches 3994 for _, br := range s.Branches { 3995 br.P.To.Val = s.bstart[br.B.ID] 3996 } 3997 3998 if logProgs { 3999 for p := ptxt; p != nil; p = p.Link { 4000 var s string 4001 if v, ok := valueProgs[p]; ok { 4002 s = v.String() 4003 } else if b, ok := blockProgs[p]; ok { 4004 s = b.String() 4005 } else { 4006 s = " " // most value and branch strings are 2-3 characters long 4007 } 4008 f.Logf("%s\t%s\n", s, p) 4009 } 4010 if f.Config.HTML != nil { 4011 saved := ptxt.Ctxt.LineHist.PrintFilenameOnly 4012 ptxt.Ctxt.LineHist.PrintFilenameOnly = true 4013 var buf bytes.Buffer 4014 buf.WriteString("<code>") 4015 buf.WriteString("<dl class=\"ssa-gen\">") 4016 for p := ptxt; p != nil; p = p.Link { 4017 buf.WriteString("<dt class=\"ssa-prog-src\">") 4018 if v, ok := valueProgs[p]; ok { 4019 buf.WriteString(v.HTML()) 4020 } else if b, ok := blockProgs[p]; ok { 4021 buf.WriteString(b.HTML()) 4022 } 4023 buf.WriteString("</dt>") 4024 buf.WriteString("<dd class=\"ssa-prog\">") 4025 buf.WriteString(html.EscapeString(p.String())) 4026 buf.WriteString("</dd>") 4027 buf.WriteString("</li>") 4028 } 4029 buf.WriteString("</dl>") 4030 buf.WriteString("</code>") 4031 f.Config.HTML.WriteColumn("genssa", buf.String()) 4032 ptxt.Ctxt.LineHist.PrintFilenameOnly = saved 4033 } 4034 } 4035 4036 // Emit static data 4037 if f.StaticData != nil { 4038 for _, n := range f.StaticData.([]*Node) { 4039 if !gen_as_init(n, false) { 4040 Fatalf("non-static data marked as static: %v\n\n", n) 4041 } 4042 } 4043 } 4044 4045 // Allocate stack frame 4046 allocauto(ptxt) 4047 4048 // Generate gc bitmaps. 4049 liveness(Curfn, ptxt, gcargs, gclocals) 4050 4051 // Add frame prologue. Zero ambiguously live variables. 
	Thearch.Defframe(ptxt)
	if Debug['f'] != 0 {
		frame(0)
	}

	// Remove leftover instrumentation from the instruction stream.
	removevardef(ptxt)

	f.Config.HTML.Close()
}

// movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset
func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
	p := Prog(as)
	// TODO: use zero register on archs that support it.
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = 0
	p.To.Type = obj.TYPE_MEM
	p.To.Reg = regnum
	p.To.Offset = offset
	// Advance past the bytes just zeroed and report how many remain.
	offset += width
	nleft = nbytes - width
	return nleft, offset
}

// FloatingEQNEJump describes one branch instruction used to lower a
// floating-point equality/inequality block.
type FloatingEQNEJump struct {
	Jump  obj.As // branch opcode to emit
	Index int    // which successor (index into b.Succs) the branch targets
}

// oneFPJump emits one branch instruction for a floating-point EQ/NE
// block and appends the unresolved Branch to branches, returning the
// extended slice.
func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch {
	p := Prog(jumps.Jump)
	p.To.Type = obj.TYPE_BRANCH
	to := jumps.Index
	branches = append(branches, Branch{p, b.Succs[to].Block()})
	if to == 1 {
		// Branching to the second successor inverts the sense of the
		// prediction.
		likely = -likely
	}
	// liblink reorders the instruction stream as it sees fit.
	// Pass along what we know so liblink can make use of it.
	// TODO: Once we've fully switched to SSA,
	// make liblink leave our output alone.
	switch likely {
	case ssa.BranchUnlikely:
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
	case ssa.BranchLikely:
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
	}
	return branches
}

// SSAGenFPJump emits the branch instructions for a floating-point
// comparison block, choosing the pair of jumps based on which successor
// (if any) falls through as the next block.
func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
	likely := b.Likely
	switch next {
	case b.Succs[0].Block():
		s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches)
	case b.Succs[1].Block():
		s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
	default:
		// Neither successor is the fallthrough block; emit an explicit
		// jump to Succs[1] after the conditional branches.
		s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
		q := Prog(obj.AJMP)
		q.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
	}
}

// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
	AddAux2(a, v, v.AuxInt)
}

// AddAux2 is like AddAux but takes the integer offset explicitly
// instead of reading it from v.AuxInt.
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
	if a.Type != obj.TYPE_MEM {
		v.Fatalf("bad AddAux addr %v", a)
	}
	// add integer offset
	a.Offset += offset

	// If no additional symbol offset, we're done.
	if v.Aux == nil {
		return
	}
	// Add symbol's offset from its base register.
	switch sym := v.Aux.(type) {
	case *ssa.ExternSymbol:
		a.Name = obj.NAME_EXTERN
		switch s := sym.Sym.(type) {
		case *Sym:
			a.Sym = Linksym(s)
		case *obj.LSym:
			a.Sym = s
		default:
			v.Fatalf("ExternSymbol.Sym is %T", s)
		}
	case *ssa.ArgSymbol:
		n := sym.Node.(*Node)
		a.Name = obj.NAME_PARAM
		a.Node = n
		a.Sym = Linksym(n.Orig.Sym)
		a.Offset += n.Xoffset // TODO: why do I have to add this here? I don't for auto variables.
	case *ssa.AutoSymbol:
		n := sym.Node.(*Node)
		a.Name = obj.NAME_AUTO
		a.Node = n
		a.Sym = Linksym(n.Sym)
	default:
		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
	}
}

// extendIndex extends v to a full int width.
func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
	size := v.Type.Size()
	if size == s.config.IntSize {
		return v
	}
	if size > s.config.IntSize {
		// TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
		// the high word and branch to out-of-bounds failure if it is not 0.
		s.Unimplementedf("64->32 index truncation not implemented")
		return v
	}

	// Extend value to the required size
	// The switch key encodes both widths: 10*size + IntSize,
	// e.g. 14 means a 1-byte value extended to a 4-byte int.
	var op ssa.Op
	if v.Type.IsSigned() {
		switch 10*size + s.config.IntSize {
		case 14: // 1 -> 4
			op = ssa.OpSignExt8to32
		case 18: // 1 -> 8
			op = ssa.OpSignExt8to64
		case 24: // 2 -> 4
			op = ssa.OpSignExt16to32
		case 28: // 2 -> 8
			op = ssa.OpSignExt16to64
		case 48: // 4 -> 8
			op = ssa.OpSignExt32to64
		default:
			s.Fatalf("bad signed index extension %s", v.Type)
		}
	} else {
		switch 10*size + s.config.IntSize {
		case 14: // 1 -> 4
			op = ssa.OpZeroExt8to32
		case 18: // 1 -> 8
			op = ssa.OpZeroExt8to64
		case 24: // 2 -> 4
			op = ssa.OpZeroExt16to32
		case 28: // 2 -> 8
			op = ssa.OpZeroExt16to64
		case 48: // 4 -> 8
			op = ssa.OpZeroExt32to64
		default:
			s.Fatalf("bad unsigned index extension %s", v.Type)
		}
	}
	return s.newValue1(op, Types[TINT], v)
}

// SSARegNum returns the register (in cmd/internal/obj numbering) to
// which v has been allocated. Panics if v is not assigned to a
// register.
// TODO: Make this panic again once it stops happening routinely.
4219 func SSARegNum(v *ssa.Value) int16 { 4220 reg := v.Block.Func.RegAlloc[v.ID] 4221 if reg == nil { 4222 v.Unimplementedf("nil regnum for value: %s\n%s\n", v.LongString(), v.Block.Func) 4223 return 0 4224 } 4225 return Thearch.SSARegToReg[reg.(*ssa.Register).Num] 4226 } 4227 4228 // AutoVar returns a *Node and int64 representing the auto variable and offset within it 4229 // where v should be spilled. 4230 func AutoVar(v *ssa.Value) (*Node, int64) { 4231 loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) 4232 if v.Type.Size() > loc.Type.Size() { 4233 v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) 4234 } 4235 return loc.N.(*Node), loc.Off 4236 } 4237 4238 // fieldIdx finds the index of the field referred to by the ODOT node n. 4239 func fieldIdx(n *Node) int { 4240 t := n.Left.Type 4241 f := n.Sym 4242 if !t.IsStruct() { 4243 panic("ODOT's LHS is not a struct") 4244 } 4245 4246 var i int 4247 for _, t1 := range t.Fields().Slice() { 4248 if t1.Sym != f { 4249 i++ 4250 continue 4251 } 4252 if t1.Offset != n.Xoffset { 4253 panic("field offset doesn't match") 4254 } 4255 return i 4256 } 4257 panic(fmt.Sprintf("can't find field in expr %s\n", n)) 4258 4259 // TODO: keep the result of this function somewhere in the ODOT Node 4260 // so we don't have to recompute it each time we need it. 4261 } 4262 4263 // ssaExport exports a bunch of compiler services for the ssa backend. 
type ssaExport struct {
	log           bool // forward Logf output to stdout
	unimplemented bool // an unimplemented feature was hit; further output is unreliable
	mustImplement bool // treat unimplemented features as fatal errors instead of falling back
}

// The Type* accessors expose the compiler's predeclared types to the
// architecture-independent ssa package.
func (s *ssaExport) TypeBool() ssa.Type    { return Types[TBOOL] }
func (s *ssaExport) TypeInt8() ssa.Type    { return Types[TINT8] }
func (s *ssaExport) TypeInt16() ssa.Type   { return Types[TINT16] }
func (s *ssaExport) TypeInt32() ssa.Type   { return Types[TINT32] }
func (s *ssaExport) TypeInt64() ssa.Type   { return Types[TINT64] }
func (s *ssaExport) TypeUInt8() ssa.Type   { return Types[TUINT8] }
func (s *ssaExport) TypeUInt16() ssa.Type  { return Types[TUINT16] }
func (s *ssaExport) TypeUInt32() ssa.Type  { return Types[TUINT32] }
func (s *ssaExport) TypeUInt64() ssa.Type  { return Types[TUINT64] }
func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
func (s *ssaExport) TypeInt() ssa.Type     { return Types[TINT] }
func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
func (s *ssaExport) TypeString() ssa.Type  { return Types[TSTRING] }
func (s *ssaExport) TypeBytePtr() ssa.Type { return Ptrto(Types[TUINT8]) }

// StringData returns a symbol (a *Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (*ssaExport) StringData(s string) interface{} {
	// TODO: is idealstring correct? It might not matter...
	_, data := stringsym(s)
	return &ssa.ExternSymbol{Typ: idealstring, Sym: data}
}

// Auto returns a new AUTO variable of the given type, added to the
// current function's declaration list.
func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode {
	n := temp(t.(*Type)) // Note: adds new auto to Curfn.Func.Dcl list
	e.mustImplement = true // This modifies the input to SSA, so we want to make sure we succeed from here!
	return n
}

// SplitString returns the (pointer, length) halves of the string slot name.
// SSA-able autos are split into two fresh named variables; anything else
// keeps the original node with adjusted offsets.
func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := Ptrto(Types[TUINT8])
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this string up into two separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}

// SplitInterface returns the (itab/type, data) halves of the interface
// slot name, following the same split-or-offset scheme as SplitString.
func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	t := Ptrto(Types[TUINT8])
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this interface up into two separate variables.
		f := ".itab"
		if n.Type.IsEmptyInterface() {
			f = ".type"
		}
		c := e.namedAuto(n.Sym.Name+f, t)
		d := e.namedAuto(n.Sym.Name+".data", t)
		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}

// SplitSlice returns the (pointer, length, capacity) parts of the slice
// slot name, following the same split-or-offset scheme as SplitString.
func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := Ptrto(name.Type.ElemType().(*Type))
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this slice up into three separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		c := e.namedAuto(n.Sym.Name+".cap", lenType)
		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0}
	}
	// Return the three parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}

// SplitComplex returns the (real, imag) halves of the complex slot name,
// following the same split-or-offset scheme as SplitString.
func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	s := name.Type.Size() / 2 // size of each float half
	var t *Type
	if s == 8 {
		t = Types[TFLOAT64]
	} else {
		t = Types[TFLOAT32]
	}
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this complex up into two separate variables.
		c := e.namedAuto(n.Sym.Name+".real", t)
		d := e.namedAuto(n.Sym.Name+".imag", t)
		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}

// SplitStruct returns the slot for field i of the struct slot name,
// following the same split-or-offset scheme as SplitString.
func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
	n := name.N.(*Node)
	st := name.Type
	ft := st.FieldType(i)
	if n.Class == PAUTO && !n.Addrtaken {
		// Note: the _ field may appear several times. But
		// have no fear, identically-named but distinct Autos are
		// ok, albeit maybe confusing for a debugger.
		x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft)
		return ssa.LocalSlot{N: x, Type: ft, Off: 0}
	}
	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}

// namedAuto returns a new AUTO variable with the given name and type.
4382 func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode { 4383 t := typ.(*Type) 4384 s := &Sym{Name: name, Pkg: autopkg} 4385 n := Nod(ONAME, nil, nil) 4386 s.Def = n 4387 s.Def.Used = true 4388 n.Sym = s 4389 n.Type = t 4390 n.Class = PAUTO 4391 n.Addable = true 4392 n.Ullman = 1 4393 n.Esc = EscNever 4394 n.Xoffset = 0 4395 n.Name.Curfn = Curfn 4396 Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) 4397 4398 dowidth(t) 4399 e.mustImplement = true 4400 4401 return n 4402 } 4403 4404 func (e *ssaExport) CanSSA(t ssa.Type) bool { 4405 return canSSAType(t.(*Type)) 4406 } 4407 4408 func (e *ssaExport) Line(line int32) string { 4409 return linestr(line) 4410 } 4411 4412 // Log logs a message from the compiler. 4413 func (e *ssaExport) Logf(msg string, args ...interface{}) { 4414 // If e was marked as unimplemented, anything could happen. Ignore. 4415 if e.log && !e.unimplemented { 4416 fmt.Printf(msg, args...) 4417 } 4418 } 4419 4420 func (e *ssaExport) Log() bool { 4421 return e.log 4422 } 4423 4424 // Fatal reports a compiler error and exits. 4425 func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) { 4426 // If e was marked as unimplemented, anything could happen. Ignore. 4427 if !e.unimplemented { 4428 lineno = line 4429 Fatalf(msg, args...) 4430 } 4431 } 4432 4433 // Unimplemented reports that the function cannot be compiled. 4434 // It will be removed once SSA work is complete. 4435 func (e *ssaExport) Unimplementedf(line int32, msg string, args ...interface{}) { 4436 if e.mustImplement { 4437 lineno = line 4438 Fatalf(msg, args...) 4439 } 4440 const alwaysLog = false // enable to calculate top unimplemented features 4441 if !e.unimplemented && (e.log || alwaysLog) { 4442 // first implementation failure, print explanation 4443 fmt.Printf("SSA unimplemented: "+msg+"\n", args...) 
4444 } 4445 e.unimplemented = true 4446 } 4447 4448 // Warnl reports a "warning", which is usually flag-triggered 4449 // logging output for the benefit of tests. 4450 func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) { 4451 Warnl(line, fmt_, args...) 4452 } 4453 4454 func (e *ssaExport) Debug_checknil() bool { 4455 return Debug_checknil != 0 4456 } 4457 4458 func (n *Node) Typ() ssa.Type { 4459 return n.Type 4460 }