github.com/riscv/riscv-go@v0.0.0-20200123204226-124ebd6fcc8e/src/cmd/compile/internal/gc/ssa.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaExp ssaExport

func initssa() *ssa.Config {
	if ssaConfig == nil {
		ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
		if Thearch.LinkArch.Name == "386" {
			ssaConfig.Set387(Thearch.Use387)
		}
	}
	ssaConfig.HTML = nil
	return ssaConfig
}

// buildssa builds an SSA function.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	if fn.Func.Pragma&Nowritebarrier != 0 {
		s.noWB = true
	}
	defer func() {
		if s.WBPos.IsKnown() {
			fn.Func.WBPos = s.WBPos
		}
	}()
	// TODO(khr): build config just once at the start of the compiler binary

	ssaExp.log = printssa

	s.config = initssa()
	s.f = s.config.NewFunc()
	s.f.Name = name
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}
	s.config.DebugTest = s.config.DebugHashMatch("GOSSAHASH", name)

	if name == os.Getenv("GOSSAFUNC") {
		// TODO: tempfile? it is handy to have the location
		// of this file be stable, so you can just reload in the browser.
		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, ptrto(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %s unimplemented", classnames[n.Class])
		}
	}

	// Populate arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class != PPARAM {
			continue
		}
		var v *ssa.Value
		if s.canSSA(n) {
			v = s.newValue0A(ssa.OpArg, n.Type, n)
		} else {
			// Not SSAable. Load it.
			v = s.newValue2(ssa.OpLoad, n.Type, s.decladdrs[n], s.startmem)
		}
		s.vars[n] = v
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	// Check that we used all labels
	for name, lab := range s.labels {
		if !lab.used() && !lab.reported && !lab.defNode.Used {
			yyerrorl(lab.defNode.Pos, "label %v defined and not used", name)
			lab.reported = true
		}
		if lab.used() && !lab.defined() && !lab.reported {
			yyerrorl(lab.useNode.Pos, "label %v not defined", name)
			lab.reported = true
		}
	}

	// Check any forward gotos. Non-forward gotos have already been checked.
	for _, n := range s.fwdGotos {
		lab := s.labels[n.Left.Sym.Name]
		// If the label is undefined, we have already printed an error.
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		}
	}

	if nerrors > 0 {
		s.f.Free()
		return nil
	}

	s.insertPhis()

	// Don't carry a reference to this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	return s.f
}
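// A usage sketch (Foo here is a hypothetical function name in the package
// being compiled): building with GOSSAFUNC=Foo in the environment makes
// buildssa above log its input lists and write the SSA phases to ssa.html
// in the current directory:
//
//	GOSSAFUNC=Foo go build somepkg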
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// gotos that jump forward; required for deferred checkgoto calls
	fwdGotos []*Node
	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	// A dummy value used during phi construction.
	placeholder *ssa.Value

	cgoUnsafeArgs bool
	noWB          bool
	WBPos         src.XPos // line number of first write barrier. 0=no write barriers
}
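// A note on how these fields fit together: endBlock (below) snapshots the
// current s.vars into defvars[b.ID] as each block is finished, and those
// per-block snapshots are what the phi-construction pass kicked off by
// insertPhis in buildssa consumes to resolve forward references into phis.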
type funcLine struct {
	f    *Node
	line src.XPos
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
	defNode        *Node      // label definition Node (OLABEL)
	// Label use Node (OGOTO, OBREAK, OCONTINUE).
	// Used only for error detection and reporting.
	// There might be multiple uses, but we only need to track one.
	useNode  *Node
	reported bool // reported indicates whether an error has already been reported for this label
}

// defined reports whether the label has a definition (OLABEL node).
func (l *ssaLabel) defined() bool { return l.defNode != nil }

// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
func (l *ssaLabel) used() bool { return l.useNode != nil }

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.config.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.config.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.config.Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) {
	s.config.Warnl(pos, msg, args...)
}
func (s *state) Debug_checknil() bool { return s.config.Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
)
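// The function's memory state is threaded through s.vars keyed by &memVar:
// s.mem() reads the current memory value, and ops that touch memory store a
// new one back. For example, the store pattern used later in this file
// (see exit) is:
//
//	s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, val, s.mem())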
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	b.Pos = s.peekPos()
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit a node with a missing line number;
		// use the parent line number in this case.
		line = s.peekPos()
		if Debug['K'] != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	}
	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}

func (s *state) Error(msg string, args ...interface{}) {
	yyerrorl(s.peekPos(), msg, args...)
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t ssa.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekPos(), op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekPos(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekPos(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekPos(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekPos(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekPos(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}

// stmtList converts the statement list l to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Pos)
	defer s.popLine()

	// If s.curBlock is nil, then we're about to generate dead code.
	// We can't just short-circuit here, though,
	// because we check labels and gotos as part of SSA generation.
	// Provide a block for the dead code so that we don't have
	// to add special cases everywhere else.
	if s.curBlock == nil {
		dead := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(dead)
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "selectgo" || fn == "block") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		deref := false
		if !canSSAType(n.Rlist.First().Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), deref, n.Pos, 0, false)
		s.assign(n.List.Second(), resok, false, false, n.Pos, 0, false)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		// Make a fake node to mimic loading return value, ONLY for write barrier test.
		// This is future-proofing against non-scalar 2-result intrinsics.
		// Currently we only have scalar ones, which result in no write barrier.
		fakeret := &Node{Op: OINDREGSP}
		s.assign(n.List.First(), v1, needwritebarrier(n.List.First(), fakeret), false, n.Pos, 0, false)
		s.assign(n.List.Second(), v2, needwritebarrier(n.List.Second(), fakeret), false, n.Pos, 0, false)
		return

	case ODCL:
		if n.Left.Class == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym

		if isblanksym(sym) {
			// Empty identifier is valid but useless.
			// See issues 11589, 11593.
			return
		}

		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.Name.Defn; ctl != nil {
			switch ctl.Op {
			case OFOR, OSWITCH, OSELECT:
				s.labeledNodes[ctl] = lab
			}
		}

		if !lab.defined() {
			lab.defNode = n
		} else {
			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Pos))
			lab.reported = true
		}
		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// go to that label (we pretend "label:" is preceded by "goto label")
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		if !lab.used() {
			lab.useNode = n
		}

		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		} else {
			s.fwdGotos = append(s.fwdGotos, n)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

	case OAS, OASWB:
		// Generate static data rather than code, if possible.
		if n.IsStatic {
			if !genAsInitNoCheck(n) {
				Dump("\ngen_as_init", n)
				Fatalf("gen_as_init couldn't generate static data")
			}
			return
		}

		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//   VARDEF x
			//   COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		var t *Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) {
					if !s.canSSA(n.Left) {
						if Debug_append > 0 {
							Warnl(n.Pos, "append: len-only update")
						}
						s.append(rhs, true)
						return
					} else {
						if Debug_append > 0 { // replicating old diagnostic message
							Warnl(n.Pos, "append: len-only update (in local slice)")
						}
					}
				}
			}
		}
		var r *ssa.Value
		var isVolatile bool
		needwb := n.Op == OASWB
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r, isVolatile = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}
		if rhs != nil && rhs.Op == OAPPEND && needwritebarrier(n.Left, rhs) {
			// The frontend gets rid of the write barrier to enable the special OAPPEND
			// handling above, but since this is not a special case, we need it.
			// TODO: just add a ptr graying to the end of growslice?
			// TODO: check whether we need to provide special handling and a write barrier
			// for ODOTTYPE and ORECV also.
			// They get similar wb-removal treatment in walk.go:OAS.
			needwb = true
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//    tmp = len(*p)
			//    (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, needwb, deref, n.Pos, skip, isVolatile)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Left.Sym

	case OCONTINUE, OBREAK:
		var op string
		var to *ssa.Block
		switch n.Op {
		case OCONTINUE:
			op = "continue"
			to = s.continueTo
		case OBREAK:
			op = "break"
			to = s.breakTo
		}
		if n.Left == nil {
			// plain break/continue
			if to == nil {
				s.Error("%s is not in a loop", op)
				return
			}
			// nothing to do; "to" is already the correct target
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			if !lab.used() {
				lab.useNode = n.Left
			}
			if !lab.defined() {
				s.Error("%s label not defined: %v", op, sym)
				lab.reported = true
				return
			}
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
			if to == nil {
				// Valid label but not usable with a break/continue here, e.g.:
				// for {
				// 	continue abc
				// }
				// abc:
				// for {}
				s.Error("invalid %s label %v", op, sym)
				lab.reported = true
				return
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)

	case OFOR:
		// OFOR: for Ninit; Left; Right { Nbody }
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test
		b := s.endBlock()
		b.AddEdgeTo(bCond)

		// generate code to test condition
		s.startBlock(bCond)
		if n.Left != nil {
			s.condBranch(n.Left, bBody, bEnd, 1)
		} else {
			b := s.endBlock()
			b.Kind = ssa.BlockPlain
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}
		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// OSWITCH never falls through (s.curBlock == nil here).
		// OSELECT does not fall through if we're calling selectgo.
		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
		// In those latter cases, go to the code after the select.
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}

type opAndType struct {
	op    Op
	etype EType
}

var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}: ssa.OpAdd8,
	opAndType{OADD, TUINT8}: ssa.OpAdd8,
	opAndType{OADD, TINT16}: ssa.OpAdd16,
	opAndType{OADD, TUINT16}: ssa.OpAdd16,
	opAndType{OADD, TINT32}: ssa.OpAdd32,
	opAndType{OADD, TUINT32}: ssa.OpAdd32,
	opAndType{OADD, TPTR32}: ssa.OpAdd32,
	opAndType{OADD, TINT64}: ssa.OpAdd64,
	opAndType{OADD, TUINT64}: ssa.OpAdd64,
	opAndType{OADD, TPTR64}: ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}: ssa.OpSub8,
	opAndType{OSUB, TUINT8}: ssa.OpSub8,
	opAndType{OSUB, TINT16}: ssa.OpSub16,
	opAndType{OSUB, TUINT16}: ssa.OpSub16,
	opAndType{OSUB, TINT32}: ssa.OpSub32,
	opAndType{OSUB, TUINT32}: ssa.OpSub32,
	opAndType{OSUB, TINT64}: ssa.OpSub64,
	opAndType{OSUB, TUINT64}: ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}: ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}: ssa.OpNeg8,
	opAndType{OMINUS, TINT16}: ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}: ssa.OpNeg16,
	opAndType{OMINUS, TINT32}: ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}: ssa.OpNeg32,
	opAndType{OMINUS, TINT64}: ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}: ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}: ssa.OpCom8,
	opAndType{OCOM, TUINT8}: ssa.OpCom8,
	opAndType{OCOM, TINT16}: ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}: ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}: ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}: ssa.OpMul8,
	opAndType{OMUL, TUINT8}: ssa.OpMul8,
	opAndType{OMUL, TINT16}: ssa.OpMul16,
	opAndType{OMUL, TUINT16}: ssa.OpMul16,
	opAndType{OMUL, TINT32}: ssa.OpMul32,
	opAndType{OMUL, TUINT32}: ssa.OpMul32,
	opAndType{OMUL, TINT64}: ssa.OpMul64,
	opAndType{OMUL, TUINT64}: ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{OHMUL, TINT8}: ssa.OpHmul8,
	opAndType{OHMUL, TUINT8}: ssa.OpHmul8u,
	opAndType{OHMUL, TINT16}: ssa.OpHmul16,
	opAndType{OHMUL, TUINT16}: ssa.OpHmul16u,
	opAndType{OHMUL, TINT32}: ssa.OpHmul32,
	opAndType{OHMUL, TUINT32}: ssa.OpHmul32u,

	opAndType{ODIV, TINT8}: ssa.OpDiv8,
	opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
	opAndType{ODIV, TINT16}: ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}: ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}: ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}: ssa.OpMod8,
	opAndType{OMOD, TUINT8}: ssa.OpMod8u,
	opAndType{OMOD, TINT16}: ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}: ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}: ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}: ssa.OpAnd8,
	opAndType{OAND, TUINT8}: ssa.OpAnd8,
	opAndType{OAND, TINT16}: ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}: ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}: ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}: ssa.OpOr8,
	opAndType{OOR, TUINT8}: ssa.OpOr8,
	opAndType{OOR, TINT16}: ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}: ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}: ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}: ssa.OpXor8,
	opAndType{OXOR, TUINT8}: ssa.OpXor8,
	opAndType{OXOR, TINT16}: ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}: ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}: ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}: ssa.OpEqB,
	opAndType{OEQ, TINT8}: ssa.OpEq8,
	opAndType{OEQ, TUINT8}: ssa.OpEq8,
	opAndType{OEQ, TINT16}: ssa.OpEq16,
	opAndType{OEQ, TUINT16}: ssa.OpEq16,
	opAndType{OEQ, TINT32}: ssa.OpEq32,
	opAndType{OEQ, TUINT32}: ssa.OpEq32,
	opAndType{OEQ, TINT64}: ssa.OpEq64,
	opAndType{OEQ, TUINT64}: ssa.OpEq64,
	opAndType{OEQ, TINTER}: ssa.OpEqInter,
	opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
	opAndType{OEQ, TMAP}: ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
	opAndType{OEQ, TPTR32}: ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,

	opAndType{ONE, TBOOL}: ssa.OpNeqB,
	opAndType{ONE, TINT8}: ssa.OpNeq8,
	opAndType{ONE, TUINT8}: ssa.OpNeq8,
	opAndType{ONE, TINT16}: ssa.OpNeq16,
	opAndType{ONE, TUINT16}: ssa.OpNeq16,
	opAndType{ONE, TINT32}: ssa.OpNeq32,
	opAndType{ONE, TUINT32}: ssa.OpNeq32,
	opAndType{ONE, TINT64}: ssa.OpNeq64,
	opAndType{ONE, TUINT64}: ssa.OpNeq64,
	opAndType{ONE, TINTER}: ssa.OpNeqInter,
	opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
	opAndType{ONE, TMAP}: ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
	opAndType{ONE, TPTR32}: ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,

	opAndType{OLT, TINT8}: ssa.OpLess8,
	opAndType{OLT, TUINT8}: ssa.OpLess8U,
	opAndType{OLT, TINT16}: ssa.OpLess16,
	opAndType{OLT, TUINT16}: ssa.OpLess16U,
	opAndType{OLT, TINT32}: ssa.OpLess32,
	opAndType{OLT, TUINT32}: ssa.OpLess32U,
	opAndType{OLT, TINT64}: ssa.OpLess64,
	opAndType{OLT, TUINT64}: ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}: ssa.OpGreater8,
	opAndType{OGT, TUINT8}: ssa.OpGreater8U,
	opAndType{OGT, TINT16}: ssa.OpGreater16,
	opAndType{OGT, TUINT16}: ssa.OpGreater16U,
	opAndType{OGT, TINT32}: ssa.OpGreater32,
	opAndType{OGT, TUINT32}: ssa.OpGreater32U,
	opAndType{OGT, TINT64}: ssa.OpGreater64,
	opAndType{OGT, TUINT64}: ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}: ssa.OpLeq8,
	opAndType{OLE, TUINT8}: ssa.OpLeq8U,
	opAndType{OLE, TINT16}: ssa.OpLeq16,
	opAndType{OLE, TUINT16}: ssa.OpLeq16U,
	opAndType{OLE, TINT32}: ssa.OpLeq32,
	opAndType{OLE, TUINT32}: ssa.OpLeq32U,
	opAndType{OLE, TINT64}: ssa.OpLeq64,
	opAndType{OLE, TUINT64}: ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}: ssa.OpGeq8,
	opAndType{OGE, TUINT8}: ssa.OpGeq8U,
	opAndType{OGE, TINT16}: ssa.OpGeq16,
	opAndType{OGE, TUINT16}: ssa.OpGeq16U,
	opAndType{OGE, TINT32}: ssa.OpGeq32,
	opAndType{OGE, TUINT32}: ssa.OpGeq32U,
	opAndType{OGE, TINT64}: ssa.OpGeq64,
	opAndType{OGE, TUINT64}: ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}
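// For example, on a 64-bit target s.ssaOp(OADD, Types[TINT]) resolves to
// ssa.OpAdd64: concreteEtype (below) widens TINT to TINT64, and the
// opToSSA entry for {OADD, TINT64} selects the 64-bit add.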
func (s *state) concreteEtype(t *Type) EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.IntSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.IntSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}

func (s *state) ssaOp(op Op, t *Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}

func floatForComplex(t *Type) *Type {
	if t.Size() == 8 {
		return Types[TFLOAT32]
	} else {
		return Types[TFLOAT64]
	}
}

type opAndTwoTypes struct {
	op     Op
	etype1 EType
	etype2 EType
}

type twoTypes struct {
	etype1 EType
	etype2 EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType EType
}

var fpConvOpToSSA = map[twoTypes]twoOpsAndType{

	twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead

	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}

// this map is used only for 32-bit archs, and only includes the difference:
// on 32-bit archs, don't use int64<->float conversion for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
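// To read these tables: converting uint32 to float64 on a 64-bit target,
// for example, first zero-extends to 64 bits and then converts
// (ZeroExt32to64 followed by Cvt64to64F, via the TINT64 intermediate type);
// the OpInvalid entries mark the uint64 cases that are instead expanded into
// branchy code by helpers used later in this file (e.g. uint64Tofloat64).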
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}

func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}

func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype1}]
	if !ok {
		s.Fatalf("unhandled rotate op %v etype=%s", op, etype1)
	}
	return x
}
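// The shift table above is keyed on both operand types: the op name encodes
// the value width then the shift-count width, and a U marks an unsigned
// (logical) right shift. For example, x >> y with x of type uint32 and
// y of type uint8 resolves to ssa.OpRsh32Ux8.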
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OARRAYBYTESTRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice)
		len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTRARRAYBYTETMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), str)
		len := s.newValue1(ssa.OpStringLen, Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Left.Sym})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym)
			aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym}
			return s.entryNewValue1A(ssa.OpAddr, ptrto(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr, _ := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr, _ := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}

		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsBoolean() && tt.IsKind(TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
			return s.newValue1(ssa.OpCopy, n.Type, x)
		}
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}

		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" && Thearch.LinkArch.Family != sys.MIPS {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if Thearch.LinkArch.Name == "arm64" {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}

			if Thearch.LinkArch.Family == sys.MIPS {
				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
					// tt is float32 or float64, and ft is also unsigned
					if tt.Size() == 4 {
						return s.uint32Tofloat32(n, x, ft, tt)
					}
					if tt.Size() == 8 {
						return s.uint32Tofloat64(n, x, ft, tt)
					}
				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
					// ft is float32 or float64, and tt is unsigned integer
					if ft.Size() == 4 {
						return s.float32ToUint32(n, x, ft, tt)
					}
					if ft.Size() == 8 {
						return s.float64ToUint32(n, x, ft, tt)
					}
				}
			}

			if !ok {
				s.Fatalf("weird float conversion %v -> %v", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValue1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValue1(op1, n.Type, x)
				}
				return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
1710 if ft.IsInteger() { 1711 // tt is float32 or float64, and ft is also unsigned 1712 if tt.Size() == 4 { 1713 return s.uint64Tofloat32(n, x, ft, tt) 1714 } 1715 if tt.Size() == 8 { 1716 return s.uint64Tofloat64(n, x, ft, tt) 1717 } 1718 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt) 1719 } 1720 // ft is float32 or float64, and tt is unsigned integer 1721 if ft.Size() == 4 { 1722 return s.float32ToUint64(n, x, ft, tt) 1723 } 1724 if ft.Size() == 8 { 1725 return s.float64ToUint64(n, x, ft, tt) 1726 } 1727 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) 1728 return nil 1729 } 1730 1731 if ft.IsComplex() && tt.IsComplex() { 1732 var op ssa.Op 1733 if ft.Size() == tt.Size() { 1734 op = ssa.OpCopy 1735 } else if ft.Size() == 8 && tt.Size() == 16 { 1736 op = ssa.OpCvt32Fto64F 1737 } else if ft.Size() == 16 && tt.Size() == 8 { 1738 op = ssa.OpCvt64Fto32F 1739 } else { 1740 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1741 } 1742 ftp := floatForComplex(ft) 1743 ttp := floatForComplex(tt) 1744 return s.newValue2(ssa.OpComplexMake, tt, 1745 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), 1746 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) 1747 } 1748 1749 s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) 1750 return nil 1751 1752 case ODOTTYPE: 1753 res, _ := s.dottype(n, false) 1754 return res 1755 1756 // binary ops 1757 case OLT, OEQ, ONE, OLE, OGE, OGT: 1758 a := s.expr(n.Left) 1759 b := s.expr(n.Right) 1760 if n.Left.Type.IsComplex() { 1761 pt := floatForComplex(n.Left.Type) 1762 op := s.ssaOp(OEQ, pt) 1763 r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) 1764 i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) 1765 c := s.newValue2(ssa.OpAndB, Types[TBOOL], r, i) 1766 switch n.Op { 1767 case OEQ: 1768 return c 1769 case ONE: 1770 return s.newValue1(ssa.OpNot, Types[TBOOL], c) 1771 default: 1772 s.Fatalf("ordered complex compare %v", n.Op) 1773 } 1774 } 1775 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b) 1776 case OMUL: 1777 a := s.expr(n.Left) 1778 b := s.expr(n.Right) 1779 if n.Type.IsComplex() { 1780 mulop := ssa.OpMul64F 1781 addop := ssa.OpAdd64F 1782 subop := ssa.OpSub64F 1783 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1784 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1785 1786 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1787 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1788 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1789 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1790 1791 if pt != wt { // Widen for calculation 1792 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1793 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1794 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1795 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1796 } 1797 1798 xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1799 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal)) 1800 1801 if pt != wt { // Narrow to store back 1802 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1803 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1804 } 1805 1806 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1807 } 1808 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1809 1810 case ODIV: 
1811 a := s.expr(n.Left) 1812 b := s.expr(n.Right) 1813 if n.Type.IsComplex() { 1814 // TODO this is not executed because the front-end substitutes a runtime call. 1815 // That probably ought to change; with modest optimization the widen/narrow 1816 // conversions could all be elided in larger expression trees. 1817 mulop := ssa.OpMul64F 1818 addop := ssa.OpAdd64F 1819 subop := ssa.OpSub64F 1820 divop := ssa.OpDiv64F 1821 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1822 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1823 1824 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1825 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1826 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1827 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1828 1829 if pt != wt { // Widen for calculation 1830 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1831 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1832 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1833 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1834 } 1835 1836 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag)) 1837 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1838 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag)) 1839 1840 // TODO not sure if this is best done in wide precision or narrow 1841 // Double-rounding might be an issue. 1842 // Note that the pre-SSA implementation does the entire calculation 1843 // in wide format, so wide is compatible. 1844 xreal = s.newValue2(divop, wt, xreal, denom) 1845 ximag = s.newValue2(divop, wt, ximag, denom) 1846 1847 if pt != wt { // Narrow to store back 1848 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1849 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1850 } 1851 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1852 } 1853 if n.Type.IsFloat() { 1854 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1855 } 1856 return s.intDivide(n, a, b) 1857 case OMOD: 1858 a := s.expr(n.Left) 1859 b := s.expr(n.Right) 1860 return s.intDivide(n, a, b) 1861 case OADD, OSUB: 1862 a := s.expr(n.Left) 1863 b := s.expr(n.Right) 1864 if n.Type.IsComplex() { 1865 pt := floatForComplex(n.Type) 1866 op := s.ssaOp(n.Op, pt) 1867 return s.newValue2(ssa.OpComplexMake, n.Type, 1868 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1869 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1870 } 1871 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1872 case OAND, OOR, OHMUL, OXOR: 1873 a := s.expr(n.Left) 1874 b := s.expr(n.Right) 1875 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1876 case OLSH, ORSH: 1877 a := s.expr(n.Left) 1878 b := s.expr(n.Right) 1879 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1880 case OANDAND, OOROR: 1881 // To implement OANDAND (and OOROR), we introduce a 1882 // new temporary variable to hold the result. The 1883 // variable is associated with the OANDAND node in the 1884 // s.vars table (normally variables are only 1885 // associated with ONAME nodes). We convert 1886 // A && B 1887 // to 1888 // var = A 1889 // if var { 1890 // var = B 1891 // } 1892 // Using var in the subsequent block introduces the 1893 // necessary phi variable. 
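// Sketched as a control-flow graph, "v := A && B" becomes roughly:
//
//	b:       v = A        If v -> bRight, bResult
//	bRight:  v = B        Plain -> bResult
//	bResult: uses phi(v)  (the phi is materialized later by insertPhis)
//
// For OOROR the successor order of b is swapped, so B is evaluated
// only when A is false.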
1894 el := s.expr(n.Left) 1895 s.vars[n] = el 1896 1897 b := s.endBlock() 1898 b.Kind = ssa.BlockIf 1899 b.SetControl(el) 1900 // In theory, we should set b.Likely here based on context. 1901 // However, gc only gives us likeliness hints 1902 // in a single place, for plain OIF statements, 1903 // and passing around context is finicky, so don't bother for now. 1904 1905 bRight := s.f.NewBlock(ssa.BlockPlain) 1906 bResult := s.f.NewBlock(ssa.BlockPlain) 1907 if n.Op == OANDAND { 1908 b.AddEdgeTo(bRight) 1909 b.AddEdgeTo(bResult) 1910 } else if n.Op == OOROR { 1911 b.AddEdgeTo(bResult) 1912 b.AddEdgeTo(bRight) 1913 } 1914 1915 s.startBlock(bRight) 1916 er := s.expr(n.Right) 1917 s.vars[n] = er 1918 1919 b = s.endBlock() 1920 b.AddEdgeTo(bResult) 1921 1922 s.startBlock(bResult) 1923 return s.variable(n, Types[TBOOL]) 1924 case OCOMPLEX: 1925 r := s.expr(n.Left) 1926 i := s.expr(n.Right) 1927 return s.newValue2(ssa.OpComplexMake, n.Type, r, i) 1928 1929 // unary ops 1930 case OMINUS: 1931 a := s.expr(n.Left) 1932 if n.Type.IsComplex() { 1933 tp := floatForComplex(n.Type) 1934 negop := s.ssaOp(n.Op, tp) 1935 return s.newValue2(ssa.OpComplexMake, n.Type, 1936 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), 1937 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) 1938 } 1939 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1940 case ONOT, OCOM: 1941 a := s.expr(n.Left) 1942 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1943 case OIMAG, OREAL: 1944 a := s.expr(n.Left) 1945 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a) 1946 case OPLUS: 1947 return s.expr(n.Left) 1948 1949 case OADDR: 1950 a, _ := s.addr(n.Left, n.Bounded) 1951 // Note we know the volatile result is false because you can't write &f() in Go. 1952 return a 1953 1954 case OINDREGSP: 1955 addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(n.Type), n.Xoffset, s.sp) 1956 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1957 1958 case OIND: 1959 p := s.exprPtr(n.Left, false, n.Pos) 1960 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1961 1962 case ODOT: 1963 t := n.Left.Type 1964 if canSSAType(t) { 1965 v := s.expr(n.Left) 1966 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v) 1967 } 1968 p, _ := s.addr(n, false) 1969 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1970 1971 case ODOTPTR: 1972 p := s.exprPtr(n.Left, false, n.Pos) 1973 p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p) 1974 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1975 1976 case OINDEX: 1977 switch { 1978 case n.Left.Type.IsString(): 1979 if n.Bounded && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) { 1980 // Replace "abc"[1] with 'b'. 1981 // Delayed until now because "abc"[1] is not an ideal constant. 1982 // See test/fixedbugs/issue11370.go.
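// (The int8 conversion below is deliberate: the AuxInt of a small
// integer constant is stored sign-extended, so the byte is narrowed
// to int8 before being widened back to int64.)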
1983 return s.newValue0I(ssa.OpConst8, Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()]))) 1984 } 1985 a := s.expr(n.Left) 1986 i := s.expr(n.Right) 1987 i = s.extendIndex(i, panicindex) 1988 if !n.Bounded { 1989 len := s.newValue1(ssa.OpStringLen, Types[TINT], a) 1990 s.boundsCheck(i, len) 1991 } 1992 ptrtyp := ptrto(Types[TUINT8]) 1993 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 1994 if Isconst(n.Right, CTINT) { 1995 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 1996 } else { 1997 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 1998 } 1999 return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem()) 2000 case n.Left.Type.IsSlice(): 2001 p, _ := s.addr(n, false) 2002 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2003 case n.Left.Type.IsArray(): 2004 if bound := n.Left.Type.NumElem(); bound <= 1 { 2005 // SSA can handle arrays of length at most 1. 2006 a := s.expr(n.Left) 2007 i := s.expr(n.Right) 2008 if bound == 0 { 2009 // Bounds check will never succeed. Might as well 2010 // use constants for the bounds check. 2011 z := s.constInt(Types[TINT], 0) 2012 s.boundsCheck(z, z) 2013 // The return value won't be live, return junk. 2014 return s.newValue0(ssa.OpUnknown, n.Type) 2015 } 2016 i = s.extendIndex(i, panicindex) 2017 s.boundsCheck(i, s.constInt(Types[TINT], bound)) 2018 return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) 2019 } 2020 p, _ := s.addr(n, false) 2021 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2022 default: 2023 s.Fatalf("bad type for index %v", n.Left.Type) 2024 return nil 2025 } 2026 2027 case OLEN, OCAP: 2028 switch { 2029 case n.Left.Type.IsSlice(): 2030 op := ssa.OpSliceLen 2031 if n.Op == OCAP { 2032 op = ssa.OpSliceCap 2033 } 2034 return s.newValue1(op, Types[TINT], s.expr(n.Left)) 2035 case n.Left.Type.IsString(): // string; not reachable for OCAP 2036 return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left)) 2037 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 2038 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 2039 default: // array 2040 return s.constInt(Types[TINT], n.Left.Type.NumElem()) 2041 } 2042 2043 case OSPTR: 2044 a := s.expr(n.Left) 2045 if n.Left.Type.IsSlice() { 2046 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 2047 } else { 2048 return s.newValue1(ssa.OpStringPtr, n.Type, a) 2049 } 2050 2051 case OITAB: 2052 a := s.expr(n.Left) 2053 return s.newValue1(ssa.OpITab, n.Type, a) 2054 2055 case OIDATA: 2056 a := s.expr(n.Left) 2057 return s.newValue1(ssa.OpIData, n.Type, a) 2058 2059 case OEFACE: 2060 tab := s.expr(n.Left) 2061 data := s.expr(n.Right) 2062 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2063 2064 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2065 v := s.expr(n.Left) 2066 var i, j, k *ssa.Value 2067 low, high, max := n.SliceBounds() 2068 if low != nil { 2069 i = s.extendIndex(s.expr(low), panicslice) 2070 } 2071 if high != nil { 2072 j = s.extendIndex(s.expr(high), panicslice) 2073 } 2074 if max != nil { 2075 k = s.extendIndex(s.expr(max), panicslice) 2076 } 2077 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2078 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2079 2080 case OSLICESTR: 2081 v := s.expr(n.Left) 2082 var i, j *ssa.Value 2083 low, high, _ := n.SliceBounds() 2084 if low != nil { 2085 i = s.extendIndex(s.expr(low), panicslice) 2086 } 2087 if high != nil { 2088 j = s.extendIndex(s.expr(high), panicslice) 2089 } 2090 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 2091 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2092 2093 case 
OCALLFUNC: 2094 if isIntrinsicCall(n) { 2095 return s.intrinsicCall(n) 2096 } 2097 fallthrough 2098 2099 case OCALLINTER, OCALLMETH: 2100 a := s.call(n, callNormal) 2101 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2102 2103 case OGETG: 2104 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2105 2106 case OAPPEND: 2107 return s.append(n, false) 2108 2109 default: 2110 s.Fatalf("unhandled expr %v", n.Op) 2111 return nil 2112 } 2113 } 2114 2115 // append converts an OAPPEND node to SSA. 2116 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2117 // adds it to s, and returns the Value. 2118 // If inplace is true, it writes the result of the OAPPEND expression n 2119 // back to the slice being appended to, and returns nil. 2120 // inplace MUST be set to false if the slice can be SSA'd. 2121 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2122 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2123 // 2124 // ptr, len, cap := s 2125 // newlen := len + 3 2126 // if newlen > cap { 2127 // ptr, len, cap = growslice(s, newlen) 2128 // newlen = len + 3 // recalculate to avoid a spill 2129 // } 2130 // // with write barriers, if needed: 2131 // *(ptr+len) = e1 2132 // *(ptr+len+1) = e2 2133 // *(ptr+len+2) = e3 2134 // return makeslice(ptr, newlen, cap) 2135 // 2136 // 2137 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2138 // 2139 // a := &s 2140 // ptr, len, cap := s 2141 // newlen := len + 3 2142 // if newlen > cap { 2143 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2144 // vardef(a) // if necessary, advise liveness we are writing a new a 2145 // *a.cap = newcap // write before ptr to avoid a spill 2146 // *a.ptr = newptr // with write barrier 2147 // } 2148 // newlen = len + 3 // recalculate to avoid a spill 2149 // *a.len = newlen 2150 // // with write barriers, if needed: 2151 // *(ptr+len) = e1 2152 // *(ptr+len+1) = e2 2153 // *(ptr+len+2) = e3 2154 2155 et := n.Type.Elem() 2156 pt := ptrto(et) 2157 2158 // Evaluate slice 2159 sn := n.List.First() // the slice node is the first in the list 2160 2161 var slice, addr *ssa.Value 2162 if inplace { 2163 addr, _ = s.addr(sn, false) 2164 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2165 } else { 2166 slice = s.expr(sn) 2167 } 2168 2169 // Allocate new blocks 2170 grow := s.f.NewBlock(ssa.BlockPlain) 2171 assign := s.f.NewBlock(ssa.BlockPlain) 2172 2173 // Decide if we need to grow 2174 nargs := int64(n.List.Len() - 1) 2175 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2176 l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice) 2177 c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice) 2178 nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2179 2180 cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c) 2181 s.vars[&ptrVar] = p 2182 2183 if !inplace { 2184 s.vars[&newlenVar] = nl 2185 s.vars[&capVar] = c 2186 } else { 2187 s.vars[&lenVar] = l 2188 } 2189 2190 b := s.endBlock() 2191 b.Kind = ssa.BlockIf 2192 b.Likely = ssa.BranchUnlikely 2193 b.SetControl(cmp) 2194 b.AddEdgeTo(grow) 2195 b.AddEdgeTo(assign) 2196 2197 // Call growslice 2198 s.startBlock(grow) 2199 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(n.Type.Elem())}, s.sb) 2200 2201 r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) 2202 2203 if inplace { 2204 if sn.Op == ONAME { 2205 // Tell liveness we're about to build a new slice 
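// (OpVarDef tells liveness the whole variable is about to be
// overwritten, so the piecewise cap/ptr/len stores below are not
// misread as partial updates of a live slice.)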
2206 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem()) 2207 } 2208 capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(array_cap), addr) 2209 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem()) 2210 if ssa.IsStackAddr(addr) { 2211 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, pt.Size(), addr, r[0], s.mem()) 2212 } else { 2213 s.insertWBstore(pt, addr, r[0], n.Pos, 0) 2214 } 2215 // load the value we just stored to avoid having to spill it 2216 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2217 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2218 } else { 2219 s.vars[&ptrVar] = r[0] 2220 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs)) 2221 s.vars[&capVar] = r[2] 2222 } 2223 2224 b = s.endBlock() 2225 b.AddEdgeTo(assign) 2226 2227 // assign new elements to slots 2228 s.startBlock(assign) 2229 2230 if inplace { 2231 l = s.variable(&lenVar, Types[TINT]) // generates phi for len 2232 nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2233 lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(array_nel), addr) 2234 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem()) 2235 } 2236 2237 // Evaluate args 2238 type argRec struct { 2239 // if store is true, we're appending the value v. If false, we're appending the 2240 // value at *v. If store==false, isVolatile reports whether the source 2241 // is in the outargs section of the stack frame. 2242 v *ssa.Value 2243 store bool 2244 isVolatile bool 2245 } 2246 args := make([]argRec, 0, nargs) 2247 for _, n := range n.List.Slice()[1:] { 2248 if canSSAType(n.Type) { 2249 args = append(args, argRec{v: s.expr(n), store: true}) 2250 } else { 2251 v, isVolatile := s.addr(n, false) 2252 args = append(args, argRec{v: v, isVolatile: isVolatile}) 2253 } 2254 } 2255 2256 p = s.variable(&ptrVar, pt) // generates phi for ptr 2257 if !inplace { 2258 nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl 2259 c = s.variable(&capVar, Types[TINT]) // generates phi for cap 2260 } 2261 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2262 // TODO: just one write barrier call for all of these writes? 2263 // TODO: maybe just one writeBarrier.enabled check? 2264 for i, arg := range args { 2265 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i))) 2266 if arg.store { 2267 if haspointers(et) { 2268 s.insertWBstore(et, addr, arg.v, n.Pos, 0) 2269 } else { 2270 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem()) 2271 } 2272 } else { 2273 if haspointers(et) { 2274 s.insertWBmove(et, addr, arg.v, n.Pos, arg.isVolatile) 2275 } else { 2276 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(et), addr, arg.v, s.mem()) 2277 } 2278 } 2279 } 2280 2281 delete(s.vars, &ptrVar) 2282 if inplace { 2283 delete(s.vars, &lenVar) 2284 return nil 2285 } 2286 delete(s.vars, &newlenVar) 2287 delete(s.vars, &capVar) 2288 // make result 2289 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2290 } 2291 2292 // condBranch evaluates the boolean expression cond and branches to yes 2293 // if cond is true and no if cond is false. 2294 // This function is intended to handle && and || better than just calling 2295 // s.expr(cond) and branching on the result. 
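// For example, condBranch(a && b, yes, no, likely) recurses, in
// outline, as:
//
//	condBranch(a, mid, no, max8(likely, 0)) // a false: jump straight to no
//	condBranch(b, yes, no, likely)          // emitted in block mid
//
// so b is never evaluated once a is known to be false.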
2296 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { 2297 if cond.Op == OANDAND { 2298 mid := s.f.NewBlock(ssa.BlockPlain) 2299 s.stmtList(cond.Ninit) 2300 s.condBranch(cond.Left, mid, no, max8(likely, 0)) 2301 s.startBlock(mid) 2302 s.condBranch(cond.Right, yes, no, likely) 2303 return 2304 // Note: if likely==1, then both recursive calls pass 1. 2305 // If likely==-1, then we don't have enough information to decide 2306 // whether the first branch is likely or not. So we pass 0 for 2307 // the likeliness of the first branch. 2308 // TODO: have the frontend give us branch prediction hints for 2309 // OANDAND and OOROR nodes (if it ever has such info). 2310 } 2311 if cond.Op == OOROR { 2312 mid := s.f.NewBlock(ssa.BlockPlain) 2313 s.stmtList(cond.Ninit) 2314 s.condBranch(cond.Left, yes, mid, min8(likely, 0)) 2315 s.startBlock(mid) 2316 s.condBranch(cond.Right, yes, no, likely) 2317 return 2318 // Note: if likely==-1, then both recursive calls pass -1. 2319 // If likely==1, then we don't have enough info to decide 2320 // the likelihood of the first branch. 2321 } 2322 if cond.Op == ONOT { 2323 s.stmtList(cond.Ninit) 2324 s.condBranch(cond.Left, no, yes, -likely) 2325 return 2326 } 2327 c := s.expr(cond) 2328 b := s.endBlock() 2329 b.Kind = ssa.BlockIf 2330 b.SetControl(c) 2331 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness 2332 b.AddEdgeTo(yes) 2333 b.AddEdgeTo(no) 2334 } 2335 2336 type skipMask uint8 2337 2338 const ( 2339 skipPtr skipMask = 1 << iota 2340 skipLen 2341 skipCap 2342 ) 2343 2344 // assign does left = right. 2345 // Right has already been evaluated to ssa, left has not. 2346 // If deref is true, then we do left = *right instead (and right has already been nil-checked). 2347 // If deref is true and right == nil, just do left = 0. 2348 // If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage. 2349 // Include a write barrier if wb is true. 2350 // skip indicates assignments (at the top level) that can be avoided. 2351 func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line src.XPos, skip skipMask, rightIsVolatile bool) { 2352 if left.Op == ONAME && isblank(left) { 2353 return 2354 } 2355 t := left.Type 2356 dowidth(t) 2357 if s.canSSA(left) { 2358 if deref { 2359 s.Fatalf("can SSA LHS %v but not RHS %s", left, right) 2360 } 2361 if left.Op == ODOT { 2362 // We're assigning to a field of an ssa-able value. 2363 // We need to build a new structure with the new value for the 2364 // field we're assigning and the old values for the other fields. 2365 // For instance: 2366 // type T struct {a, b, c int} 2367 // var x T 2368 // x.b = 5 2369 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c} 2370 2371 // Grab information about the structure type. 2372 t := left.Left.Type 2373 nf := t.NumFields() 2374 idx := fieldIdx(left) 2375 2376 // Grab old value of structure. 2377 old := s.expr(left.Left) 2378 2379 // Make new structure. 2380 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t) 2381 2382 // Add fields as args. 2383 for i := 0; i < nf; i++ { 2384 if i == idx { 2385 new.AddArg(right) 2386 } else { 2387 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old)) 2388 } 2389 } 2390 2391 // Recursively assign the new value we've made to the base of the dot op. 2392 s.assign(left.Left, new, false, false, line, 0, rightIsVolatile) 2393 // TODO: do we need to update named values here?
2394 return 2395 } 2396 if left.Op == OINDEX && left.Left.Type.IsArray() { 2397 // We're assigning to an element of an ssa-able array. 2398 // a[i] = v 2399 t := left.Left.Type 2400 n := t.NumElem() 2401 2402 i := s.expr(left.Right) // index 2403 if n == 0 { 2404 // The bounds check must fail. Might as well 2405 // ignore the actual index and just use zeros. 2406 z := s.constInt(Types[TINT], 0) 2407 s.boundsCheck(z, z) 2408 return 2409 } 2410 if n != 1 { 2411 s.Fatalf("assigning to non-1-length array") 2412 } 2413 // Rewrite to a = [1]{v} 2414 i = s.extendIndex(i, panicindex) 2415 s.boundsCheck(i, s.constInt(Types[TINT], 1)) 2416 v := s.newValue1(ssa.OpArrayMake1, t, right) 2417 s.assign(left.Left, v, false, false, line, 0, rightIsVolatile) 2418 return 2419 } 2420 // Update variable assignment. 2421 s.vars[left] = right 2422 s.addNamedValue(left, right) 2423 return 2424 } 2425 // Left is not ssa-able. Compute its address. 2426 addr, _ := s.addr(left, false) 2427 if left.Op == ONAME && skip == 0 { 2428 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem()) 2429 } 2430 if deref { 2431 // Treat as a mem->mem move. 2432 if wb && !ssa.IsStackAddr(addr) { 2433 s.insertWBmove(t, addr, right, line, rightIsVolatile) 2434 return 2435 } 2436 if right == nil { 2437 s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(t), addr, s.mem()) 2438 return 2439 } 2440 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(t), addr, right, s.mem()) 2441 return 2442 } 2443 // Treat as a store. 2444 if wb && !ssa.IsStackAddr(addr) { 2445 if skip&skipPtr != 0 { 2446 // Special case: if we don't write back the pointers, don't bother 2447 // doing the write barrier check. 2448 s.storeTypeScalars(t, addr, right, skip) 2449 return 2450 } 2451 s.insertWBstore(t, addr, right, line, skip) 2452 return 2453 } 2454 if skip != 0 { 2455 if skip&skipPtr == 0 { 2456 s.storeTypePtrs(t, addr, right) 2457 } 2458 s.storeTypeScalars(t, addr, right, skip) 2459 return 2460 } 2461 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem()) 2462 } 2463 2464 // zeroVal returns the zero value for type t. 
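// Aggregates are built recursively; for instance, the zero value of
// struct{ p *int; n int64 } is, in outline,
// StructMake2(ConstNil, Const64 <0>).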
2465 func (s *state) zeroVal(t *Type) *ssa.Value { 2466 switch { 2467 case t.IsInteger(): 2468 switch t.Size() { 2469 case 1: 2470 return s.constInt8(t, 0) 2471 case 2: 2472 return s.constInt16(t, 0) 2473 case 4: 2474 return s.constInt32(t, 0) 2475 case 8: 2476 return s.constInt64(t, 0) 2477 default: 2478 s.Fatalf("bad sized integer type %v", t) 2479 } 2480 case t.IsFloat(): 2481 switch t.Size() { 2482 case 4: 2483 return s.constFloat32(t, 0) 2484 case 8: 2485 return s.constFloat64(t, 0) 2486 default: 2487 s.Fatalf("bad sized float type %v", t) 2488 } 2489 case t.IsComplex(): 2490 switch t.Size() { 2491 case 8: 2492 z := s.constFloat32(Types[TFLOAT32], 0) 2493 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2494 case 16: 2495 z := s.constFloat64(Types[TFLOAT64], 0) 2496 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2497 default: 2498 s.Fatalf("bad sized complex type %v", t) 2499 } 2500 2501 case t.IsString(): 2502 return s.constEmptyString(t) 2503 case t.IsPtrShaped(): 2504 return s.constNil(t) 2505 case t.IsBoolean(): 2506 return s.constBool(false) 2507 case t.IsInterface(): 2508 return s.constInterface(t) 2509 case t.IsSlice(): 2510 return s.constSlice(t) 2511 case t.IsStruct(): 2512 n := t.NumFields() 2513 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2514 for i := 0; i < n; i++ { 2515 v.AddArg(s.zeroVal(t.FieldType(i).(*Type))) 2516 } 2517 return v 2518 case t.IsArray(): 2519 switch t.NumElem() { 2520 case 0: 2521 return s.entryNewValue0(ssa.OpArrayMake0, t) 2522 case 1: 2523 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) 2524 } 2525 } 2526 s.Fatalf("zero for type %v not implemented", t) 2527 return nil 2528 } 2529 2530 type callKind int8 2531 2532 const ( 2533 callNormal callKind = iota 2534 callDefer 2535 callGo 2536 ) 2537 2538 // TODO: make this a field of a configuration object instead of a global. 2539 var intrinsics *intrinsicInfo 2540 2541 type intrinsicInfo struct { 2542 std map[intrinsicKey]intrinsicBuilder 2543 intSized map[sizedIntrinsicKey]intrinsicBuilder 2544 ptrSized map[sizedIntrinsicKey]intrinsicBuilder 2545 } 2546 2547 // An intrinsicBuilder converts a call node n into an ssa value that 2548 // implements that call as an intrinsic. args is a list of arguments to the func. 2549 type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value 2550 2551 type intrinsicKey struct { 2552 pkg string 2553 fn string 2554 } 2555 2556 type sizedIntrinsicKey struct { 2557 pkg string 2558 fn string 2559 size int 2560 } 2561 2562 // disableForInstrumenting returns nil when instrumenting, fn otherwise 2563 func disableForInstrumenting(fn intrinsicBuilder) intrinsicBuilder { 2564 if instrumenting { 2565 return nil 2566 } 2567 return fn 2568 } 2569 2570 // enableOnArch returns fn on given archs, nil otherwise 2571 func enableOnArch(fn intrinsicBuilder, archs ...sys.ArchFamily) intrinsicBuilder { 2572 if Thearch.LinkArch.InFamily(archs...) { 2573 return fn 2574 } 2575 return nil 2576 } 2577 2578 func intrinsicInit() { 2579 i := &intrinsicInfo{} 2580 intrinsics = i 2581 2582 // initial set of intrinsics. 2583 i.std = map[intrinsicKey]intrinsicBuilder{ 2584 /******** runtime ********/ 2585 intrinsicKey{"runtime", "slicebytetostringtmp"}: disableForInstrumenting(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2586 // Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes 2587 // for the backend instead of slicebytetostringtmp calls 2588 // when not instrumenting. 
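// The string header built below (StringMake of the slice's ptr and
// len) aliases the byte slice's storage rather than copying it; that
// is only safe because walk bounds the lifetime of these temporaries.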
2589 slice := args[0] 2590 ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice) 2591 len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice) 2592 return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) 2593 }), 2594 intrinsicKey{"runtime", "KeepAlive"}: func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2595 data := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), args[0]) 2596 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem()) 2597 return nil 2598 }, 2599 2600 /******** runtime/internal/sys ********/ 2601 intrinsicKey{"runtime/internal/sys", "Ctz32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2602 return s.newValue1(ssa.OpCtz32, Types[TUINT32], args[0]) 2603 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS), 2604 intrinsicKey{"runtime/internal/sys", "Ctz64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2605 return s.newValue1(ssa.OpCtz64, Types[TUINT64], args[0]) 2606 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS), 2607 intrinsicKey{"runtime/internal/sys", "Bswap32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2608 return s.newValue1(ssa.OpBswap32, Types[TUINT32], args[0]) 2609 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X), 2610 intrinsicKey{"runtime/internal/sys", "Bswap64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2611 return s.newValue1(ssa.OpBswap64, Types[TUINT64], args[0]) 2612 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X), 2613 2614 /******** runtime/internal/atomic ********/ 2615 intrinsicKey{"runtime/internal/atomic", "Load"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2616 v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], s.mem()) 2617 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2618 return s.newValue1(ssa.OpSelect0, Types[TUINT32], v) 2619 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2620 intrinsicKey{"runtime/internal/atomic", "Load64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2621 v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], s.mem()) 2622 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2623 return s.newValue1(ssa.OpSelect0, Types[TUINT64], v) 2624 }, sys.AMD64, sys.ARM64, sys.S390X), 2625 intrinsicKey{"runtime/internal/atomic", "Loadp"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2626 v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(ptrto(Types[TUINT8]), ssa.TypeMem), args[0], s.mem()) 2627 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2628 return s.newValue1(ssa.OpSelect0, ptrto(Types[TUINT8]), v) 2629 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2630 2631 intrinsicKey{"runtime/internal/atomic", "Store"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2632 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, args[0], args[1], s.mem()) 2633 return nil 2634 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2635 intrinsicKey{"runtime/internal/atomic", "Store64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2636 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, args[0], args[1], s.mem()) 2637 return nil 2638 }, sys.AMD64, sys.ARM64, sys.S390X), 2639 intrinsicKey{"runtime/internal/atomic", "StorepNoWB"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2640 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, 
args[0], args[1], s.mem()) 2641 return nil 2642 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2643 2644 intrinsicKey{"runtime/internal/atomic", "Xchg"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2645 v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem()) 2646 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2647 return s.newValue1(ssa.OpSelect0, Types[TUINT32], v) 2648 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2649 intrinsicKey{"runtime/internal/atomic", "Xchg64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2650 v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem()) 2651 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2652 return s.newValue1(ssa.OpSelect0, Types[TUINT64], v) 2653 }, sys.AMD64, sys.ARM64, sys.S390X), 2654 2655 intrinsicKey{"runtime/internal/atomic", "Xadd"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2656 v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem()) 2657 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2658 return s.newValue1(ssa.OpSelect0, Types[TUINT32], v) 2659 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2660 intrinsicKey{"runtime/internal/atomic", "Xadd64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2661 v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem()) 2662 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2663 return s.newValue1(ssa.OpSelect0, Types[TUINT64], v) 2664 }, sys.AMD64, sys.ARM64, sys.S390X), 2665 2666 intrinsicKey{"runtime/internal/atomic", "Cas"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2667 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem()) 2668 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2669 return s.newValue1(ssa.OpSelect0, Types[TBOOL], v) 2670 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2671 intrinsicKey{"runtime/internal/atomic", "Cas64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2672 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem()) 2673 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2674 return s.newValue1(ssa.OpSelect0, Types[TBOOL], v) 2675 }, sys.AMD64, sys.ARM64, sys.S390X), 2676 2677 intrinsicKey{"runtime/internal/atomic", "And8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2678 s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, args[0], args[1], s.mem()) 2679 return nil 2680 }, sys.AMD64, sys.ARM64, sys.MIPS), 2681 intrinsicKey{"runtime/internal/atomic", "Or8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2682 s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, args[0], args[1], s.mem()) 2683 return nil 2684 }, sys.AMD64, sys.ARM64, sys.MIPS), 2685 2686 /******** math ********/ 2687 intrinsicKey{"math", "Sqrt"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2688 return s.newValue1(ssa.OpSqrt, Types[TFLOAT64], args[0]) 2689 }, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X), 2690 } 2691 2692 // aliases internal to runtime/internal/atomic 2693 i.std[intrinsicKey{"runtime/internal/atomic", "Loadint64"}] = 2694 
i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2695 i.std[intrinsicKey{"runtime/internal/atomic", "Xaddint64"}] = 2696 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2697 2698 // intrinsics which vary depending on the size of int/ptr. 2699 i.intSized = map[sizedIntrinsicKey]intrinsicBuilder{ 2700 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Load"}], 2701 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}], 2702 } 2703 i.ptrSized = map[sizedIntrinsicKey]intrinsicBuilder{ 2704 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Load"}], 2705 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}], 2706 sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Store"}], 2707 sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}], 2708 sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}], 2709 sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}], 2710 sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}], 2711 sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}], 2712 sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}], 2713 sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}], 2714 sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}], 2715 sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}], 2716 } 2717 2718 /******** sync/atomic ********/ 2719 if flag_race { 2720 // The race detector needs to be able to intercept these calls. 2721 // We can't intrinsify them. 2722 return 2723 } 2724 // these are all aliases to runtime/internal/atomic implementations. 2725 i.std[intrinsicKey{"sync/atomic", "LoadInt32"}] = 2726 i.std[intrinsicKey{"runtime/internal/atomic", "Load"}] 2727 i.std[intrinsicKey{"sync/atomic", "LoadInt64"}] = 2728 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2729 i.std[intrinsicKey{"sync/atomic", "LoadPointer"}] = 2730 i.std[intrinsicKey{"runtime/internal/atomic", "Loadp"}] 2731 i.std[intrinsicKey{"sync/atomic", "LoadUint32"}] = 2732 i.std[intrinsicKey{"runtime/internal/atomic", "Load"}] 2733 i.std[intrinsicKey{"sync/atomic", "LoadUint64"}] = 2734 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2735 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 4}] = 2736 i.std[intrinsicKey{"runtime/internal/atomic", "Load"}] 2737 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 8}] = 2738 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2739 2740 i.std[intrinsicKey{"sync/atomic", "StoreInt32"}] = 2741 i.std[intrinsicKey{"runtime/internal/atomic", "Store"}] 2742 i.std[intrinsicKey{"sync/atomic", "StoreInt64"}] = 2743 i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}] 2744 // Note: not StorePointer, that needs a write barrier. 
Same below for {CompareAnd}Swap. 2745 i.std[intrinsicKey{"sync/atomic", "StoreUint32"}] = 2746 i.std[intrinsicKey{"runtime/internal/atomic", "Store"}] 2747 i.std[intrinsicKey{"sync/atomic", "StoreUint64"}] = 2748 i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}] 2749 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 4}] = 2750 i.std[intrinsicKey{"runtime/internal/atomic", "Store"}] 2751 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 8}] = 2752 i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}] 2753 2754 i.std[intrinsicKey{"sync/atomic", "SwapInt32"}] = 2755 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}] 2756 i.std[intrinsicKey{"sync/atomic", "SwapInt64"}] = 2757 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}] 2758 i.std[intrinsicKey{"sync/atomic", "SwapUint32"}] = 2759 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}] 2760 i.std[intrinsicKey{"sync/atomic", "SwapUint64"}] = 2761 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}] 2762 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 4}] = 2763 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}] 2764 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 8}] = 2765 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}] 2766 2767 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt32"}] = 2768 i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}] 2769 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt64"}] = 2770 i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}] 2771 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint32"}] = 2772 i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}] 2773 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint64"}] = 2774 i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}] 2775 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 4}] = 2776 i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}] 2777 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 8}] = 2778 i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}] 2779 2780 i.std[intrinsicKey{"sync/atomic", "AddInt32"}] = 2781 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}] 2782 i.std[intrinsicKey{"sync/atomic", "AddInt64"}] = 2783 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2784 i.std[intrinsicKey{"sync/atomic", "AddUint32"}] = 2785 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}] 2786 i.std[intrinsicKey{"sync/atomic", "AddUint64"}] = 2787 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2788 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 4}] = 2789 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}] 2790 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 8}] = 2791 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2792 2793 /******** math/big ********/ 2794 i.intSized[sizedIntrinsicKey{"math/big", "mulWW", 8}] = 2795 enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2796 return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1]) 2797 }, sys.AMD64) 2798 i.intSized[sizedIntrinsicKey{"math/big", "divWW", 8}] = 2799 enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2800 return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1], args[2]) 2801 }, sys.AMD64) 2802 } 2803 2804 // findIntrinsic returns a function which builds the SSA equivalent of the 2805 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. 
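// Lookup falls through three tables: the exact std key first, then
// the int-sized table under Widthint, then the pointer-sized table
// under Widthptr (e.g. sync/atomic.AddUintptr resolves via the last).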
2806 func findIntrinsic(sym *Sym) intrinsicBuilder { 2807 if ssa.IntrinsicsDisable { 2808 return nil 2809 } 2810 if sym == nil || sym.Pkg == nil { 2811 return nil 2812 } 2813 if intrinsics == nil { 2814 intrinsicInit() 2815 } 2816 pkg := sym.Pkg.Path 2817 if sym.Pkg == localpkg { 2818 pkg = myimportpath 2819 } 2820 fn := sym.Name 2821 f := intrinsics.std[intrinsicKey{pkg, fn}] 2822 if f != nil { 2823 return f 2824 } 2825 f = intrinsics.intSized[sizedIntrinsicKey{pkg, fn, Widthint}] 2826 if f != nil { 2827 return f 2828 } 2829 return intrinsics.ptrSized[sizedIntrinsicKey{pkg, fn, Widthptr}] 2830 } 2831 2832 func isIntrinsicCall(n *Node) bool { 2833 if n == nil || n.Left == nil { 2834 return false 2835 } 2836 return findIntrinsic(n.Left.Sym) != nil 2837 } 2838 2839 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 2840 func (s *state) intrinsicCall(n *Node) *ssa.Value { 2841 v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) 2842 if ssa.IntrinsicsDebug > 0 { 2843 x := v 2844 if x == nil { 2845 x = s.mem() 2846 } 2847 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { 2848 x = x.Args[0] 2849 } 2850 Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) 2851 } 2852 return v 2853 } 2854 2855 type callArg struct { 2856 offset int64 2857 v *ssa.Value 2858 } 2859 type byOffset []callArg 2860 2861 func (x byOffset) Len() int { return len(x) } 2862 func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 2863 func (x byOffset) Less(i, j int) bool { 2864 return x[i].offset < x[j].offset 2865 } 2866 2867 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. 2868 func (s *state) intrinsicArgs(n *Node) []*ssa.Value { 2869 // This code is complicated because of how walk transforms calls. For a call node, 2870 // each entry in n.List is either an assignment to OINDREGSP which actually 2871 // stores an arg, or an assignment to a temporary which computes an arg 2872 // which is later assigned. 2873 // The args can also be out of order. 2874 // TODO: when walk goes away someday, this code can go away also. 2875 var args []callArg 2876 temps := map[*Node]*ssa.Value{} 2877 for _, a := range n.List.Slice() { 2878 if a.Op != OAS { 2879 s.Fatalf("non-assignment as a function argument %s", opnames[a.Op]) 2880 } 2881 l, r := a.Left, a.Right 2882 switch l.Op { 2883 case ONAME: 2884 // Evaluate and store to "temporary". 2885 // Walk ensures these temporaries are dead outside of n. 2886 temps[l] = s.expr(r) 2887 case OINDREGSP: 2888 // Store a value to an argument slot. 2889 var v *ssa.Value 2890 if x, ok := temps[r]; ok { 2891 // This is a previously computed temporary. 2892 v = x 2893 } else { 2894 // This is an explicit value; evaluate it. 2895 v = s.expr(r) 2896 } 2897 args = append(args, callArg{l.Xoffset, v}) 2898 default: 2899 s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op]) 2900 } 2901 } 2902 sort.Sort(byOffset(args)) 2903 res := make([]*ssa.Value, len(args)) 2904 for i, a := range args { 2905 res[i] = a.v 2906 } 2907 return res 2908 } 2909 2910 // Calls the function n using the specified call type. 2911 // Returns the address of the return value (or nil if none). 
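// For defer/go calls, the argument area written below is, in outline
// (offsets relative to Ctxt.FixedFrameSize()):
//
//	+0:          argsize (uint32)
//	+Widthptr:   closure pointer
//	+2*Widthptr: the call's own arguments (already offset by walk)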
2912 func (s *state) call(n *Node, k callKind) *ssa.Value { 2913 var sym *Sym // target symbol (if static) 2914 var closure *ssa.Value // ptr to closure to run (if dynamic) 2915 var codeptr *ssa.Value // ptr to target code (if dynamic) 2916 var rcvr *ssa.Value // receiver to set 2917 fn := n.Left 2918 switch n.Op { 2919 case OCALLFUNC: 2920 if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC { 2921 sym = fn.Sym 2922 break 2923 } 2924 closure = s.expr(fn) 2925 case OCALLMETH: 2926 if fn.Op != ODOTMETH { 2927 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 2928 } 2929 if k == callNormal { 2930 sym = fn.Sym 2931 break 2932 } 2933 // Make a name n2 for the function. 2934 // fn.Sym might be sync.(*Mutex).Unlock. 2935 // Make a PFUNC node out of that, then evaluate it. 2936 // We get back an SSA value representing &sync.(*Mutex).Unlock·f. 2937 // We can then pass that to defer or go. 2938 n2 := newname(fn.Sym) 2939 n2.Class = PFUNC 2940 n2.Pos = fn.Pos 2941 n2.Type = Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. 2942 closure = s.expr(n2) 2943 // Note: receiver is already assigned in n.List, so we don't 2944 // want to set it here. 2945 case OCALLINTER: 2946 if fn.Op != ODOTINTER { 2947 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 2948 } 2949 i := s.expr(fn.Left) 2950 itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i) 2951 if k != callNormal { 2952 s.nilCheck(itab) 2953 } 2954 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab 2955 itab = s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), itabidx, itab) 2956 if k == callNormal { 2957 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem()) 2958 } else { 2959 closure = itab 2960 } 2961 rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i) 2962 } 2963 dowidth(fn.Type) 2964 stksize := fn.Type.ArgWidth() // includes receiver 2965 2966 // Run all argument assignments. The arg slots have already 2967 // been offset by the appropriate amount (+2*widthptr for go/defer, 2968 // +widthptr for interface calls). 2969 // For OCALLMETH, the receiver is set in these statements. 2970 s.stmtList(n.List) 2971 2972 // Set receiver (for interface calls) 2973 if rcvr != nil { 2974 argStart := Ctxt.FixedFrameSize() 2975 if k != callNormal { 2976 argStart += int64(2 * Widthptr) 2977 } 2978 addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), argStart, s.sp) 2979 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem()) 2980 } 2981 2982 // Defer/go args 2983 if k != callNormal { 2984 // Write argsize and closure (args to Newproc/Deferproc). 
2985 argStart := Ctxt.FixedFrameSize() 2986 argsize := s.constInt32(Types[TUINT32], int32(stksize)) 2987 addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINT32]), argStart, s.sp) 2988 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, addr, argsize, s.mem()) 2989 addr = s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), argStart+int64(Widthptr), s.sp) 2990 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem()) 2991 stksize += 2 * int64(Widthptr) 2992 } 2993 2994 // call target 2995 var call *ssa.Value 2996 switch { 2997 case k == callDefer: 2998 call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem()) 2999 case k == callGo: 3000 call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem()) 3001 case closure != nil: 3002 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem()) 3003 call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem()) 3004 case codeptr != nil: 3005 call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem()) 3006 case sym != nil: 3007 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem()) 3008 default: 3009 Fatalf("bad call type %v %v", n.Op, n) 3010 } 3011 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 3012 s.vars[&memVar] = call 3013 3014 // Finish block for defers 3015 if k == callDefer { 3016 b := s.endBlock() 3017 b.Kind = ssa.BlockDefer 3018 b.SetControl(call) 3019 bNext := s.f.NewBlock(ssa.BlockPlain) 3020 b.AddEdgeTo(bNext) 3021 // Add recover edge to exit code. 3022 r := s.f.NewBlock(ssa.BlockPlain) 3023 s.startBlock(r) 3024 s.exit() 3025 b.AddEdgeTo(r) 3026 b.Likely = ssa.BranchLikely 3027 s.startBlock(bNext) 3028 } 3029 3030 res := n.Left.Type.Results() 3031 if res.NumFields() == 0 || k != callNormal { 3032 // call has no return value. Continue with the next statement. 3033 return nil 3034 } 3035 fp := res.Field(0) 3036 return s.entryNewValue1I(ssa.OpOffPtr, ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp) 3037 } 3038 3039 // etypesign returns the signed-ness of e, for integer/pointer etypes. 3040 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 3041 func etypesign(e EType) int8 { 3042 switch e { 3043 case TINT8, TINT16, TINT32, TINT64, TINT: 3044 return -1 3045 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 3046 return +1 3047 } 3048 return 0 3049 } 3050 3051 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node. 3052 // This improves the effectiveness of cse by using the same Aux values for the 3053 // same symbols. 3054 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} { 3055 switch sym.(type) { 3056 default: 3057 s.Fatalf("sym %v is of unknown type %T", sym, sym) 3058 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol: 3059 // these are the only valid types 3060 } 3061 3062 if lsym, ok := s.varsyms[n]; ok { 3063 return lsym 3064 } else { 3065 s.varsyms[n] = sym 3066 return sym 3067 } 3068 } 3069 3070 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 3071 // Also returns a bool reporting whether the returned value is "volatile", that is, it 3072 // points to the outargs section and thus the referent will be clobbered by any call. 3073 // The value that the returned Value represents is guaranteed to be non-nil. 3074 // If bounded is true then this address does not require a nil check for its operand 3075 // even if that would otherwise be implied.
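// For example, the address of a call result (the OCALLFUNC case
// below) points into the outargs area, so it is reported volatile.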
3076 func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) { 3077 t := ptrto(n.Type) 3078 switch n.Op { 3079 case ONAME: 3080 switch n.Class { 3081 case PEXTERN: 3082 // global variable 3083 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Sym}) 3084 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb) 3085 // TODO: Make OpAddr use AuxInt as well as Aux. 3086 if n.Xoffset != 0 { 3087 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 3088 } 3089 return v, false 3090 case PPARAM: 3091 // parameter slot 3092 v := s.decladdrs[n] 3093 if v != nil { 3094 return v, false 3095 } 3096 if n == nodfp { 3097 // Special arg that points to the frame pointer (Used by ORECOVER). 3098 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 3099 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false 3100 } 3101 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) 3102 return nil, false 3103 case PAUTO: 3104 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n}) 3105 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 3106 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 3107 // ensure that we reuse symbols for out parameters so 3108 // that cse works on their addresses 3109 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 3110 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 3111 default: 3112 s.Fatalf("variable address class %v not implemented", classnames[n.Class]) 3113 return nil, false 3114 } 3115 case OINDREGSP: 3116 // indirect off REGSP 3117 // used for storing/loading arguments/returns to/from callees 3118 return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true 3119 case OINDEX: 3120 if n.Left.Type.IsSlice() { 3121 a := s.expr(n.Left) 3122 i := s.expr(n.Right) 3123 i = s.extendIndex(i, panicindex) 3124 len := s.newValue1(ssa.OpSliceLen, Types[TINT], a) 3125 if !n.Bounded { 3126 s.boundsCheck(i, len) 3127 } 3128 p := s.newValue1(ssa.OpSlicePtr, t, a) 3129 return s.newValue2(ssa.OpPtrIndex, t, p, i), false 3130 } else { // array 3131 a, isVolatile := s.addr(n.Left, bounded) 3132 i := s.expr(n.Right) 3133 i = s.extendIndex(i, panicindex) 3134 len := s.constInt(Types[TINT], n.Left.Type.NumElem()) 3135 if !n.Bounded { 3136 s.boundsCheck(i, len) 3137 } 3138 return s.newValue2(ssa.OpPtrIndex, ptrto(n.Left.Type.Elem()), a, i), isVolatile 3139 } 3140 case OIND: 3141 return s.exprPtr(n.Left, bounded, n.Pos), false 3142 case ODOT: 3143 p, isVolatile := s.addr(n.Left, bounded) 3144 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile 3145 case ODOTPTR: 3146 p := s.exprPtr(n.Left, bounded, n.Pos) 3147 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false 3148 case OCLOSUREVAR: 3149 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 3150 s.entryNewValue0(ssa.OpGetClosurePtr, ptrto(Types[TUINT8]))), false 3151 case OCONVNOP: 3152 addr, isVolatile := s.addr(n.Left, bounded) 3153 return s.newValue1(ssa.OpCopy, t, addr), isVolatile // ensure that addr has the right type 3154 case OCALLFUNC, OCALLINTER, OCALLMETH: 3155 return s.call(n, callNormal), true 3156 case ODOTTYPE: 3157 v, _ := s.dottype(n, false) 3158 if v.Op != ssa.OpLoad { 3159 s.Fatalf("dottype of non-load") 3160 } 3161 if v.Args[1] != s.mem() { 3162 s.Fatalf("memory no longer live from dottype load") 3163 } 3164 return v.Args[0], false 3165 default: 3166 s.Fatalf("unhandled addr %v", n.Op) 3167 return nil, false 3168 } 3169 } 3170 3171 // canSSA reports whether n is SSA-able. 
3172 // n must be an ONAME (or an ODOT sequence with an ONAME base). 3173 func (s *state) canSSA(n *Node) bool { 3174 if Debug['N'] != 0 { 3175 return false 3176 } 3177 for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) { 3178 n = n.Left 3179 } 3180 if n.Op != ONAME { 3181 return false 3182 } 3183 if n.Addrtaken { 3184 return false 3185 } 3186 if n.isParamHeapCopy() { 3187 return false 3188 } 3189 if n.Class == PAUTOHEAP { 3190 Fatalf("canSSA of PAUTOHEAP %v", n) 3191 } 3192 switch n.Class { 3193 case PEXTERN: 3194 return false 3195 case PPARAMOUT: 3196 if hasdefer { 3197 // TODO: handle this case? Named return values must be 3198 // in memory so that the deferred function can see them. 3199 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 3200 // Or maybe not, see issue 18860. Even unnamed return values 3201 // must be written back so if a defer recovers, the caller can see them. 3202 return false 3203 } 3204 if s.cgoUnsafeArgs { 3205 // Cgo effectively takes the address of all result args, 3206 // but the compiler can't see that. 3207 return false 3208 } 3209 } 3210 if n.Class == PPARAM && n.String() == ".this" { 3211 // wrappers generated by genwrapper need to update 3212 // the .this pointer in place. 3213 // TODO: treat as a PPARAMOUT? 3214 return false 3215 } 3216 return canSSAType(n.Type) 3217 // TODO: try to make more variables SSAable? 3218 } 3219 3220 // canSSAType reports whether variables of type t are SSA-able. 3221 func canSSAType(t *Type) bool { 3222 dowidth(t) 3223 if t.Width > int64(4*Widthptr) { 3224 // 4*Widthptr is an arbitrary constant. We want it 3225 // to be at least 3*Widthptr so slices can be registerized. 3226 // Too big and we'll introduce too much register pressure. 3227 return false 3228 } 3229 switch t.Etype { 3230 case TARRAY: 3231 // We can't do larger arrays because dynamic indexing is 3232 // not supported on SSA variables. 3233 // TODO: allow if all indexes are constant. 3234 if t.NumElem() == 0 { 3235 return true 3236 } 3237 if t.NumElem() == 1 { 3238 return canSSAType(t.Elem()) 3239 } 3240 return false 3241 case TSTRUCT: 3242 if t.NumFields() > ssa.MaxStruct { 3243 return false 3244 } 3245 for _, t1 := range t.Fields().Slice() { 3246 if !canSSAType(t1.Type) { 3247 return false 3248 } 3249 } 3250 return true 3251 default: 3252 return true 3253 } 3254 } 3255 3256 // exprPtr evaluates n to a pointer and nil-checks it. 3257 func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value { 3258 p := s.expr(n) 3259 if bounded || n.NonNil { 3260 if s.f.Config.Debug_checknil() && lineno.Line() > 1 { 3261 s.f.Config.Warnl(lineno, "removed nil check") 3262 } 3263 return p 3264 } 3265 s.nilCheck(p) 3266 return p 3267 } 3268 3269 // nilCheck generates nil pointer checking code. 3270 // Used only for automatically inserted nil checks, 3271 // not for user code like 'x != nil'. 3272 func (s *state) nilCheck(ptr *ssa.Value) { 3273 if disable_checknil != 0 { 3274 return 3275 } 3276 s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem()) 3277 } 3278 3279 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. 3280 // Starts a new block on return. 3281 // idx is already converted to full int width. 3282 func (s *state) boundsCheck(idx, len *ssa.Value) { 3283 if Debug['B'] != 0 { 3284 return 3285 } 3286 3287 // bounds check 3288 cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len) 3289 s.check(cmp, panicindex) 3290 } 3291 3292 // sliceBoundsCheck generates slice bounds checking code.
Checks if 0 <= idx <= len, branches to exit if not. 3293 // Starts a new block on return. 3294 // idx and len are already converted to full int width. 3295 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) { 3296 if Debug['B'] != 0 { 3297 return 3298 } 3299 3300 // bounds check 3301 cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len) 3302 s.check(cmp, panicslice) 3303 } 3304 3305 // If cmp (a bool) is false, panic using the given function. 3306 func (s *state) check(cmp *ssa.Value, fn *Node) { 3307 b := s.endBlock() 3308 b.Kind = ssa.BlockIf 3309 b.SetControl(cmp) 3310 b.Likely = ssa.BranchLikely 3311 bNext := s.f.NewBlock(ssa.BlockPlain) 3312 line := s.peekPos() 3313 bPanic := s.panics[funcLine{fn, line}] 3314 if bPanic == nil { 3315 bPanic = s.f.NewBlock(ssa.BlockPlain) 3316 s.panics[funcLine{fn, line}] = bPanic 3317 s.startBlock(bPanic) 3318 // The panic call takes/returns memory to ensure that the right 3319 // memory state is observed if the panic happens. 3320 s.rtcall(fn, false, nil) 3321 } 3322 b.AddEdgeTo(bNext) 3323 b.AddEdgeTo(bPanic) 3324 s.startBlock(bNext) 3325 } 3326 3327 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { 3328 needcheck := true 3329 switch b.Op { 3330 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: 3331 if b.AuxInt != 0 { 3332 needcheck = false 3333 } 3334 } 3335 if needcheck { 3336 // do a size-appropriate check for zero 3337 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type)) 3338 s.check(cmp, panicdivide) 3339 } 3340 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 3341 } 3342 3343 // rtcall issues a call to the given runtime function fn with the listed args. 3344 // Returns a slice of results of the given result types. 3345 // The call is added to the end of the current block. 3346 // If returns is false, the block is marked as an exit block. 3347 func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value { 3348 // Write args to the stack 3349 off := Ctxt.FixedFrameSize() 3350 for _, arg := range args { 3351 t := arg.Type 3352 off = Rnd(off, t.Alignment()) 3353 ptr := s.sp 3354 if off != 0 { 3355 ptr = s.newValue1I(ssa.OpOffPtr, t.PtrTo(), off, s.sp) 3356 } 3357 size := t.Size() 3358 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem()) 3359 off += size 3360 } 3361 off = Rnd(off, int64(Widthptr)) 3362 if Thearch.LinkArch.Name == "amd64p32" { 3363 // amd64p32 wants 8-byte alignment of the start of the return values. 3364 off = Rnd(off, 8) 3365 } 3366 3367 // Issue call 3368 call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem()) 3369 s.vars[&memVar] = call 3370 3371 if !returns { 3372 // Finish block 3373 b := s.endBlock() 3374 b.Kind = ssa.BlockExit 3375 b.SetControl(call) 3376 call.AuxInt = off - Ctxt.FixedFrameSize() 3377 if len(results) > 0 { 3378 Fatalf("panic call can't have results") 3379 } 3380 return nil 3381 } 3382 3383 // Load results 3384 res := make([]*ssa.Value, len(results)) 3385 for i, t := range results { 3386 off = Rnd(off, t.Alignment()) 3387 ptr := s.sp 3388 if off != 0 { 3389 ptr = s.newValue1I(ssa.OpOffPtr, ptrto(t), off, s.sp) 3390 } 3391 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) 3392 off += t.Size() 3393 } 3394 off = Rnd(off, int64(Widthptr)) 3395 3396 // Remember how much callee stack space we needed. 3397 call.AuxInt = off 3398 3399 return res 3400 } 3401 3402 // insertWBmove inserts the assignment *left = *right including a write barrier. 
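// It is used when the source value is itself in memory; insertWBstore below handles storing an SSA value through a pointer.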
3403 // t is the type being assigned. 3404 // If right == nil, then we're zeroing *left. 3405 func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line src.XPos, rightIsVolatile bool) { 3406 // if writeBarrier.enabled { 3407 // typedmemmove(&t, left, right) 3408 // } else { 3409 // *left = *right 3410 // } 3411 // 3412 // or 3413 // 3414 // if writeBarrier.enabled { 3415 // typedmemclr(&t, left) 3416 // } else { 3417 // *left = zeroValue 3418 // } 3419 3420 if s.noWB { 3421 s.Error("write barrier prohibited") 3422 } 3423 if !s.WBPos.IsKnown() { 3424 s.WBPos = left.Pos 3425 } 3426 3427 var val *ssa.Value 3428 if right == nil { 3429 val = s.newValue2I(ssa.OpZeroWB, ssa.TypeMem, sizeAlignAuxInt(t), left, s.mem()) 3430 } else { 3431 var op ssa.Op 3432 if rightIsVolatile { 3433 op = ssa.OpMoveWBVolatile 3434 } else { 3435 op = ssa.OpMoveWB 3436 } 3437 val = s.newValue3I(op, ssa.TypeMem, sizeAlignAuxInt(t), left, right, s.mem()) 3438 } 3439 val.Aux = &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)} 3440 s.vars[&memVar] = val 3441 3442 // WB ops will be expanded to branches at writebarrier phase. 3443 // To make it easy, we put WB ops at the end of a block, so 3444 // that it does not need to split a block into two parts when 3445 // expanding WB ops. 3446 b := s.f.NewBlock(ssa.BlockPlain) 3447 s.endBlock().AddEdgeTo(b) 3448 s.startBlock(b) 3449 } 3450 3451 // insertWBstore inserts the assignment *left = right including a write barrier. 3452 // t is the type being assigned. 3453 func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line src.XPos, skip skipMask) { 3454 // store scalar fields 3455 // if writeBarrier.enabled { 3456 // writebarrierptr for pointer fields 3457 // } else { 3458 // store pointer fields 3459 // } 3460 3461 if s.noWB { 3462 s.Error("write barrier prohibited") 3463 } 3464 if !s.WBPos.IsKnown() { 3465 s.WBPos = left.Pos 3466 } 3467 s.storeTypeScalars(t, left, right, skip) 3468 s.storeTypePtrsWB(t, left, right) 3469 3470 // WB ops will be expanded to branches at writebarrier phase. 3471 // To make it easy, we put WB ops at the end of a block, so 3472 // that it does not need to split a block into two parts when 3473 // expanding WB ops. 3474 b := s.f.NewBlock(ssa.BlockPlain) 3475 s.endBlock().AddEdgeTo(b) 3476 s.startBlock(b) 3477 } 3478 3479 // do *left = right for all scalar (non-pointer) parts of t. 3480 func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) { 3481 switch { 3482 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): 3483 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem()) 3484 case t.IsPtrShaped(): 3485 // no scalar fields. 
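// Pointer-shaped values carry no scalar parts; they are written entirely by storeTypePtrs/storeTypePtrsWB.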
3486 case t.IsString(): 3487 if skip&skipLen != 0 { 3488 return 3489 } 3490 len := s.newValue1(ssa.OpStringLen, Types[TINT], right) 3491 lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left) 3492 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem()) 3493 case t.IsSlice(): 3494 if skip&skipLen == 0 { 3495 len := s.newValue1(ssa.OpSliceLen, Types[TINT], right) 3496 lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left) 3497 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem()) 3498 } 3499 if skip&skipCap == 0 { 3500 cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right) 3501 capAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), 2*s.config.IntSize, left) 3502 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem()) 3503 } 3504 case t.IsInterface(): 3505 // itab field doesn't need a write barrier (even though it is a pointer). 3506 itab := s.newValue1(ssa.OpITab, ptrto(Types[TUINT8]), right) 3507 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem()) 3508 case t.IsStruct(): 3509 n := t.NumFields() 3510 for i := 0; i < n; i++ { 3511 ft := t.FieldType(i) 3512 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3513 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3514 s.storeTypeScalars(ft.(*Type), addr, val, 0) 3515 } 3516 case t.IsArray() && t.NumElem() == 0: 3517 // nothing 3518 case t.IsArray() && t.NumElem() == 1: 3519 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0) 3520 default: 3521 s.Fatalf("bad write barrier type %v", t) 3522 } 3523 } 3524 3525 // do *left = right for all pointer parts of t. 3526 func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) { 3527 switch { 3528 case t.IsPtrShaped(): 3529 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem()) 3530 case t.IsString(): 3531 ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right) 3532 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3533 case t.IsSlice(): 3534 ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right) 3535 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3536 case t.IsInterface(): 3537 // itab field is treated as a scalar. 3538 idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right) 3539 idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINT8]), s.config.PtrSize, left) 3540 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem()) 3541 case t.IsStruct(): 3542 n := t.NumFields() 3543 for i := 0; i < n; i++ { 3544 ft := t.FieldType(i) 3545 if !haspointers(ft.(*Type)) { 3546 continue 3547 } 3548 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3549 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3550 s.storeTypePtrs(ft.(*Type), addr, val) 3551 } 3552 case t.IsArray() && t.NumElem() == 0: 3553 // nothing 3554 case t.IsArray() && t.NumElem() == 1: 3555 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3556 default: 3557 s.Fatalf("bad write barrier type %v", t) 3558 } 3559 } 3560 3561 // do *left = right for all pointer parts of t, with write barriers if necessary. 
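// It mirrors storeTypePtrs, with each plain OpStore replaced by its WB variant so that the writebarrier pass can expand every such store into an "if writeBarrier.enabled" branch.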
3562 func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) { 3563 switch { 3564 case t.IsPtrShaped(): 3565 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, right, s.mem()) 3566 case t.IsString(): 3567 ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right) 3568 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3569 case t.IsSlice(): 3570 ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right) 3571 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3572 case t.IsInterface(): 3573 // itab field is treated as a scalar. 3574 idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right) 3575 idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINT8]), s.config.PtrSize, left) 3576 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem()) 3577 case t.IsStruct(): 3578 n := t.NumFields() 3579 for i := 0; i < n; i++ { 3580 ft := t.FieldType(i) 3581 if !haspointers(ft.(*Type)) { 3582 continue 3583 } 3584 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3585 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3586 s.storeTypePtrsWB(ft.(*Type), addr, val) 3587 } 3588 case t.IsArray() && t.NumElem() == 0: 3589 // nothing 3590 case t.IsArray() && t.NumElem() == 1: 3591 s.storeTypePtrsWB(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3592 default: 3593 s.Fatalf("bad write barrier type %v", t) 3594 } 3595 } 3596 3597 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result. 3598 // i,j,k may be nil, in which case they are set to their default value. 3599 // t is a slice, ptr to array, or string type. 3600 func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) { 3601 var elemtype *Type 3602 var ptrtype *Type 3603 var ptr *ssa.Value 3604 var len *ssa.Value 3605 var cap *ssa.Value 3606 zero := s.constInt(Types[TINT], 0) 3607 switch { 3608 case t.IsSlice(): 3609 elemtype = t.Elem() 3610 ptrtype = ptrto(elemtype) 3611 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v) 3612 len = s.newValue1(ssa.OpSliceLen, Types[TINT], v) 3613 cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v) 3614 case t.IsString(): 3615 elemtype = Types[TUINT8] 3616 ptrtype = ptrto(elemtype) 3617 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v) 3618 len = s.newValue1(ssa.OpStringLen, Types[TINT], v) 3619 cap = len 3620 case t.IsPtr(): 3621 if !t.Elem().IsArray() { 3622 s.Fatalf("bad ptr to array in slice %v\n", t) 3623 } 3624 elemtype = t.Elem().Elem() 3625 ptrtype = ptrto(elemtype) 3626 s.nilCheck(v) 3627 ptr = v 3628 len = s.constInt(Types[TINT], t.Elem().NumElem()) 3629 cap = len 3630 default: 3631 s.Fatalf("bad type in slice %v\n", t) 3632 } 3633 3634 // Set default values 3635 if i == nil { 3636 i = zero 3637 } 3638 if j == nil { 3639 j = len 3640 } 3641 if k == nil { 3642 k = cap 3643 } 3644 3645 // Panic if slice indices are not in bounds. 3646 s.sliceBoundsCheck(i, j) 3647 if j != k { 3648 s.sliceBoundsCheck(j, k) 3649 } 3650 if k != cap { 3651 s.sliceBoundsCheck(k, cap) 3652 } 3653 3654 // Generate the following code assuming that indexes are in bounds. 3655 // The masking is to make sure that we don't generate a slice 3656 // that points to the next object in memory. 
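// For example, after s := make([]int, 0, 8), the expression s[8:] has zero capacity; the mask zeroes delta so that rptr stays at the start of the backing array instead of pointing one element past its end.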
3657 // rlen = j - i 3658 // rcap = k - i 3659 // delta = i * elemsize 3660 // rptr = p + delta&mask(rcap) 3661 // result = (SliceMake rptr rlen rcap) 3662 // where mask(x) is 0 if x==0 and -1 if x>0. 3663 subOp := s.ssaOp(OSUB, Types[TINT]) 3664 mulOp := s.ssaOp(OMUL, Types[TINT]) 3665 andOp := s.ssaOp(OAND, Types[TINT]) 3666 rlen := s.newValue2(subOp, Types[TINT], j, i) 3667 var rcap *ssa.Value 3668 switch { 3669 case t.IsString(): 3670 // Capacity of the result is unimportant. However, we use 3671 // rcap to test if we've generated a zero-length slice. 3672 // Use length of strings for that. 3673 rcap = rlen 3674 case j == k: 3675 rcap = rlen 3676 default: 3677 rcap = s.newValue2(subOp, Types[TINT], k, i) 3678 } 3679 3680 var rptr *ssa.Value 3681 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 { 3682 // No pointer arithmetic necessary. 3683 rptr = ptr 3684 } else { 3685 // delta = # of bytes to offset pointer by. 3686 delta := s.newValue2(mulOp, Types[TINT], i, s.constInt(Types[TINT], elemtype.Width)) 3687 // If we're slicing to the point where the capacity is zero, 3688 // zero out the delta. 3689 mask := s.newValue1(ssa.OpSlicemask, Types[TINT], rcap) 3690 delta = s.newValue2(andOp, Types[TINT], delta, mask) 3691 // Compute rptr = ptr + delta 3692 rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta) 3693 } 3694 3695 return rptr, rlen, rcap 3696 } 3697 3698 type u642fcvtTab struct { 3699 geq, cvt2F, and, rsh, or, add ssa.Op 3700 one func(*state, ssa.Type, int64) *ssa.Value 3701 } 3702 3703 var u64_f64 u642fcvtTab = u642fcvtTab{ 3704 geq: ssa.OpGeq64, 3705 cvt2F: ssa.OpCvt64to64F, 3706 and: ssa.OpAnd64, 3707 rsh: ssa.OpRsh64Ux64, 3708 or: ssa.OpOr64, 3709 add: ssa.OpAdd64F, 3710 one: (*state).constInt64, 3711 } 3712 3713 var u64_f32 u642fcvtTab = u642fcvtTab{ 3714 geq: ssa.OpGeq64, 3715 cvt2F: ssa.OpCvt64to32F, 3716 and: ssa.OpAnd64, 3717 rsh: ssa.OpRsh64Ux64, 3718 or: ssa.OpOr64, 3719 add: ssa.OpAdd32F, 3720 one: (*state).constInt64, 3721 } 3722 3723 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3724 return s.uint64Tofloat(&u64_f64, n, x, ft, tt) 3725 } 3726 3727 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3728 return s.uint64Tofloat(&u64_f32, n, x, ft, tt) 3729 } 3730 3731 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3732 // if x >= 0 { 3733 // result = (floatY) x 3734 // } else { 3735 // y = uintX(x) ; y = x & 1 3736 // z = uintX(x) ; z = z >> 1 3738 // z = z | y 3739 // result = floatY(z) 3740 // result = result + result 3741 // } 3742 // 3743 // Code borrowed from old code generator. 3744 // What's going on: a large 64-bit "unsigned" looks like a 3745 // negative number to hardware's integer-to-float 3746 // conversion. However, because the mantissa is only 3747 // 63 bits, we don't need the LSB, so instead we do an 3748 // unsigned right shift (divide by two), convert, and 3749 // double. However, before we do that, we need to be 3750 // sure that we do not lose a "1" if that made the 3751 // difference in the resulting rounding. Therefore, we 3752 // preserve it, and OR (not ADD) it back in. The case 3753 // that matters is when the eleven discarded bits are 3754 // equal to 10000000001; that rounds up, and the 1 cannot 3755 // be lost else it would round down if the LSB of the 3756 // candidate mantissa is 0.
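// Roughly equivalent Go, as a sketch of the 64-bit case:
//
//	if int64(x) >= 0 {
//		f = float64(x)
//	} else {
//		f = float64(x>>1|x&1) * 2
//	}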
3757 cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft)) 3758 b := s.endBlock() 3759 b.Kind = ssa.BlockIf 3760 b.SetControl(cmp) 3761 b.Likely = ssa.BranchLikely 3762 3763 bThen := s.f.NewBlock(ssa.BlockPlain) 3764 bElse := s.f.NewBlock(ssa.BlockPlain) 3765 bAfter := s.f.NewBlock(ssa.BlockPlain) 3766 3767 b.AddEdgeTo(bThen) 3768 s.startBlock(bThen) 3769 a0 := s.newValue1(cvttab.cvt2F, tt, x) 3770 s.vars[n] = a0 3771 s.endBlock() 3772 bThen.AddEdgeTo(bAfter) 3773 3774 b.AddEdgeTo(bElse) 3775 s.startBlock(bElse) 3776 one := cvttab.one(s, ft, 1) 3777 y := s.newValue2(cvttab.and, ft, x, one) 3778 z := s.newValue2(cvttab.rsh, ft, x, one) 3779 z = s.newValue2(cvttab.or, ft, z, y) 3780 a := s.newValue1(cvttab.cvt2F, tt, z) 3781 a1 := s.newValue2(cvttab.add, tt, a, a) 3782 s.vars[n] = a1 3783 s.endBlock() 3784 bElse.AddEdgeTo(bAfter) 3785 3786 s.startBlock(bAfter) 3787 return s.variable(n, n.Type) 3788 } 3789 3790 type u322fcvtTab struct { 3791 cvtI2F, cvtF2F ssa.Op 3792 } 3793 3794 var u32_f64 u322fcvtTab = u322fcvtTab{ 3795 cvtI2F: ssa.OpCvt32to64F, 3796 cvtF2F: ssa.OpCopy, 3797 } 3798 3799 var u32_f32 u322fcvtTab = u322fcvtTab{ 3800 cvtI2F: ssa.OpCvt32to32F, 3801 cvtF2F: ssa.OpCvt64Fto32F, 3802 } 3803 3804 func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3805 return s.uint32Tofloat(&u32_f64, n, x, ft, tt) 3806 } 3807 3808 func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3809 return s.uint32Tofloat(&u32_f32, n, x, ft, tt) 3810 } 3811 3812 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3813 // if x >= 0 { 3814 // result = floatY(x) 3815 // } else { 3816 // result = floatY(float64(x) + (1<<32)) 3817 // } 3818 cmp := s.newValue2(ssa.OpGeq32, Types[TBOOL], x, s.zeroVal(ft)) 3819 b := s.endBlock() 3820 b.Kind = ssa.BlockIf 3821 b.SetControl(cmp) 3822 b.Likely = ssa.BranchLikely 3823 3824 bThen := s.f.NewBlock(ssa.BlockPlain) 3825 bElse := s.f.NewBlock(ssa.BlockPlain) 3826 bAfter := s.f.NewBlock(ssa.BlockPlain) 3827 3828 b.AddEdgeTo(bThen) 3829 s.startBlock(bThen) 3830 a0 := s.newValue1(cvttab.cvtI2F, tt, x) 3831 s.vars[n] = a0 3832 s.endBlock() 3833 bThen.AddEdgeTo(bAfter) 3834 3835 b.AddEdgeTo(bElse) 3836 s.startBlock(bElse) 3837 a1 := s.newValue1(ssa.OpCvt32to64F, Types[TFLOAT64], x) 3838 twoToThe32 := s.constFloat64(Types[TFLOAT64], float64(1<<32)) 3839 a2 := s.newValue2(ssa.OpAdd64F, Types[TFLOAT64], a1, twoToThe32) 3840 a3 := s.newValue1(cvttab.cvtF2F, tt, a2) 3841 3842 s.vars[n] = a3 3843 s.endBlock() 3844 bElse.AddEdgeTo(bAfter) 3845 3846 s.startBlock(bAfter) 3847 return s.variable(n, n.Type) 3848 } 3849 3850 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
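// It relies on the runtime layouts of hmap and hchan, both of which store their element count in the first word.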
3851 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { 3852 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { 3853 s.Fatalf("node must be a map or a channel") 3854 } 3855 // if n == nil { 3856 // return 0 3857 // } else { 3858 // // len 3859 // return *((*int)n) 3860 // // cap 3861 // return *(((*int)n)+1) 3862 // } 3863 lenType := n.Type 3864 nilValue := s.constNil(Types[TUINTPTR]) 3865 cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue) 3866 b := s.endBlock() 3867 b.Kind = ssa.BlockIf 3868 b.SetControl(cmp) 3869 b.Likely = ssa.BranchUnlikely 3870 3871 bThen := s.f.NewBlock(ssa.BlockPlain) 3872 bElse := s.f.NewBlock(ssa.BlockPlain) 3873 bAfter := s.f.NewBlock(ssa.BlockPlain) 3874 3875 // length/capacity of a nil map/chan is zero 3876 b.AddEdgeTo(bThen) 3877 s.startBlock(bThen) 3878 s.vars[n] = s.zeroVal(lenType) 3879 s.endBlock() 3880 bThen.AddEdgeTo(bAfter) 3881 3882 b.AddEdgeTo(bElse) 3883 s.startBlock(bElse) 3884 if n.Op == OLEN { 3885 // length is stored in the first word for map/chan 3886 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem()) 3887 } else if n.Op == OCAP { 3888 // capacity is stored in the second word for chan 3889 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) 3890 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem()) 3891 } else { 3892 s.Fatalf("op must be OLEN or OCAP") 3893 } 3894 s.endBlock() 3895 bElse.AddEdgeTo(bAfter) 3896 3897 s.startBlock(bAfter) 3898 return s.variable(n, lenType) 3899 } 3900 3901 type f2uCvtTab struct { 3902 ltf, cvt2U, subf, or ssa.Op 3903 floatValue func(*state, ssa.Type, float64) *ssa.Value 3904 intValue func(*state, ssa.Type, int64) *ssa.Value 3905 cutoff uint64 3906 } 3907 3908 var f32_u64 f2uCvtTab = f2uCvtTab{ 3909 ltf: ssa.OpLess32F, 3910 cvt2U: ssa.OpCvt32Fto64, 3911 subf: ssa.OpSub32F, 3912 or: ssa.OpOr64, 3913 floatValue: (*state).constFloat32, 3914 intValue: (*state).constInt64, 3915 cutoff: 9223372036854775808, 3916 } 3917 3918 var f64_u64 f2uCvtTab = f2uCvtTab{ 3919 ltf: ssa.OpLess64F, 3920 cvt2U: ssa.OpCvt64Fto64, 3921 subf: ssa.OpSub64F, 3922 or: ssa.OpOr64, 3923 floatValue: (*state).constFloat64, 3924 intValue: (*state).constInt64, 3925 cutoff: 9223372036854775808, 3926 } 3927 3928 var f32_u32 f2uCvtTab = f2uCvtTab{ 3929 ltf: ssa.OpLess32F, 3930 cvt2U: ssa.OpCvt32Fto32, 3931 subf: ssa.OpSub32F, 3932 or: ssa.OpOr32, 3933 floatValue: (*state).constFloat32, 3934 intValue: func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 3935 cutoff: 2147483648, 3936 } 3937 3938 var f64_u32 f2uCvtTab = f2uCvtTab{ 3939 ltf: ssa.OpLess64F, 3940 cvt2U: ssa.OpCvt64Fto32, 3941 subf: ssa.OpSub64F, 3942 or: ssa.OpOr32, 3943 floatValue: (*state).constFloat64, 3944 intValue: func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 3945 cutoff: 2147483648, 3946 } 3947 3948 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3949 return s.floatToUint(&f32_u64, n, x, ft, tt) 3950 } 3951 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3952 return s.floatToUint(&f64_u64, n, x, ft, tt) 3953 } 3954 3955 func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3956 return s.floatToUint(&f32_u32, n, x, ft, tt) 3957 } 3958 3959 func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3960 return s.floatToUint(&f64_u32, n, x, ft, tt) 3961 } 3962 3963 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) 
*ssa.Value { 3964 // cutoff:=1<<(intY_Size-1) 3965 // if x < floatX(cutoff) { 3966 // result = uintY(x) 3967 // } else { 3968 // y = x - floatX(cutoff) 3969 // z = uintY(y) 3970 // result = z | -(cutoff) 3971 // } 3972 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) 3973 cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, cutoff) 3974 b := s.endBlock() 3975 b.Kind = ssa.BlockIf 3976 b.SetControl(cmp) 3977 b.Likely = ssa.BranchLikely 3978 3979 bThen := s.f.NewBlock(ssa.BlockPlain) 3980 bElse := s.f.NewBlock(ssa.BlockPlain) 3981 bAfter := s.f.NewBlock(ssa.BlockPlain) 3982 3983 b.AddEdgeTo(bThen) 3984 s.startBlock(bThen) 3985 a0 := s.newValue1(cvttab.cvt2U, tt, x) 3986 s.vars[n] = a0 3987 s.endBlock() 3988 bThen.AddEdgeTo(bAfter) 3989 3990 b.AddEdgeTo(bElse) 3991 s.startBlock(bElse) 3992 y := s.newValue2(cvttab.subf, ft, x, cutoff) 3993 y = s.newValue1(cvttab.cvt2U, tt, y) 3994 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) 3995 a1 := s.newValue2(cvttab.or, tt, y, z) 3996 s.vars[n] = a1 3997 s.endBlock() 3998 bElse.AddEdgeTo(bAfter) 3999 4000 s.startBlock(bAfter) 4001 return s.variable(n, n.Type) 4002 } 4003 4004 // ifaceType returns the value for the word containing the type. 4005 // t is the type of the interface expression. 4006 // v is the corresponding value. 4007 func (s *state) ifaceType(t *Type, v *ssa.Value) *ssa.Value { 4008 byteptr := ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte) 4009 4010 if t.IsEmptyInterface() { 4011 // Have eface. The type is the first word in the struct. 4012 return s.newValue1(ssa.OpITab, byteptr, v) 4013 } 4014 4015 // Have iface. 4016 // The first word in the struct is the itab. 4017 // If the itab is nil, return 0. 4018 // Otherwise, the second word in the itab is the type. 4019 4020 tab := s.newValue1(ssa.OpITab, byteptr, v) 4021 s.vars[&typVar] = tab 4022 isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr)) 4023 b := s.endBlock() 4024 b.Kind = ssa.BlockIf 4025 b.SetControl(isnonnil) 4026 b.Likely = ssa.BranchLikely 4027 4028 bLoad := s.f.NewBlock(ssa.BlockPlain) 4029 bEnd := s.f.NewBlock(ssa.BlockPlain) 4030 4031 b.AddEdgeTo(bLoad) 4032 b.AddEdgeTo(bEnd) 4033 bLoad.AddEdgeTo(bEnd) 4034 4035 s.startBlock(bLoad) 4036 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab) 4037 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4038 s.endBlock() 4039 4040 s.startBlock(bEnd) 4041 typ := s.variable(&typVar, byteptr) 4042 delete(s.vars, &typVar) 4043 return typ 4044 } 4045 4046 // dottype generates SSA for a type assertion node. 4047 // commaok indicates whether to panic or return a bool. 4048 // If commaok is false, resok will be nil. 4049 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { 4050 iface := s.expr(n.Left) // input interface 4051 target := s.expr(typename(n.Type)) // target type 4052 byteptr := ptrto(Types[TUINT8]) 4053 4054 if n.Type.IsInterface() { 4055 if n.Type.IsEmptyInterface() { 4056 // Converting to an empty interface. 4057 // Input could be an empty or nonempty interface. 4058 if Debug_typeassert > 0 { 4059 Warnl(n.Pos, "type assertion inlined") 4060 } 4061 4062 // Get itab/type field from input. 4063 itab := s.newValue1(ssa.OpITab, byteptr, iface) 4064 // Conversion succeeds iff that field is not nil. 4065 cond := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], itab, s.constNil(byteptr)) 4066 4067 if n.Left.Type.IsEmptyInterface() && commaok { 4068 // Converting empty interface to empty interface with ,ok is just a nil check. 
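// That is, v, ok := e.(interface{}) yields v == e and ok == (e != nil).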
4069 return iface, cond 4070 } 4071 4072 // Branch on nilness. 4073 b := s.endBlock() 4074 b.Kind = ssa.BlockIf 4075 b.SetControl(cond) 4076 b.Likely = ssa.BranchLikely 4077 bOk := s.f.NewBlock(ssa.BlockPlain) 4078 bFail := s.f.NewBlock(ssa.BlockPlain) 4079 b.AddEdgeTo(bOk) 4080 b.AddEdgeTo(bFail) 4081 4082 if !commaok { 4083 // On failure, panic by calling panicnildottype. 4084 s.startBlock(bFail) 4085 s.rtcall(panicnildottype, false, nil, target) 4086 4087 // On success, return (perhaps modified) input interface. 4088 s.startBlock(bOk) 4089 if n.Left.Type.IsEmptyInterface() { 4090 res = iface // Use input interface unchanged. 4091 return 4092 } 4093 // Load type out of itab, build interface with existing idata. 4094 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4095 typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4096 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4097 res = s.newValue2(ssa.OpIMake, n.Type, typ, idata) 4098 return 4099 } 4100 4101 s.startBlock(bOk) 4102 // nonempty -> empty 4103 // Need to load type from itab 4104 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4105 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4106 s.endBlock() 4107 4108 // itab is nil, might as well use that as the nil result. 4109 s.startBlock(bFail) 4110 s.vars[&typVar] = itab 4111 s.endBlock() 4112 4113 // Merge point. 4114 bEnd := s.f.NewBlock(ssa.BlockPlain) 4115 bOk.AddEdgeTo(bEnd) 4116 bFail.AddEdgeTo(bEnd) 4117 s.startBlock(bEnd) 4118 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4119 res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata) 4120 resok = cond 4121 delete(s.vars, &typVar) 4122 return 4123 } 4124 // converting to a nonempty interface needs a runtime call. 4125 if Debug_typeassert > 0 { 4126 Warnl(n.Pos, "type assertion not inlined") 4127 } 4128 if n.Left.Type.IsEmptyInterface() { 4129 if commaok { 4130 call := s.rtcall(assertE2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface) 4131 return call[0], call[1] 4132 } 4133 return s.rtcall(assertE2I, true, []*Type{n.Type}, target, iface)[0], nil 4134 } 4135 if commaok { 4136 call := s.rtcall(assertI2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface) 4137 return call[0], call[1] 4138 } 4139 return s.rtcall(assertI2I, true, []*Type{n.Type}, target, iface)[0], nil 4140 } 4141 4142 if Debug_typeassert > 0 { 4143 Warnl(n.Pos, "type assertion inlined") 4144 } 4145 4146 // Converting to a concrete type. 4147 direct := isdirectiface(n.Type) 4148 typ := s.ifaceType(n.Left.Type, iface) // actual concrete type of input interface 4149 4154 var tmp *Node // temporary for use with large types 4155 var addr *ssa.Value // address of tmp 4156 if commaok && !canSSAType(n.Type) { 4157 // unSSAable type, use temporary. 4158 // TODO: get rid of some of these temporaries. 4159 tmp = temp(n.Type) 4160 addr, _ = s.addr(tmp, false) 4161 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem()) 4162 } 4163 4164 // TODO: If we have a nonempty interface and its itab field is nil, 4165 // then this test is redundant and ifaceType should just branch directly to bFail.
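// The assertion succeeds iff the dynamic type word of the interface equals the type descriptor of the asserted type.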
4166 cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target) 4167 b := s.endBlock() 4168 b.Kind = ssa.BlockIf 4169 b.SetControl(cond) 4170 b.Likely = ssa.BranchLikely 4171 4172 bOk := s.f.NewBlock(ssa.BlockPlain) 4173 bFail := s.f.NewBlock(ssa.BlockPlain) 4174 b.AddEdgeTo(bOk) 4175 b.AddEdgeTo(bFail) 4176 4177 if !commaok { 4178 // on failure, panic by calling panicdottype 4179 s.startBlock(bFail) 4180 taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: typenamesym(n.Left.Type)}, s.sb) 4181 s.rtcall(panicdottype, false, nil, typ, target, taddr) 4182 4183 // on success, return data from interface 4184 s.startBlock(bOk) 4185 if direct { 4186 return s.newValue1(ssa.OpIData, n.Type, iface), nil 4187 } 4188 p := s.newValue1(ssa.OpIData, ptrto(n.Type), iface) 4189 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil 4190 } 4191 4192 // commaok is the more complicated case because we have 4193 // a control flow merge point. 4194 bEnd := s.f.NewBlock(ssa.BlockPlain) 4195 // Note that we need a new valVar each time (unlike okVar where we can 4196 // reuse the variable) because it might have a different type every time. 4197 valVar := &Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "val"}} 4198 4199 // type assertion succeeded 4200 s.startBlock(bOk) 4201 if tmp == nil { 4202 if direct { 4203 s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface) 4204 } else { 4205 p := s.newValue1(ssa.OpIData, ptrto(n.Type), iface) 4206 s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 4207 } 4208 } else { 4209 p := s.newValue1(ssa.OpIData, ptrto(n.Type), iface) 4210 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(n.Type), addr, p, s.mem()) 4211 } 4212 s.vars[&okVar] = s.constBool(true) 4213 s.endBlock() 4214 bOk.AddEdgeTo(bEnd) 4215 4216 // type assertion failed 4217 s.startBlock(bFail) 4218 if tmp == nil { 4219 s.vars[valVar] = s.zeroVal(n.Type) 4220 } else { 4221 s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(n.Type), addr, s.mem()) 4222 } 4223 s.vars[&okVar] = s.constBool(false) 4224 s.endBlock() 4225 bFail.AddEdgeTo(bEnd) 4226 4227 // merge point 4228 s.startBlock(bEnd) 4229 if tmp == nil { 4230 res = s.variable(valVar, n.Type) 4231 delete(s.vars, valVar) 4232 } else { 4233 res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 4234 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem()) 4235 } 4236 resok = s.variable(&okVar, Types[TBOOL]) 4237 delete(s.vars, &okVar) 4238 return res, resok 4239 } 4240 4241 // checkgoto checks that a goto from from to to does not 4242 // jump into a block or jump over variable declarations. 4243 // It is a copy of checkgoto in the pre-SSA backend, 4244 // modified only for line number handling. 4245 // TODO: document how this works and why it is designed the way it is. 4246 func (s *state) checkgoto(from *Node, to *Node) { 4247 if from.Sym == to.Sym { 4248 return 4249 } 4250 4251 nf := 0 4252 for fs := from.Sym; fs != nil; fs = fs.Link { 4253 nf++ 4254 } 4255 nt := 0 4256 for fs := to.Sym; fs != nil; fs = fs.Link { 4257 nt++ 4258 } 4259 fs := from.Sym 4260 for ; nf > nt; nf-- { 4261 fs = fs.Link 4262 } 4263 if fs != to.Sym { 4264 // decide what to complain about. 4265 // prefer to complain about 'into block' over declarations, 4266 // so scan backward to find most recent block or else dcl. 
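// Symbols with a nil Pkg on the scope chain denote block openings; the others denote declarations.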
4267 var block *Sym 4268 4269 var dcl *Sym 4270 ts := to.Sym 4271 for ; nt > nf; nt-- { 4272 if ts.Pkg == nil { 4273 block = ts 4274 } else { 4275 dcl = ts 4276 } 4277 ts = ts.Link 4278 } 4279 4280 for ts != fs { 4281 if ts.Pkg == nil { 4282 block = ts 4283 } else { 4284 dcl = ts 4285 } 4286 ts = ts.Link 4287 fs = fs.Link 4288 } 4289 4290 lno := from.Left.Pos 4291 if block != nil { 4292 yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno)) 4293 } else { 4294 yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno)) 4295 } 4296 } 4297 } 4298 4299 // variable returns the value of a variable at the current location. 4300 func (s *state) variable(name *Node, t ssa.Type) *ssa.Value { 4301 v := s.vars[name] 4302 if v != nil { 4303 return v 4304 } 4305 v = s.fwdVars[name] 4306 if v != nil { 4307 return v 4308 } 4309 4310 if s.curBlock == s.f.Entry { 4311 // No variable should be live at entry. 4312 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v) 4313 } 4314 // Make a FwdRef, which records a value that's live on block input. 4315 // We'll find the matching definition as part of insertPhis. 4316 v = s.newValue0A(ssa.OpFwdRef, t, name) 4317 s.fwdVars[name] = v 4318 s.addNamedValue(name, v) 4319 return v 4320 } 4321 4322 func (s *state) mem() *ssa.Value { 4323 return s.variable(&memVar, ssa.TypeMem) 4324 } 4325 4326 func (s *state) addNamedValue(n *Node, v *ssa.Value) { 4327 if n.Class == Pxxx { 4328 // Don't track our dummy nodes (&memVar etc.). 4329 return 4330 } 4331 if n.IsAutoTmp() { 4332 // Don't track temporary variables. 4333 return 4334 } 4335 if n.Class == PPARAMOUT { 4336 // Don't track named output values. This prevents return values 4337 // from being assigned too early. See #14591 and #14762. TODO: allow this. 4338 return 4339 } 4340 if n.Class == PAUTO && n.Xoffset != 0 { 4341 s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) 4342 } 4343 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} 4344 values, ok := s.f.NamedValues[loc] 4345 if !ok { 4346 s.f.Names = append(s.f.Names, loc) 4347 } 4348 s.f.NamedValues[loc] = append(values, v) 4349 } 4350 4351 // Branch is an unresolved branch. 4352 type Branch struct { 4353 P *obj.Prog // branch instruction 4354 B *ssa.Block // target 4355 } 4356 4357 // SSAGenState contains state needed during Prog generation. 4358 type SSAGenState struct { 4359 // Branches remembers all the branch instructions we've seen 4360 // and where they would like to go. 4361 Branches []Branch 4362 4363 // bstart remembers where each block starts (indexed by block ID) 4364 bstart []*obj.Prog 4365 4366 // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 4367 SSEto387 map[int16]int16 4368 // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. 4369 ScratchFpMem *Node 4370 } 4371 4372 // Pc returns the current Prog. 4373 func (s *SSAGenState) Pc() *obj.Prog { 4374 return pc 4375 } 4376 4377 // SetPos sets the current source position. 4378 func (s *SSAGenState) SetPos(pos src.XPos) { 4379 lineno = pos 4380 } 4381 4382 // genssa appends entries to ptxt for each instruction in f. 4383 // gcargs and gclocals are filled in with pointer maps for the frame. 4384 func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) { 4385 var s SSAGenState 4386 4387 e := f.Config.Frontend().(*ssaExport) 4388 4389 // Remember where each block starts. 
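// bstart is consulted once all blocks have been emitted, to patch the branch instructions collected in s.Branches.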
4390 s.bstart = make([]*obj.Prog, f.NumBlocks()) 4391 4392 var valueProgs map[*obj.Prog]*ssa.Value 4393 var blockProgs map[*obj.Prog]*ssa.Block 4394 var logProgs = e.log 4395 if logProgs { 4396 valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues()) 4397 blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) 4398 f.Logf("genssa %s\n", f.Name) 4399 blockProgs[pc] = f.Blocks[0] 4400 } 4401 4402 if Thearch.Use387 { 4403 s.SSEto387 = map[int16]int16{} 4404 } 4405 4406 s.ScratchFpMem = scratchFpMem 4407 scratchFpMem = nil 4408 4409 // Emit basic blocks 4410 for i, b := range f.Blocks { 4411 s.bstart[b.ID] = pc 4412 // Emit values in block 4413 Thearch.SSAMarkMoves(&s, b) 4414 for _, v := range b.Values { 4415 x := pc 4416 Thearch.SSAGenValue(&s, v) 4417 if logProgs { 4418 for ; x != pc; x = x.Link { 4419 valueProgs[x] = v 4420 } 4421 } 4422 } 4423 // Emit control flow instructions for block 4424 var next *ssa.Block 4425 if i < len(f.Blocks)-1 && Debug['N'] == 0 { 4426 // If -N, leave next==nil so every block with successors 4427 // ends in a JMP (except call blocks - plive doesn't like 4428 // select{send,recv} followed by a JMP call). Helps keep 4429 // line numbers for otherwise empty blocks. 4430 next = f.Blocks[i+1] 4431 } 4432 x := pc 4433 Thearch.SSAGenBlock(&s, b, next) 4434 if logProgs { 4435 for ; x != pc; x = x.Link { 4436 blockProgs[x] = b 4437 } 4438 } 4439 } 4440 4441 // Resolve branches 4442 for _, br := range s.Branches { 4443 br.P.To.Val = s.bstart[br.B.ID] 4444 } 4445 4446 if logProgs { 4447 for p := ptxt; p != nil; p = p.Link { 4448 var s string 4449 if v, ok := valueProgs[p]; ok { 4450 s = v.String() 4451 } else if b, ok := blockProgs[p]; ok { 4452 s = b.String() 4453 } else { 4454 s = " " // most value and branch strings are 2-3 characters long 4455 } 4456 f.Logf("%s\t%s\n", s, p) 4457 } 4458 if f.Config.HTML != nil { 4459 // LineHist is defunct now - this code won't do 4460 // anything. 4461 // TODO: fix this (ideally without a global variable) 4462 // saved := ptxt.Ctxt.LineHist.PrintFilenameOnly 4463 // ptxt.Ctxt.LineHist.PrintFilenameOnly = true 4464 var buf bytes.Buffer 4465 buf.WriteString("<code>") 4466 buf.WriteString("<dl class=\"ssa-gen\">") 4467 for p := ptxt; p != nil; p = p.Link { 4468 buf.WriteString("<dt class=\"ssa-prog-src\">") 4469 if v, ok := valueProgs[p]; ok { 4470 buf.WriteString(v.HTML()) 4471 } else if b, ok := blockProgs[p]; ok { 4472 buf.WriteString(b.HTML()) 4473 } 4474 buf.WriteString("</dt>") 4475 buf.WriteString("<dd class=\"ssa-prog\">") 4476 buf.WriteString(html.EscapeString(p.String())) 4477 buf.WriteString("</dd>") 4479 } 4480 buf.WriteString("</dl>") 4481 buf.WriteString("</code>") 4482 f.Config.HTML.WriteColumn("genssa", buf.String()) 4483 // ptxt.Ctxt.LineHist.PrintFilenameOnly = saved 4484 } 4485 } 4486 4487 // Generate gc bitmaps. 4488 liveness(Curfn, ptxt, gcargs, gclocals) 4489 4490 // Add frame prologue. Zero ambiguously live variables. 4491 Thearch.Defframe(ptxt) 4492 if Debug['f'] != 0 { 4493 frame(0) 4494 } 4495 4496 // Remove leftover instrumentation from the instruction stream.
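// (The VARDEF/VARKILL pseudo-instructions were only needed by the liveness analysis above.)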
4497 removevardef(ptxt) 4498 4499 f.Config.HTML.Close() 4500 f.Config.HTML = nil 4501 } 4502 4503 type FloatingEQNEJump struct { 4504 Jump obj.As 4505 Index int 4506 } 4507 4508 func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch { 4509 p := Prog(jumps.Jump) 4510 p.To.Type = obj.TYPE_BRANCH 4511 to := jumps.Index 4512 branches = append(branches, Branch{p, b.Succs[to].Block()}) 4513 if to == 1 { 4514 likely = -likely 4515 } 4516 // liblink reorders the instruction stream as it sees fit. 4517 // Pass along what we know so liblink can make use of it. 4518 // TODO: Once we've fully switched to SSA, 4519 // make liblink leave our output alone. 4520 switch likely { 4521 case ssa.BranchUnlikely: 4522 p.From.Type = obj.TYPE_CONST 4523 p.From.Offset = 0 4524 case ssa.BranchLikely: 4525 p.From.Type = obj.TYPE_CONST 4526 p.From.Offset = 1 4527 } 4528 return branches 4529 } 4530 4531 func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) { 4532 likely := b.Likely 4533 switch next { 4534 case b.Succs[0].Block(): 4535 s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches) 4536 s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches) 4537 case b.Succs[1].Block(): 4538 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4539 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4540 default: 4541 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4542 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4543 q := Prog(obj.AJMP) 4544 q.To.Type = obj.TYPE_BRANCH 4545 s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) 4546 } 4547 } 4548 4549 func AuxOffset(v *ssa.Value) (offset int64) { 4550 if v.Aux == nil { 4551 return 0 4552 } 4553 switch sym := v.Aux.(type) { 4554 4555 case *ssa.AutoSymbol: 4556 n := sym.Node.(*Node) 4557 return n.Xoffset 4558 } 4559 return 0 4560 } 4561 4562 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a. 4563 func AddAux(a *obj.Addr, v *ssa.Value) { 4564 AddAux2(a, v, v.AuxInt) 4565 } 4566 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { 4567 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR { 4568 v.Fatalf("bad AddAux addr %v", a) 4569 } 4570 // add integer offset 4571 a.Offset += offset 4572 4573 // If no additional symbol offset, we're done. 4574 if v.Aux == nil { 4575 return 4576 } 4577 // Add symbol's offset from its base register. 4578 switch sym := v.Aux.(type) { 4579 case *ssa.ExternSymbol: 4580 a.Name = obj.NAME_EXTERN 4581 switch s := sym.Sym.(type) { 4582 case *Sym: 4583 a.Sym = Linksym(s) 4584 case *obj.LSym: 4585 a.Sym = s 4586 default: 4587 v.Fatalf("ExternSymbol.Sym is %T", s) 4588 } 4589 case *ssa.ArgSymbol: 4590 n := sym.Node.(*Node) 4591 a.Name = obj.NAME_PARAM 4592 a.Node = n 4593 a.Sym = Linksym(n.Orig.Sym) 4594 a.Offset += n.Xoffset 4595 case *ssa.AutoSymbol: 4596 n := sym.Node.(*Node) 4597 a.Name = obj.NAME_AUTO 4598 a.Node = n 4599 a.Sym = Linksym(n.Sym) 4600 a.Offset += n.Xoffset 4601 default: 4602 v.Fatalf("aux in %s not implemented %#v", v, v.Aux) 4603 } 4604 } 4605 4606 // sizeAlignAuxInt returns an AuxInt encoding the size and alignment of type t. 4607 func sizeAlignAuxInt(t *Type) int64 { 4608 return ssa.MakeSizeAndAlign(t.Size(), t.Alignment()).Int64() 4609 } 4610 4611 // extendIndex extends v to a full int width. 4612 // panic using the given function if v does not fit in an int (only on 32-bit archs). 
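// For example, an int16 index is sign-extended to int64 on amd64, while a 64-bit index on a 32-bit arch has its high word checked against zero and is then truncated.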
4613 func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value { 4614 size := v.Type.Size() 4615 if size == s.config.IntSize { 4616 return v 4617 } 4618 if size > s.config.IntSize { 4619 // truncate 64-bit indexes on 32-bit pointer archs. Test the 4620 // high word and branch to out-of-bounds failure if it is not 0. 4621 if Debug['B'] == 0 { 4622 hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v) 4623 cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0)) 4624 s.check(cmp, panicfn) 4625 } 4626 return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v) 4627 } 4628 4629 // Extend value to the required size 4630 var op ssa.Op 4631 if v.Type.IsSigned() { 4632 switch 10*size + s.config.IntSize { 4633 case 14: 4634 op = ssa.OpSignExt8to32 4635 case 18: 4636 op = ssa.OpSignExt8to64 4637 case 24: 4638 op = ssa.OpSignExt16to32 4639 case 28: 4640 op = ssa.OpSignExt16to64 4641 case 48: 4642 op = ssa.OpSignExt32to64 4643 default: 4644 s.Fatalf("bad signed index extension %s", v.Type) 4645 } 4646 } else { 4647 switch 10*size + s.config.IntSize { 4648 case 14: 4649 op = ssa.OpZeroExt8to32 4650 case 18: 4651 op = ssa.OpZeroExt8to64 4652 case 24: 4653 op = ssa.OpZeroExt16to32 4654 case 28: 4655 op = ssa.OpZeroExt16to64 4656 case 48: 4657 op = ssa.OpZeroExt32to64 4658 default: 4659 s.Fatalf("bad unsigned index extension %s", v.Type) 4660 } 4661 } 4662 return s.newValue1(op, Types[TINT], v) 4663 } 4664 4665 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values. 4666 // Called during ssaGenValue. 4667 func CheckLoweredPhi(v *ssa.Value) { 4668 if v.Op != ssa.OpPhi { 4669 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString()) 4670 } 4671 if v.Type.IsMemory() { 4672 return 4673 } 4674 f := v.Block.Func 4675 loc := f.RegAlloc[v.ID] 4676 for _, a := range v.Args { 4677 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? 4678 v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func) 4679 } 4680 } 4681 } 4682 4683 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block. 4684 // The output of LoweredGetClosurePtr is generally hardwired to the correct register. 4685 // That register contains the closure pointer on closure entry. 4686 func CheckLoweredGetClosurePtr(v *ssa.Value) { 4687 entry := v.Block.Func.Entry 4688 if entry != v.Block || entry.Values[0] != v { 4689 Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) 4690 } 4691 } 4692 4693 // KeepAlive marks the variable referenced by OpKeepAlive as live. 4694 // Called during ssaGenValue. 4695 func KeepAlive(v *ssa.Value) { 4696 if v.Op != ssa.OpKeepAlive { 4697 v.Fatalf("KeepAlive called with non-KeepAlive value: %v", v.LongString()) 4698 } 4699 if !v.Args[0].Type.IsPtrShaped() { 4700 v.Fatalf("keeping non-pointer alive %v", v.Args[0]) 4701 } 4702 n, _ := AutoVar(v.Args[0]) 4703 if n == nil { 4704 v.Fatalf("KeepAlive with non-spilled value %s %s", v, v.Args[0]) 4705 } 4706 // Note: KeepAlive arg may be a small part of a larger variable n. We keep the 4707 // whole variable n alive at this point. (Typically, this happens when 4708 // we are requested to keep the idata portion of an interface{} alive, and 4709 // we end up keeping the whole interface{} alive. That's ok.) 4710 Gvarlive(n) 4711 } 4712 4713 // AutoVar returns a *Node and int64 representing the auto variable and offset within it 4714 // where v should be spilled. 
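// The slot comes from f.RegAlloc, so v must have been assigned a stack slot; a register-resident value fails the type assertion below.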
4715 func AutoVar(v *ssa.Value) (*Node, int64) { 4716 loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) 4717 if v.Type.Size() > loc.Type.Size() { 4718 v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) 4719 } 4720 return loc.N.(*Node), loc.Off 4721 } 4722 4723 func AddrAuto(a *obj.Addr, v *ssa.Value) { 4724 n, off := AutoVar(v) 4725 a.Type = obj.TYPE_MEM 4726 a.Node = n 4727 a.Sym = Linksym(n.Sym) 4728 a.Offset = n.Xoffset + off 4729 if n.Class == PPARAM || n.Class == PPARAMOUT { 4730 a.Name = obj.NAME_PARAM 4731 } else { 4732 a.Name = obj.NAME_AUTO 4733 } 4734 } 4735 4736 func (s *SSAGenState) AddrScratch(a *obj.Addr) { 4737 if s.ScratchFpMem == nil { 4738 panic("no scratch memory available; forgot to declare usesScratch for Op?") 4739 } 4740 a.Type = obj.TYPE_MEM 4741 a.Name = obj.NAME_AUTO 4742 a.Node = s.ScratchFpMem 4743 a.Sym = Linksym(s.ScratchFpMem.Sym) 4744 a.Reg = int16(Thearch.REGSP) 4745 a.Offset = s.ScratchFpMem.Xoffset 4746 } 4747 4748 // fieldIdx finds the index of the field referred to by the ODOT node n. 4749 func fieldIdx(n *Node) int { 4750 t := n.Left.Type 4751 f := n.Sym 4752 if !t.IsStruct() { 4753 panic("ODOT's LHS is not a struct") 4754 } 4755 4756 var i int 4757 for _, t1 := range t.Fields().Slice() { 4758 if t1.Sym != f { 4759 i++ 4760 continue 4761 } 4762 if t1.Offset != n.Xoffset { 4763 panic("field offset doesn't match") 4764 } 4765 return i 4766 } 4767 panic(fmt.Sprintf("can't find field in expr %v\n", n)) 4768 4769 // TODO: keep the result of this function somewhere in the ODOT Node 4770 // so we don't have to recompute it each time we need it. 4771 } 4772 4773 // ssaExport exports a bunch of compiler services for the ssa backend. 4774 type ssaExport struct { 4775 log bool 4776 } 4777 4778 func (s *ssaExport) TypeBool() ssa.Type { return Types[TBOOL] } 4779 func (s *ssaExport) TypeInt8() ssa.Type { return Types[TINT8] } 4780 func (s *ssaExport) TypeInt16() ssa.Type { return Types[TINT16] } 4781 func (s *ssaExport) TypeInt32() ssa.Type { return Types[TINT32] } 4782 func (s *ssaExport) TypeInt64() ssa.Type { return Types[TINT64] } 4783 func (s *ssaExport) TypeUInt8() ssa.Type { return Types[TUINT8] } 4784 func (s *ssaExport) TypeUInt16() ssa.Type { return Types[TUINT16] } 4785 func (s *ssaExport) TypeUInt32() ssa.Type { return Types[TUINT32] } 4786 func (s *ssaExport) TypeUInt64() ssa.Type { return Types[TUINT64] } 4787 func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] } 4788 func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] } 4789 func (s *ssaExport) TypeInt() ssa.Type { return Types[TINT] } 4790 func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] } 4791 func (s *ssaExport) TypeString() ssa.Type { return Types[TSTRING] } 4792 func (s *ssaExport) TypeBytePtr() ssa.Type { return ptrto(Types[TUINT8]) } 4793 4794 // StringData returns a symbol (a *Sym wrapped in an interface) which 4795 // is the data component of a global string constant containing s. 4796 func (*ssaExport) StringData(s string) interface{} { 4797 // TODO: is idealstring correct? It might not matter... 
4798 data := stringsym(s) 4799 return &ssa.ExternSymbol{Typ: idealstring, Sym: data} 4800 } 4801 4802 func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode { 4803 n := temp(t.(*Type)) // Note: adds new auto to Curfn.Func.Dcl list 4804 return n 4805 } 4806 4807 func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4808 n := name.N.(*Node) 4809 ptrType := ptrto(Types[TUINT8]) 4810 lenType := Types[TINT] 4811 if n.Class == PAUTO && !n.Addrtaken { 4812 // Split this string up into two separate variables. 4813 p := e.namedAuto(n.Sym.Name+".ptr", ptrType) 4814 l := e.namedAuto(n.Sym.Name+".len", lenType) 4815 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0} 4816 } 4817 // Return the two parts of the larger variable. 4818 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)} 4819 } 4820 4821 func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4822 n := name.N.(*Node) 4823 t := ptrto(Types[TUINT8]) 4824 if n.Class == PAUTO && !n.Addrtaken { 4825 // Split this interface up into two separate variables. 4826 f := ".itab" 4827 if n.Type.IsEmptyInterface() { 4828 f = ".type" 4829 } 4830 c := e.namedAuto(n.Sym.Name+f, t) 4831 d := e.namedAuto(n.Sym.Name+".data", t) 4832 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4833 } 4834 // Return the two parts of the larger variable. 4835 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)} 4836 } 4837 4838 func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { 4839 n := name.N.(*Node) 4840 ptrType := ptrto(name.Type.ElemType().(*Type)) 4841 lenType := Types[TINT] 4842 if n.Class == PAUTO && !n.Addrtaken { 4843 // Split this slice up into three separate variables. 4844 p := e.namedAuto(n.Sym.Name+".ptr", ptrType) 4845 l := e.namedAuto(n.Sym.Name+".len", lenType) 4846 c := e.namedAuto(n.Sym.Name+".cap", lenType) 4847 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0} 4848 } 4849 // Return the three parts of the larger variable. 4850 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, 4851 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}, 4852 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)} 4853 } 4854 4855 func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4856 n := name.N.(*Node) 4857 s := name.Type.Size() / 2 4858 var t *Type 4859 if s == 8 { 4860 t = Types[TFLOAT64] 4861 } else { 4862 t = Types[TFLOAT32] 4863 } 4864 if n.Class == PAUTO && !n.Addrtaken { 4865 // Split this complex up into two separate variables. 4866 c := e.namedAuto(n.Sym.Name+".real", t) 4867 d := e.namedAuto(n.Sym.Name+".imag", t) 4868 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4869 } 4870 // Return the two parts of the larger variable. 4871 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s} 4872 } 4873 4874 func (e *ssaExport) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4875 n := name.N.(*Node) 4876 var t *Type 4877 if name.Type.IsSigned() { 4878 t = Types[TINT32] 4879 } else { 4880 t = Types[TUINT32] 4881 } 4882 if n.Class == PAUTO && !n.Addrtaken { 4883 // Split this int64 up into two separate variables. 
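// The ".hi" half keeps the signedness of the original type; the ".lo" half is always unsigned.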
4884 h := e.namedAuto(n.Sym.Name+".hi", t) 4885 l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32]) 4886 return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0} 4887 } 4888 // Return the two parts of the larger variable. 4889 if Thearch.LinkArch.ByteOrder == binary.BigEndian { 4890 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off + 4} 4891 } 4892 return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off} 4893 } 4894 4895 func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { 4896 n := name.N.(*Node) 4897 st := name.Type 4898 ft := st.FieldType(i) 4899 if n.Class == PAUTO && !n.Addrtaken { 4900 // Note: the _ field may appear several times. But 4901 // have no fear, identically-named but distinct Autos are 4902 // ok, albeit maybe confusing for a debugger. 4903 x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft) 4904 return ssa.LocalSlot{N: x, Type: ft, Off: 0} 4905 } 4906 return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)} 4907 } 4908 4909 func (e *ssaExport) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { 4910 n := name.N.(*Node) 4911 at := name.Type 4912 if at.NumElem() != 1 { 4913 Fatalf("bad array size") 4914 } 4915 et := at.ElemType() 4916 if n.Class == PAUTO && !n.Addrtaken { 4917 x := e.namedAuto(n.Sym.Name+"[0]", et) 4918 return ssa.LocalSlot{N: x, Type: et, Off: 0} 4919 } 4920 return ssa.LocalSlot{N: n, Type: et, Off: name.Off} 4921 } 4922 4923 // namedAuto returns a new AUTO variable with the given name and type. 4924 // These are exposed to the debugger. 4925 func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode { 4926 t := typ.(*Type) 4927 s := &Sym{Name: name, Pkg: localpkg} 4928 n := nod(ONAME, nil, nil) 4929 s.Def = n 4930 s.Def.Used = true 4931 n.Sym = s 4932 n.Type = t 4933 n.Class = PAUTO 4934 n.Addable = true 4935 n.Ullman = 1 4936 n.Esc = EscNever 4937 n.Xoffset = 0 4938 n.Name.Curfn = Curfn 4939 Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) 4940 4941 dowidth(t) 4942 return n 4943 } 4944 4945 func (e *ssaExport) CanSSA(t ssa.Type) bool { 4946 return canSSAType(t.(*Type)) 4947 } 4948 4949 func (e *ssaExport) Line(pos src.XPos) string { 4950 return linestr(pos) 4951 } 4952 4953 // Logf logs a message from the compiler. 4954 func (e *ssaExport) Logf(msg string, args ...interface{}) { 4955 if e.log { 4956 fmt.Printf(msg, args...) 4957 } 4958 } 4959 4960 func (e *ssaExport) Log() bool { 4961 return e.log 4962 } 4963 4964 // Fatalf reports a compiler error and exits. 4965 func (e *ssaExport) Fatalf(pos src.XPos, msg string, args ...interface{}) { 4966 lineno = pos 4967 Fatalf(msg, args...) 4968 } 4969 4970 // Warnl reports a "warning", which is usually flag-triggered 4971 // logging output for the benefit of tests. 4972 func (e *ssaExport) Warnl(pos src.XPos, fmt_ string, args ...interface{}) { 4973 Warnl(pos, fmt_, args...) 4974 } 4975 4976 func (e *ssaExport) Debug_checknil() bool { 4977 return Debug_checknil != 0 4978 } 4979 4980 func (e *ssaExport) Debug_wb() bool { 4981 return Debug_wb != 0 4982 } 4983 4984 func (e *ssaExport) Syslook(name string) interface{} { 4985 return syslook(name).Sym 4986 } 4987 4988 func (n *Node) Typ() ssa.Type { 4989 return n.Type 4990 }