github.com/rakyll/go@v0.0.0-20170216000551-64c02460d703/src/cmd/compile/internal/gc/ssa.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaExp ssaExport

func initssa() *ssa.Config {
	if ssaConfig == nil {
		ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
		if Thearch.LinkArch.Name == "386" {
			ssaConfig.Set387(Thearch.Use387)
		}
	}
	ssaConfig.HTML = nil
	return ssaConfig
}

// buildssa builds an SSA function.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	if fn.Func.Pragma&Nowritebarrier != 0 {
		s.noWB = true
	}
	defer func() {
		if s.WBPos.IsKnown() {
			fn.Func.WBPos = s.WBPos
		}
	}()
	// TODO(khr): build config just once at the start of the compiler binary

	ssaExp.log = printssa

	s.config = initssa()
	s.f = s.config.NewFunc()
	s.f.Name = name
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}
	s.config.DebugTest = s.config.DebugHashMatch("GOSSAHASH", name)

	if name == os.Getenv("GOSSAFUNC") {
		// TODO: tempfile? it is handy to have the location
		// of this file be stable, so you can just reload in the browser.
		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, ptrto(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %s unimplemented", classnames[n.Class])
		}
	}

	// Populate SSAable arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class == PPARAM && s.canSSA(n) {
			s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	// Check that we used all labels
	for name, lab := range s.labels {
		if !lab.used() && !lab.reported && !lab.defNode.Used {
			yyerrorl(lab.defNode.Pos, "label %v defined and not used", name)
			lab.reported = true
		}
		if lab.used() && !lab.defined() && !lab.reported {
			yyerrorl(lab.useNode.Pos, "label %v not defined", name)
			lab.reported = true
		}
	}

	// Check any forward gotos. Non-forward gotos have already been checked.
	for _, n := range s.fwdGotos {
		lab := s.labels[n.Left.Sym.Name]
		// If the label is undefined, we have already printed an error.
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		}
	}

	if nerrors > 0 {
		s.f.Free()
		return nil
	}

	s.insertPhis()

	// Don't carry a reference to this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	return s.f
}

type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// gotos that jump forward; required for deferred checkgoto calls
	fwdGotos []*Node
	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	// A dummy value used during phi construction.
	placeholder *ssa.Value

	cgoUnsafeArgs bool
	noWB          bool
	WBPos         src.XPos // line number of first write barrier. 0=no write barriers
}

type funcLine struct {
	f    *obj.LSym
	line src.XPos
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
	defNode        *Node      // label definition Node (OLABEL)
	// Label use Node (OGOTO, OBREAK, OCONTINUE).
	// Used only for error detection and reporting.
	// There might be multiple uses, but we only need to track one.
	useNode  *Node
	reported bool // reported indicates whether an error has already been reported for this label
}

// defined reports whether the label has a definition (OLABEL node).
func (l *ssaLabel) defined() bool { return l.defNode != nil }

// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
func (l *ssaLabel) used() bool { return l.useNode != nil }

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.config.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.config.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(s.peekPos(), msg, args...) }
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) {
	s.config.Warnl(pos, msg, args...)
}
func (s *state) Debug_checknil() bool { return s.config.Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
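// The block's final variable state is saved into s.defvars, which the
// phi-insertion pass (s.insertPhis) consults later.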
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	b.Pos = s.peekPos()
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit a node with a missing line number;
		// use the parent line number in this case.
		line = s.peekPos()
		if Debug['K'] != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	}
	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}

func (s *state) Error(msg string, args ...interface{}) {
	yyerrorl(s.peekPos(), msg, args...)
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t ssa.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekPos(), op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekPos(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
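// Placing constants in the entry block lets any block in the function
// use them, since the entry block dominates the whole function.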
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekPos(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekPos(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekPos(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekPos(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}

// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Pos)
	defer s.popLine()

	// If s.curBlock is nil, then we're about to generate dead code.
	// We can't just short-circuit here, though,
	// because we check labels and gotos as part of SSA generation.
	// Provide a block for the dead code so that we don't have
	// to add special cases everywhere else.
	if s.curBlock == nil {
		dead := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(dead)
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "selectgo" || fn == "block") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
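				// Marking the block BlockExit (with the memory state
				// as its control) records that these calls never return.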
			}
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		deref := false
		if !canSSAType(n.Rlist.First().Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), deref, n.Pos, 0, false)
		s.assign(n.List.Second(), resok, false, false, n.Pos, 0, false)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		// Make a fake node to mimic loading return value, ONLY for write barrier test.
		// This is future-proofing against non-scalar 2-result intrinsics.
		// Currently we only have scalar ones, which result in no write barrier.
		fakeret := &Node{Op: OINDREGSP}
		s.assign(n.List.First(), v1, needwritebarrier(n.List.First(), fakeret), false, n.Pos, 0, false)
		s.assign(n.List.Second(), v2, needwritebarrier(n.List.Second(), fakeret), false, n.Pos, 0, false)
		return

	case ODCL:
		if n.Left.Class == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym

		if isblanksym(sym) {
			// Empty identifier is valid but useless.
			// See issues 11589, 11593.
			return
		}

		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.Name.Defn; ctl != nil {
			switch ctl.Op {
			case OFOR, OSWITCH, OSELECT:
				s.labeledNodes[ctl] = lab
			}
		}

		if !lab.defined() {
			lab.defNode = n
		} else {
			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Pos))
			lab.reported = true
		}
		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// go to that label (we pretend "label:" is preceded by "goto label")
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		if !lab.used() {
			lab.useNode = n
		}

		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		} else {
			s.fwdGotos = append(s.fwdGotos, n)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

	case OAS:
		// Generate static data rather than code, if possible.
		if n.IsStatic {
			if !genAsInitNoCheck(n) {
				Dump("\ngen_as_init", n)
				Fatalf("gen_as_init couldn't generate static data")
			}
			return
		}

		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//	VARDEF x
			//	COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		var t *Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) {
					if !s.canSSA(n.Left) {
						if Debug_append > 0 {
							Warnl(n.Pos, "append: len-only update")
						}
						s.append(rhs, true)
						return
					} else {
						if Debug_append > 0 { // replicating old diagnostic message
							Warnl(n.Pos, "append: len-only update (in local slice)")
						}
					}
				}
			}
		}
		var r *ssa.Value
		var isVolatile bool
		needwb := n.Right != nil && needwritebarrier(n.Left, n.Right)
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r, isVolatile = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}
		if rhs != nil && rhs.Op == OAPPEND && needwritebarrier(n.Left, rhs) {
			// The frontend gets rid of the write barrier to enable the special OAPPEND
			// handling above, but since this is not a special case, we need it.
			// TODO: just add a ptr graying to the end of growslice?
			// TODO: check whether we need to provide special handling and a write barrier
			// for ODOTTYPE and ORECV also.
			// They get similar wb-removal treatment in walk.go:OAS.
			needwb = true
		}
		if needwb && Debug_wb > 1 {
			Warnl(n.Pos, "marking %v for barrier", n.Left)
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//	tmp = len(*p)
			//	(*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, needwb, deref, n.Pos, skip, isVolatile)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = Linksym(n.Left.Sym)

	case OCONTINUE, OBREAK:
		var op string
		var to *ssa.Block
		switch n.Op {
		case OCONTINUE:
			op = "continue"
			to = s.continueTo
		case OBREAK:
			op = "break"
			to = s.breakTo
		}
		if n.Left == nil {
			// plain break/continue
			if to == nil {
				s.Error("%s is not in a loop", op)
				return
			}
			// nothing to do; "to" is already the correct target
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			if !lab.used() {
				lab.useNode = n.Left
			}
			if !lab.defined() {
				s.Error("%s label not defined: %v", op, sym)
				lab.reported = true
				return
			}
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
			if to == nil {
				// Valid label but not usable with a break/continue here, e.g.:
				// for {
				// 	continue abc
				// }
				// abc:
				// for {}
				s.Error("invalid %s label %v", op, sym)
				lab.reported = true
				return
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)

	case OFOR:
		// OFOR: for Ninit; Left; Right { Nbody }
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test
		b := s.endBlock()
		b.AddEdgeTo(bCond)

		// generate code to test condition
		s.startBlock(bCond)
		if n.Left != nil {
			s.condBranch(n.Left, bBody, bEnd, 1)
		} else {
			b := s.endBlock()
			b.Kind = ssa.BlockPlain
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}
		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// OSWITCH never falls through (s.curBlock == nil here).
		// OSELECT does not fall through if we're calling selectgo.
		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
		// In those latter cases, go to the code after the select.
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
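	// The return block uses the final memory state as its control value,
	// so the stores above are ordered before the return.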
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}

type opAndType struct {
	op    Op
	etype EType
}

var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}:    ssa.OpMul8,
	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
	opAndType{OMUL, TINT16}:   ssa.OpMul16,
	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
	opAndType{OMUL, TINT32}:   ssa.OpMul32,
	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
	opAndType{OMUL, TINT64}:   ssa.OpMul64,
	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{OHMUL, TINT8}:   ssa.OpHmul8,
	opAndType{OHMUL, TUINT8}:  ssa.OpHmul8u,
	opAndType{OHMUL, TINT16}:  ssa.OpHmul16,
	opAndType{OHMUL, TUINT16}: ssa.OpHmul16u,
	opAndType{OHMUL, TINT32}:  ssa.OpHmul32,
	opAndType{OHMUL, TUINT32}: ssa.OpHmul32u,

	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}:   ssa.OpMod8,
	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
	opAndType{OMOD, TINT16}:  ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}:  ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}:  ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}:   ssa.OpAnd8,
	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
	opAndType{OAND, TINT16}:  ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}:  ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}:  ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}:   ssa.OpOr8,
	opAndType{OOR, TUINT8}:  ssa.OpOr8,
	opAndType{OOR, TINT16}:  ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}:  ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}:  ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}:   ssa.OpXor8,
	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
	opAndType{OXOR, TINT16}:  ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}:  ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}:  ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
	opAndType{OEQ, TINT8}:      ssa.OpEq8,
	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
	opAndType{OEQ, TINT16}:     ssa.OpEq16,
	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
	opAndType{OEQ, TINT32}:     ssa.OpEq32,
	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
	opAndType{OEQ, TINT64}:     ssa.OpEq64,
	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
	opAndType{OEQ, TPTR32}:     ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}:     ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,

	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
	opAndType{ONE, TINT8}:      ssa.OpNeq8,
	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
	opAndType{ONE, TINT16}:     ssa.OpNeq16,
	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
	opAndType{ONE, TINT32}:     ssa.OpNeq32,
	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
	opAndType{ONE, TINT64}:     ssa.OpNeq64,
	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
	opAndType{ONE, TPTR32}:     ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}:     ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,

	opAndType{OLT, TINT8}:    ssa.OpLess8,
	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
	opAndType{OLT, TINT16}:   ssa.OpLess16,
	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
	opAndType{OLT, TINT32}:   ssa.OpLess32,
	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
	opAndType{OLT, TINT64}:   ssa.OpLess64,
	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}:  ssa.OpGreater8,
	opAndType{OGT, TUINT8}: ssa.OpGreater8U,
	opAndType{OGT, TINT16}: ssa.OpGreater16,
	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
	opAndType{OGT, TINT32}:   ssa.OpGreater32,
	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
	opAndType{OGT, TINT64}:   ssa.OpGreater64,
	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}:    ssa.OpLeq8,
	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
	opAndType{OLE, TINT16}:   ssa.OpLeq16,
	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
	opAndType{OLE, TINT32}:   ssa.OpLeq32,
	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
	opAndType{OLE, TINT64}:   ssa.OpLeq64,
	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}:    ssa.OpGeq8,
	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
	opAndType{OGE, TINT16}:   ssa.OpGeq16,
	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
	opAndType{OGE, TINT32}:   ssa.OpGeq32,
	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
	opAndType{OGE, TINT64}:   ssa.OpGeq64,
	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}

func (s *state) concreteEtype(t *Type) EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.IntSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.IntSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}

func (s *state) ssaOp(op Op, t *Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}

func floatForComplex(t *Type) *Type {
	if t.Size() == 8 {
		return Types[TFLOAT32]
	} else {
		return Types[TFLOAT64]
	}
}

type opAndTwoTypes struct {
	op     Op
	etype1 EType
	etype2 EType
}

type twoTypes struct {
	etype1 EType
	etype2 EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType EType
}

var fpConvOpToSSA = map[twoTypes]twoOpsAndType{

	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead

	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}

// this map is used only for 32-bit archs, and only includes the difference:
// on 32-bit archs, don't use int64<->float conversion for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}

var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}

func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}

// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OARRAYBYTESTRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice)
		len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTRARRAYBYTETMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), str)
		len := s.newValue1(ssa.OpStringLen, Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: Linksym(n.Left.Sym)})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := Linksym(funcsym(n.Sym))
			aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym}
			return s.entryNewValue1A(ssa.OpAddr, ptrto(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr, _ := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr, _ := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}

		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsBoolean() && tt.IsKind(TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
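			// Both bool and uint8 are single-byte types, so a plain copy suffices.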
			return s.newValue1(ssa.OpCopy, n.Type, x)
		}
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}

		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" && Thearch.LinkArch.Family != sys.MIPS {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if Thearch.LinkArch.Name == "arm64" {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}

			if Thearch.LinkArch.Family == sys.MIPS {
				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
					// tt is float32 or float64, and ft is also unsigned
					if tt.Size() == 4 {
						return s.uint32Tofloat32(n, x, ft, tt)
					}
					if tt.Size() == 8 {
						return s.uint32Tofloat64(n, x, ft, tt)
					}
				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
					// ft is float32 or float64, and tt is unsigned integer
					if ft.Size() == 4 {
						return s.float32ToUint32(n, x, ft, tt)
					}
					if ft.Size() == 8 {
						return s.float64ToUint32(n, x, ft, tt)
					}
				}
			}

			if !ok {
				s.Fatalf("weird float conversion %v -> %v", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValue1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValue1(op1, n.Type, x)
				}
				return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
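			// One of conv's ops is OpInvalid here (see the TUINT64 entries
			// in fpConvOpToSSA), so there is no single-instruction sequence;
			// expand to branchy code instead.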
1696 if ft.IsInteger() { 1697 // tt is float32 or float64, and ft is also unsigned 1698 if tt.Size() == 4 { 1699 return s.uint64Tofloat32(n, x, ft, tt) 1700 } 1701 if tt.Size() == 8 { 1702 return s.uint64Tofloat64(n, x, ft, tt) 1703 } 1704 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt) 1705 } 1706 // ft is float32 or float64, and tt is unsigned integer 1707 if ft.Size() == 4 { 1708 return s.float32ToUint64(n, x, ft, tt) 1709 } 1710 if ft.Size() == 8 { 1711 return s.float64ToUint64(n, x, ft, tt) 1712 } 1713 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) 1714 return nil 1715 } 1716 1717 if ft.IsComplex() && tt.IsComplex() { 1718 var op ssa.Op 1719 if ft.Size() == tt.Size() { 1720 op = ssa.OpCopy 1721 } else if ft.Size() == 8 && tt.Size() == 16 { 1722 op = ssa.OpCvt32Fto64F 1723 } else if ft.Size() == 16 && tt.Size() == 8 { 1724 op = ssa.OpCvt64Fto32F 1725 } else { 1726 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1727 } 1728 ftp := floatForComplex(ft) 1729 ttp := floatForComplex(tt) 1730 return s.newValue2(ssa.OpComplexMake, tt, 1731 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), 1732 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) 1733 } 1734 1735 s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) 1736 return nil 1737 1738 case ODOTTYPE: 1739 res, _ := s.dottype(n, false) 1740 return res 1741 1742 // binary ops 1743 case OLT, OEQ, ONE, OLE, OGE, OGT: 1744 a := s.expr(n.Left) 1745 b := s.expr(n.Right) 1746 if n.Left.Type.IsComplex() { 1747 pt := floatForComplex(n.Left.Type) 1748 op := s.ssaOp(OEQ, pt) 1749 r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) 1750 i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) 1751 c := s.newValue2(ssa.OpAndB, Types[TBOOL], r, i) 1752 switch n.Op { 1753 case OEQ: 1754 return c 1755 case ONE: 1756 return s.newValue1(ssa.OpNot, Types[TBOOL], c) 1757 default: 1758 s.Fatalf("ordered complex compare %v", n.Op) 1759 } 1760 } 1761 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b) 1762 case OMUL: 1763 a := s.expr(n.Left) 1764 b := s.expr(n.Right) 1765 if n.Type.IsComplex() { 1766 mulop := ssa.OpMul64F 1767 addop := ssa.OpAdd64F 1768 subop := ssa.OpSub64F 1769 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1770 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1771 1772 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1773 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1774 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1775 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1776 1777 if pt != wt { // Widen for calculation 1778 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1779 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1780 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1781 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1782 } 1783 1784 xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1785 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal)) 1786 1787 if pt != wt { // Narrow to store back 1788 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1789 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1790 } 1791 1792 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1793 } 1794 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1795 1796 case ODIV: 
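// For complex operands, the code below (currently bypassed because the
// front-end substitutes a runtime call, per the TODO) implements the
// textbook formula a/b = a*conj(b) / (b*conj(b)), that is:
//	real = (ar*br + ai*bi) / (br*br + bi*bi)
//	imag = (ai*br - ar*bi) / (br*br + bi*bi)
// with the arithmetic done in float64 and narrowed afterwards for complex64.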
1797 a := s.expr(n.Left) 1798 b := s.expr(n.Right) 1799 if n.Type.IsComplex() { 1800 // TODO this is not executed because the front-end substitutes a runtime call. 1801 // That probably ought to change; with modest optimization the widen/narrow 1802 // conversions could all be elided in larger expression trees. 1803 mulop := ssa.OpMul64F 1804 addop := ssa.OpAdd64F 1805 subop := ssa.OpSub64F 1806 divop := ssa.OpDiv64F 1807 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1808 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1809 1810 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1811 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1812 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1813 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1814 1815 if pt != wt { // Widen for calculation 1816 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1817 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1818 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1819 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1820 } 1821 1822 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag)) 1823 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1824 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag)) 1825 1826 // TODO not sure if this is best done in wide precision or narrow 1827 // Double-rounding might be an issue. 1828 // Note that the pre-SSA implementation does the entire calculation 1829 // in wide format, so wide is compatible. 1830 xreal = s.newValue2(divop, wt, xreal, denom) 1831 ximag = s.newValue2(divop, wt, ximag, denom) 1832 1833 if pt != wt { // Narrow to store back 1834 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1835 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1836 } 1837 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1838 } 1839 if n.Type.IsFloat() { 1840 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1841 } 1842 return s.intDivide(n, a, b) 1843 case OMOD: 1844 a := s.expr(n.Left) 1845 b := s.expr(n.Right) 1846 return s.intDivide(n, a, b) 1847 case OADD, OSUB: 1848 a := s.expr(n.Left) 1849 b := s.expr(n.Right) 1850 if n.Type.IsComplex() { 1851 pt := floatForComplex(n.Type) 1852 op := s.ssaOp(n.Op, pt) 1853 return s.newValue2(ssa.OpComplexMake, n.Type, 1854 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1855 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1856 } 1857 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1858 case OAND, OOR, OHMUL, OXOR: 1859 a := s.expr(n.Left) 1860 b := s.expr(n.Right) 1861 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1862 case OLSH, ORSH: 1863 a := s.expr(n.Left) 1864 b := s.expr(n.Right) 1865 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1866 case OANDAND, OOROR: 1867 // To implement OANDAND (and OOROR), we introduce a 1868 // new temporary variable to hold the result. The 1869 // variable is associated with the OANDAND node in the 1870 // s.vars table (normally variables are only 1871 // associated with ONAME nodes). We convert 1872 // A && B 1873 // to 1874 // var = A 1875 // if var { 1876 // var = B 1877 // } 1878 // Using var in the subsequent block introduces the 1879 // necessary phi variable. 
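// OOROR is handled the same way with the branch sense flipped: A || B becomes
//	var = A
//	if !var {
//		var = B
//	}
// with the negation encoded by the successor-edge order chosen below
// rather than by an explicit NOT.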
1880 el := s.expr(n.Left)
1881 s.vars[n] = el
1882
1883 b := s.endBlock()
1884 b.Kind = ssa.BlockIf
1885 b.SetControl(el)
1886 // In theory, we should set b.Likely here based on context.
1887 // However, gc only gives us likeliness hints
1888 // in a single place, for plain OIF statements,
1889 // and passing around context is finicky, so don't bother for now.
1890
1891 bRight := s.f.NewBlock(ssa.BlockPlain)
1892 bResult := s.f.NewBlock(ssa.BlockPlain)
1893 if n.Op == OANDAND {
1894 b.AddEdgeTo(bRight)
1895 b.AddEdgeTo(bResult)
1896 } else if n.Op == OOROR {
1897 b.AddEdgeTo(bResult)
1898 b.AddEdgeTo(bRight)
1899 }
1900
1901 s.startBlock(bRight)
1902 er := s.expr(n.Right)
1903 s.vars[n] = er
1904
1905 b = s.endBlock()
1906 b.AddEdgeTo(bResult)
1907
1908 s.startBlock(bResult)
1909 return s.variable(n, Types[TBOOL])
1910 case OCOMPLEX:
1911 r := s.expr(n.Left)
1912 i := s.expr(n.Right)
1913 return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
1914
1915 // unary ops
1916 case OMINUS:
1917 a := s.expr(n.Left)
1918 if n.Type.IsComplex() {
1919 tp := floatForComplex(n.Type)
1920 negop := s.ssaOp(n.Op, tp)
1921 return s.newValue2(ssa.OpComplexMake, n.Type,
1922 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
1923 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
1924 }
1925 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1926 case ONOT, OCOM:
1927 a := s.expr(n.Left)
1928 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1929 case OIMAG, OREAL:
1930 a := s.expr(n.Left)
1931 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
1932 case OPLUS:
1933 return s.expr(n.Left)
1934
1935 case OADDR:
1936 a, _ := s.addr(n.Left, n.Bounded)
1937 // Note we know the volatile result is false because you can't write &f() in Go.
1938 return a
1939
1940 case OINDREGSP:
1941 addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(n.Type), n.Xoffset, s.sp)
1942 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
1943
1944 case OIND:
1945 p := s.exprPtr(n.Left, false, n.Pos)
1946 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1947
1948 case ODOT:
1949 t := n.Left.Type
1950 if canSSAType(t) {
1951 v := s.expr(n.Left)
1952 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
1953 }
1954 if n.Left.Op == OSTRUCTLIT {
1955 // All literals with nonzero fields have already been
1956 // rewritten during walk. Any that remain are just T{}
1957 // or equivalents. Use the zero value.
1958 if !iszero(n.Left) {
1959 Fatalf("literal with nonzero value in SSA: %v", n.Left)
1960 }
1961 return s.zeroVal(n.Type)
1962 }
1963 p, _ := s.addr(n, false)
1964 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1965
1966 case ODOTPTR:
1967 p := s.exprPtr(n.Left, false, n.Pos)
1968 p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p)
1969 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1970
1971 case OINDEX:
1972 switch {
1973 case n.Left.Type.IsString():
1974 if n.Bounded && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
1975 // Replace "abc"[1] with 'b'.
1976 // Delayed until now because "abc"[1] is not an ideal constant.
1977 // See test/fixedbugs/issue11370.go.
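// Note the int8 conversion below: by SSA convention the AuxInt of a
// small constant (OpConst8 here) is stored sign-extended to 64 bits,
// even when the value's type is unsigned.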
1978 return s.newValue0I(ssa.OpConst8, Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()]))) 1979 } 1980 a := s.expr(n.Left) 1981 i := s.expr(n.Right) 1982 i = s.extendIndex(i, panicindex) 1983 if !n.Bounded { 1984 len := s.newValue1(ssa.OpStringLen, Types[TINT], a) 1985 s.boundsCheck(i, len) 1986 } 1987 ptrtyp := ptrto(Types[TUINT8]) 1988 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 1989 if Isconst(n.Right, CTINT) { 1990 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 1991 } else { 1992 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 1993 } 1994 return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem()) 1995 case n.Left.Type.IsSlice(): 1996 p, _ := s.addr(n, false) 1997 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1998 case n.Left.Type.IsArray(): 1999 if bound := n.Left.Type.NumElem(); bound <= 1 { 2000 // SSA can handle arrays of length at most 1. 2001 a := s.expr(n.Left) 2002 i := s.expr(n.Right) 2003 if bound == 0 { 2004 // Bounds check will never succeed. Might as well 2005 // use constants for the bounds check. 2006 z := s.constInt(Types[TINT], 0) 2007 s.boundsCheck(z, z) 2008 // The return value won't be live, return junk. 2009 return s.newValue0(ssa.OpUnknown, n.Type) 2010 } 2011 i = s.extendIndex(i, panicindex) 2012 s.boundsCheck(i, s.constInt(Types[TINT], bound)) 2013 return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) 2014 } 2015 p, _ := s.addr(n, false) 2016 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2017 default: 2018 s.Fatalf("bad type for index %v", n.Left.Type) 2019 return nil 2020 } 2021 2022 case OLEN, OCAP: 2023 switch { 2024 case n.Left.Type.IsSlice(): 2025 op := ssa.OpSliceLen 2026 if n.Op == OCAP { 2027 op = ssa.OpSliceCap 2028 } 2029 return s.newValue1(op, Types[TINT], s.expr(n.Left)) 2030 case n.Left.Type.IsString(): // string; not reachable for OCAP 2031 return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left)) 2032 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 2033 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 2034 default: // array 2035 return s.constInt(Types[TINT], n.Left.Type.NumElem()) 2036 } 2037 2038 case OSPTR: 2039 a := s.expr(n.Left) 2040 if n.Left.Type.IsSlice() { 2041 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 2042 } else { 2043 return s.newValue1(ssa.OpStringPtr, n.Type, a) 2044 } 2045 2046 case OITAB: 2047 a := s.expr(n.Left) 2048 return s.newValue1(ssa.OpITab, n.Type, a) 2049 2050 case OIDATA: 2051 a := s.expr(n.Left) 2052 return s.newValue1(ssa.OpIData, n.Type, a) 2053 2054 case OEFACE: 2055 tab := s.expr(n.Left) 2056 data := s.expr(n.Right) 2057 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2058 2059 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2060 v := s.expr(n.Left) 2061 var i, j, k *ssa.Value 2062 low, high, max := n.SliceBounds() 2063 if low != nil { 2064 i = s.extendIndex(s.expr(low), panicslice) 2065 } 2066 if high != nil { 2067 j = s.extendIndex(s.expr(high), panicslice) 2068 } 2069 if max != nil { 2070 k = s.extendIndex(s.expr(max), panicslice) 2071 } 2072 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2073 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2074 2075 case OSLICESTR: 2076 v := s.expr(n.Left) 2077 var i, j *ssa.Value 2078 low, high, _ := n.SliceBounds() 2079 if low != nil { 2080 i = s.extendIndex(s.expr(low), panicslice) 2081 } 2082 if high != nil { 2083 j = s.extendIndex(s.expr(high), panicslice) 2084 } 2085 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 2086 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2087 2088 case 
OCALLFUNC: 2089 if isIntrinsicCall(n) { 2090 return s.intrinsicCall(n) 2091 } 2092 fallthrough 2093 2094 case OCALLINTER, OCALLMETH: 2095 a := s.call(n, callNormal) 2096 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2097 2098 case OGETG: 2099 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2100 2101 case OAPPEND: 2102 return s.append(n, false) 2103 2104 case OSTRUCTLIT, OARRAYLIT: 2105 // All literals with nonzero fields have already been 2106 // rewritten during walk. Any that remain are just T{} 2107 // or equivalents. Use the zero value. 2108 if !iszero(n) { 2109 Fatalf("literal with nonzero value in SSA: %v", n) 2110 } 2111 return s.zeroVal(n.Type) 2112 2113 default: 2114 s.Fatalf("unhandled expr %v", n.Op) 2115 return nil 2116 } 2117 } 2118 2119 // append converts an OAPPEND node to SSA. 2120 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2121 // adds it to s, and returns the Value. 2122 // If inplace is true, it writes the result of the OAPPEND expression n 2123 // back to the slice being appended to, and returns nil. 2124 // inplace MUST be set to false if the slice can be SSA'd. 2125 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2126 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2127 // 2128 // ptr, len, cap := s 2129 // newlen := len + 3 2130 // if newlen > cap { 2131 // ptr, len, cap = growslice(s, newlen) 2132 // newlen = len + 3 // recalculate to avoid a spill 2133 // } 2134 // // with write barriers, if needed: 2135 // *(ptr+len) = e1 2136 // *(ptr+len+1) = e2 2137 // *(ptr+len+2) = e3 2138 // return makeslice(ptr, newlen, cap) 2139 // 2140 // 2141 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2142 // 2143 // a := &s 2144 // ptr, len, cap := s 2145 // newlen := len + 3 2146 // if newlen > cap { 2147 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2148 // vardef(a) // if necessary, advise liveness we are writing a new a 2149 // *a.cap = newcap // write before ptr to avoid a spill 2150 // *a.ptr = newptr // with write barrier 2151 // } 2152 // newlen = len + 3 // recalculate to avoid a spill 2153 // *a.len = newlen 2154 // // with write barriers, if needed: 2155 // *(ptr+len) = e1 2156 // *(ptr+len+1) = e2 2157 // *(ptr+len+2) = e3 2158 2159 et := n.Type.Elem() 2160 pt := ptrto(et) 2161 2162 // Evaluate slice 2163 sn := n.List.First() // the slice node is the first in the list 2164 2165 var slice, addr *ssa.Value 2166 if inplace { 2167 addr, _ = s.addr(sn, false) 2168 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2169 } else { 2170 slice = s.expr(sn) 2171 } 2172 2173 // Allocate new blocks 2174 grow := s.f.NewBlock(ssa.BlockPlain) 2175 assign := s.f.NewBlock(ssa.BlockPlain) 2176 2177 // Decide if we need to grow 2178 nargs := int64(n.List.Len() - 1) 2179 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2180 l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice) 2181 c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice) 2182 nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2183 2184 cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c) 2185 s.vars[&ptrVar] = p 2186 2187 if !inplace { 2188 s.vars[&newlenVar] = nl 2189 s.vars[&capVar] = c 2190 } else { 2191 s.vars[&lenVar] = l 2192 } 2193 2194 b := s.endBlock() 2195 b.Kind = ssa.BlockIf 2196 b.Likely = ssa.BranchUnlikely 2197 b.SetControl(cmp) 2198 b.AddEdgeTo(grow) 2199 b.AddEdgeTo(assign) 2200 2201 // Call growslice 2202 s.startBlock(grow) 2203 taddr := 
s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: Linksym(typenamesym(n.Type.Elem()))}, s.sb) 2204 2205 r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) 2206 2207 if inplace { 2208 if sn.Op == ONAME { 2209 // Tell liveness we're about to build a new slice 2210 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem()) 2211 } 2212 capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(array_cap), addr) 2213 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem()) 2214 if ssa.IsStackAddr(addr) { 2215 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, pt.Size(), addr, r[0], s.mem()) 2216 } else { 2217 s.insertWBstore(pt, addr, r[0], n.Pos, 0) 2218 } 2219 // load the value we just stored to avoid having to spill it 2220 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2221 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2222 } else { 2223 s.vars[&ptrVar] = r[0] 2224 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs)) 2225 s.vars[&capVar] = r[2] 2226 } 2227 2228 b = s.endBlock() 2229 b.AddEdgeTo(assign) 2230 2231 // assign new elements to slots 2232 s.startBlock(assign) 2233 2234 if inplace { 2235 l = s.variable(&lenVar, Types[TINT]) // generates phi for len 2236 nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2237 lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(array_nel), addr) 2238 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem()) 2239 } 2240 2241 // Evaluate args 2242 type argRec struct { 2243 // if store is true, we're appending the value v. If false, we're appending the 2244 // value at *v. If store==false, isVolatile reports whether the source 2245 // is in the outargs section of the stack frame. 2246 v *ssa.Value 2247 store bool 2248 isVolatile bool 2249 } 2250 args := make([]argRec, 0, nargs) 2251 for _, n := range n.List.Slice()[1:] { 2252 if canSSAType(n.Type) { 2253 args = append(args, argRec{v: s.expr(n), store: true}) 2254 } else { 2255 v, isVolatile := s.addr(n, false) 2256 args = append(args, argRec{v: v, isVolatile: isVolatile}) 2257 } 2258 } 2259 2260 p = s.variable(&ptrVar, pt) // generates phi for ptr 2261 if !inplace { 2262 nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl 2263 c = s.variable(&capVar, Types[TINT]) // generates phi for cap 2264 } 2265 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2266 // TODO: just one write barrier call for all of these writes? 2267 // TODO: maybe just one writeBarrier.enabled check? 
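// Each pending arg either carries an SSA value (arg.store == true) that can
// be stored directly, or the address of a non-SSA-able value that must be
// copied with a move; both paths switch to the write barrier helpers
// (insertWBstore/insertWBmove) when the element type contains pointers.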
2268 for i, arg := range args { 2269 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i))) 2270 if arg.store { 2271 if haspointers(et) { 2272 s.insertWBstore(et, addr, arg.v, n.Pos, 0) 2273 } else { 2274 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem()) 2275 } 2276 } else { 2277 if haspointers(et) { 2278 s.insertWBmove(et, addr, arg.v, n.Pos, arg.isVolatile) 2279 } else { 2280 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(et), addr, arg.v, s.mem()) 2281 } 2282 } 2283 } 2284 2285 delete(s.vars, &ptrVar) 2286 if inplace { 2287 delete(s.vars, &lenVar) 2288 return nil 2289 } 2290 delete(s.vars, &newlenVar) 2291 delete(s.vars, &capVar) 2292 // make result 2293 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2294 } 2295 2296 // condBranch evaluates the boolean expression cond and branches to yes 2297 // if cond is true and no if cond is false. 2298 // This function is intended to handle && and || better than just calling 2299 // s.expr(cond) and branching on the result. 2300 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { 2301 if cond.Op == OANDAND { 2302 mid := s.f.NewBlock(ssa.BlockPlain) 2303 s.stmtList(cond.Ninit) 2304 s.condBranch(cond.Left, mid, no, max8(likely, 0)) 2305 s.startBlock(mid) 2306 s.condBranch(cond.Right, yes, no, likely) 2307 return 2308 // Note: if likely==1, then both recursive calls pass 1. 2309 // If likely==-1, then we don't have enough information to decide 2310 // whether the first branch is likely or not. So we pass 0 for 2311 // the likeliness of the first branch. 2312 // TODO: have the frontend give us branch prediction hints for 2313 // OANDAND and OOROR nodes (if it ever has such info). 2314 } 2315 if cond.Op == OOROR { 2316 mid := s.f.NewBlock(ssa.BlockPlain) 2317 s.stmtList(cond.Ninit) 2318 s.condBranch(cond.Left, yes, mid, min8(likely, 0)) 2319 s.startBlock(mid) 2320 s.condBranch(cond.Right, yes, no, likely) 2321 return 2322 // Note: if likely==-1, then both recursive calls pass -1. 2323 // If likely==1, then we don't have enough info to decide 2324 // the likelihood of the first branch. 2325 } 2326 if cond.Op == ONOT { 2327 s.stmtList(cond.Ninit) 2328 s.condBranch(cond.Left, no, yes, -likely) 2329 return 2330 } 2331 c := s.expr(cond) 2332 b := s.endBlock() 2333 b.Kind = ssa.BlockIf 2334 b.SetControl(c) 2335 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness 2336 b.AddEdgeTo(yes) 2337 b.AddEdgeTo(no) 2338 } 2339 2340 type skipMask uint8 2341 2342 const ( 2343 skipPtr skipMask = 1 << iota 2344 skipLen 2345 skipCap 2346 ) 2347 2348 // assign does left = right. 2349 // Right has already been evaluated to ssa, left has not. 2350 // If deref is true, then we do left = *right instead (and right has already been nil-checked). 2351 // If deref is true and right == nil, just do left = 0. 2352 // If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage. 2353 // Include a write barrier if wb is true. 2354 // skip indicates assignments (at the top level) that can be avoided. 
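// For instance, assigning a slicing of s back to s itself (s = s[:n])
// leaves the pointer word unchanged, so the caller can pass skipPtr and
// avoid both the redundant store and its write barrier.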
2355 func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line src.XPos, skip skipMask, rightIsVolatile bool) {
2356 if left.Op == ONAME && isblank(left) {
2357 return
2358 }
2359 t := left.Type
2360 dowidth(t)
2361 if s.canSSA(left) {
2362 if deref {
2363 s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
2364 }
2365 if left.Op == ODOT {
2366 // We're assigning to a field of an ssa-able value.
2367 // We need to build a new structure with the new value for the
2368 // field we're assigning and the old values for the other fields.
2369 // For instance:
2370 // type T struct {a, b, c int}
2371 // var x T
2372 // x.b = 5
2373 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
2374
2375 // Grab information about the structure type.
2376 t := left.Left.Type
2377 nf := t.NumFields()
2378 idx := fieldIdx(left)
2379
2380 // Grab old value of structure.
2381 old := s.expr(left.Left)
2382
2383 // Make new structure.
2384 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
2385
2386 // Add fields as args.
2387 for i := 0; i < nf; i++ {
2388 if i == idx {
2389 new.AddArg(right)
2390 } else {
2391 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
2392 }
2393 }
2394
2395 // Recursively assign the new value we've made to the base of the dot op.
2396 s.assign(left.Left, new, false, false, line, 0, rightIsVolatile)
2397 // TODO: do we need to update named values here?
2398 return
2399 }
2400 if left.Op == OINDEX && left.Left.Type.IsArray() {
2401 // We're assigning to an element of an ssa-able array.
2402 // a[i] = v
2403 t := left.Left.Type
2404 n := t.NumElem()
2405
2406 i := s.expr(left.Right) // index
2407 if n == 0 {
2408 // The bounds check must fail. Might as well
2409 // ignore the actual index and just use zeros.
2410 z := s.constInt(Types[TINT], 0)
2411 s.boundsCheck(z, z)
2412 return
2413 }
2414 if n != 1 {
2415 s.Fatalf("assigning to non-1-length array")
2416 }
2417 // Rewrite to a = [1]{v}
2418 i = s.extendIndex(i, panicindex)
2419 s.boundsCheck(i, s.constInt(Types[TINT], 1))
2420 v := s.newValue1(ssa.OpArrayMake1, t, right)
2421 s.assign(left.Left, v, false, false, line, 0, rightIsVolatile)
2422 return
2423 }
2424 // Update variable assignment.
2425 s.vars[left] = right
2426 s.addNamedValue(left, right)
2427 return
2428 }
2429 // Left is not ssa-able. Compute its address.
2430 addr, _ := s.addr(left, false)
2431 if left.Op == ONAME && skip == 0 {
2432 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
2433 }
2434 if deref {
2435 // Treat as a mem->mem move.
2436 if wb && !ssa.IsStackAddr(addr) {
2437 s.insertWBmove(t, addr, right, line, rightIsVolatile)
2438 return
2439 }
2440 if right == nil {
2441 s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(t), addr, s.mem())
2442 return
2443 }
2444 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(t), addr, right, s.mem())
2445 return
2446 }
2447 // Treat as a store.
2448 if wb && !ssa.IsStackAddr(addr) {
2449 if skip&skipPtr != 0 {
2450 // Special case: if we don't write back the pointers, don't bother
2451 // doing the write barrier check.
2452 s.storeTypeScalars(t, addr, right, skip) 2453 return 2454 } 2455 s.insertWBstore(t, addr, right, line, skip) 2456 return 2457 } 2458 if skip != 0 { 2459 if skip&skipPtr == 0 { 2460 s.storeTypePtrs(t, addr, right) 2461 } 2462 s.storeTypeScalars(t, addr, right, skip) 2463 return 2464 } 2465 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem()) 2466 } 2467 2468 // zeroVal returns the zero value for type t. 2469 func (s *state) zeroVal(t *Type) *ssa.Value { 2470 switch { 2471 case t.IsInteger(): 2472 switch t.Size() { 2473 case 1: 2474 return s.constInt8(t, 0) 2475 case 2: 2476 return s.constInt16(t, 0) 2477 case 4: 2478 return s.constInt32(t, 0) 2479 case 8: 2480 return s.constInt64(t, 0) 2481 default: 2482 s.Fatalf("bad sized integer type %v", t) 2483 } 2484 case t.IsFloat(): 2485 switch t.Size() { 2486 case 4: 2487 return s.constFloat32(t, 0) 2488 case 8: 2489 return s.constFloat64(t, 0) 2490 default: 2491 s.Fatalf("bad sized float type %v", t) 2492 } 2493 case t.IsComplex(): 2494 switch t.Size() { 2495 case 8: 2496 z := s.constFloat32(Types[TFLOAT32], 0) 2497 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2498 case 16: 2499 z := s.constFloat64(Types[TFLOAT64], 0) 2500 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2501 default: 2502 s.Fatalf("bad sized complex type %v", t) 2503 } 2504 2505 case t.IsString(): 2506 return s.constEmptyString(t) 2507 case t.IsPtrShaped(): 2508 return s.constNil(t) 2509 case t.IsBoolean(): 2510 return s.constBool(false) 2511 case t.IsInterface(): 2512 return s.constInterface(t) 2513 case t.IsSlice(): 2514 return s.constSlice(t) 2515 case t.IsStruct(): 2516 n := t.NumFields() 2517 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2518 for i := 0; i < n; i++ { 2519 v.AddArg(s.zeroVal(t.FieldType(i).(*Type))) 2520 } 2521 return v 2522 case t.IsArray(): 2523 switch t.NumElem() { 2524 case 0: 2525 return s.entryNewValue0(ssa.OpArrayMake0, t) 2526 case 1: 2527 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) 2528 } 2529 } 2530 s.Fatalf("zero for type %v not implemented", t) 2531 return nil 2532 } 2533 2534 type callKind int8 2535 2536 const ( 2537 callNormal callKind = iota 2538 callDefer 2539 callGo 2540 ) 2541 2542 // TODO: make this a field of a configuration object instead of a global. 2543 var intrinsics *intrinsicInfo 2544 2545 type intrinsicInfo struct { 2546 std map[intrinsicKey]intrinsicBuilder 2547 intSized map[sizedIntrinsicKey]intrinsicBuilder 2548 ptrSized map[sizedIntrinsicKey]intrinsicBuilder 2549 } 2550 2551 // An intrinsicBuilder converts a call node n into an ssa value that 2552 // implements that call as an intrinsic. args is a list of arguments to the func. 2553 type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value 2554 2555 type intrinsicKey struct { 2556 pkg string 2557 fn string 2558 } 2559 2560 type sizedIntrinsicKey struct { 2561 pkg string 2562 fn string 2563 size int 2564 } 2565 2566 // disableForInstrumenting returns nil when instrumenting, fn otherwise 2567 func disableForInstrumenting(fn intrinsicBuilder) intrinsicBuilder { 2568 if instrumenting { 2569 return nil 2570 } 2571 return fn 2572 } 2573 2574 // enableOnArch returns fn on given archs, nil otherwise 2575 func enableOnArch(fn intrinsicBuilder, archs ...sys.ArchFamily) intrinsicBuilder { 2576 if Thearch.LinkArch.InFamily(archs...) 
{ 2577 return fn 2578 } 2579 return nil 2580 } 2581 2582 func intrinsicInit() { 2583 i := &intrinsicInfo{} 2584 intrinsics = i 2585 2586 // initial set of intrinsics. 2587 i.std = map[intrinsicKey]intrinsicBuilder{ 2588 /******** runtime ********/ 2589 intrinsicKey{"runtime", "slicebytetostringtmp"}: disableForInstrumenting(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2590 // Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes 2591 // for the backend instead of slicebytetostringtmp calls 2592 // when not instrumenting. 2593 slice := args[0] 2594 ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice) 2595 len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice) 2596 return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) 2597 }), 2598 intrinsicKey{"runtime", "KeepAlive"}: func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2599 data := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), args[0]) 2600 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem()) 2601 return nil 2602 }, 2603 2604 /******** runtime/internal/sys ********/ 2605 intrinsicKey{"runtime/internal/sys", "Ctz32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2606 return s.newValue1(ssa.OpCtz32, Types[TUINT32], args[0]) 2607 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS), 2608 intrinsicKey{"runtime/internal/sys", "Ctz64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2609 return s.newValue1(ssa.OpCtz64, Types[TUINT64], args[0]) 2610 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS), 2611 intrinsicKey{"runtime/internal/sys", "Bswap32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2612 return s.newValue1(ssa.OpBswap32, Types[TUINT32], args[0]) 2613 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X), 2614 intrinsicKey{"runtime/internal/sys", "Bswap64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2615 return s.newValue1(ssa.OpBswap64, Types[TUINT64], args[0]) 2616 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X), 2617 2618 /******** runtime/internal/atomic ********/ 2619 intrinsicKey{"runtime/internal/atomic", "Load"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2620 v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], s.mem()) 2621 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2622 return s.newValue1(ssa.OpSelect0, Types[TUINT32], v) 2623 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2624 intrinsicKey{"runtime/internal/atomic", "Load64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2625 v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], s.mem()) 2626 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2627 return s.newValue1(ssa.OpSelect0, Types[TUINT64], v) 2628 }, sys.AMD64, sys.ARM64, sys.S390X), 2629 intrinsicKey{"runtime/internal/atomic", "Loadp"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2630 v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(ptrto(Types[TUINT8]), ssa.TypeMem), args[0], s.mem()) 2631 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2632 return s.newValue1(ssa.OpSelect0, ptrto(Types[TUINT8]), v) 2633 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2634 2635 intrinsicKey{"runtime/internal/atomic", "Store"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2636 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, args[0], args[1], s.mem()) 2637 
return nil 2638 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2639 intrinsicKey{"runtime/internal/atomic", "Store64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2640 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, args[0], args[1], s.mem()) 2641 return nil 2642 }, sys.AMD64, sys.ARM64, sys.S390X), 2643 intrinsicKey{"runtime/internal/atomic", "StorepNoWB"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2644 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, args[0], args[1], s.mem()) 2645 return nil 2646 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2647 2648 intrinsicKey{"runtime/internal/atomic", "Xchg"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2649 v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem()) 2650 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2651 return s.newValue1(ssa.OpSelect0, Types[TUINT32], v) 2652 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2653 intrinsicKey{"runtime/internal/atomic", "Xchg64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2654 v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem()) 2655 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2656 return s.newValue1(ssa.OpSelect0, Types[TUINT64], v) 2657 }, sys.AMD64, sys.ARM64, sys.S390X), 2658 2659 intrinsicKey{"runtime/internal/atomic", "Xadd"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2660 v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem()) 2661 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2662 return s.newValue1(ssa.OpSelect0, Types[TUINT32], v) 2663 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2664 intrinsicKey{"runtime/internal/atomic", "Xadd64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2665 v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem()) 2666 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2667 return s.newValue1(ssa.OpSelect0, Types[TUINT64], v) 2668 }, sys.AMD64, sys.ARM64, sys.S390X), 2669 2670 intrinsicKey{"runtime/internal/atomic", "Cas"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2671 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem()) 2672 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2673 return s.newValue1(ssa.OpSelect0, Types[TBOOL], v) 2674 }, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS), 2675 intrinsicKey{"runtime/internal/atomic", "Cas64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2676 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem()) 2677 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2678 return s.newValue1(ssa.OpSelect0, Types[TBOOL], v) 2679 }, sys.AMD64, sys.ARM64, sys.S390X), 2680 2681 intrinsicKey{"runtime/internal/atomic", "And8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2682 s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, args[0], args[1], s.mem()) 2683 return nil 2684 }, sys.AMD64, sys.ARM64, sys.MIPS), 2685 intrinsicKey{"runtime/internal/atomic", "Or8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2686 
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, args[0], args[1], s.mem()) 2687 return nil 2688 }, sys.AMD64, sys.ARM64, sys.MIPS), 2689 2690 /******** math ********/ 2691 intrinsicKey{"math", "Sqrt"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2692 return s.newValue1(ssa.OpSqrt, Types[TFLOAT64], args[0]) 2693 }, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X), 2694 } 2695 2696 // aliases internal to runtime/internal/atomic 2697 i.std[intrinsicKey{"runtime/internal/atomic", "Loadint64"}] = 2698 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2699 i.std[intrinsicKey{"runtime/internal/atomic", "Xaddint64"}] = 2700 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2701 2702 // intrinsics which vary depending on the size of int/ptr. 2703 i.intSized = map[sizedIntrinsicKey]intrinsicBuilder{ 2704 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Load"}], 2705 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}], 2706 } 2707 i.ptrSized = map[sizedIntrinsicKey]intrinsicBuilder{ 2708 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Load"}], 2709 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}], 2710 sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Store"}], 2711 sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}], 2712 sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}], 2713 sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}], 2714 sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}], 2715 sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}], 2716 sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}], 2717 sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}], 2718 sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}], 2719 sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}], 2720 } 2721 2722 /******** sync/atomic ********/ 2723 if flag_race { 2724 // The race detector needs to be able to intercept these calls. 2725 // We can't intrinsify them. 2726 return 2727 } 2728 // these are all aliases to runtime/internal/atomic implementations. 
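// The aliasing is sound because the intrinsic builders depend only on
// operand width, not signedness; e.g. LoadInt32 and LoadUint32 below both
// reuse the 32-bit runtime/internal/atomic.Load builder.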
2729 i.std[intrinsicKey{"sync/atomic", "LoadInt32"}] = 2730 i.std[intrinsicKey{"runtime/internal/atomic", "Load"}] 2731 i.std[intrinsicKey{"sync/atomic", "LoadInt64"}] = 2732 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2733 i.std[intrinsicKey{"sync/atomic", "LoadPointer"}] = 2734 i.std[intrinsicKey{"runtime/internal/atomic", "Loadp"}] 2735 i.std[intrinsicKey{"sync/atomic", "LoadUint32"}] = 2736 i.std[intrinsicKey{"runtime/internal/atomic", "Load"}] 2737 i.std[intrinsicKey{"sync/atomic", "LoadUint64"}] = 2738 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2739 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 4}] = 2740 i.std[intrinsicKey{"runtime/internal/atomic", "Load"}] 2741 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 8}] = 2742 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2743 2744 i.std[intrinsicKey{"sync/atomic", "StoreInt32"}] = 2745 i.std[intrinsicKey{"runtime/internal/atomic", "Store"}] 2746 i.std[intrinsicKey{"sync/atomic", "StoreInt64"}] = 2747 i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}] 2748 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. 2749 i.std[intrinsicKey{"sync/atomic", "StoreUint32"}] = 2750 i.std[intrinsicKey{"runtime/internal/atomic", "Store"}] 2751 i.std[intrinsicKey{"sync/atomic", "StoreUint64"}] = 2752 i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}] 2753 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 4}] = 2754 i.std[intrinsicKey{"runtime/internal/atomic", "Store"}] 2755 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 8}] = 2756 i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}] 2757 2758 i.std[intrinsicKey{"sync/atomic", "SwapInt32"}] = 2759 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}] 2760 i.std[intrinsicKey{"sync/atomic", "SwapInt64"}] = 2761 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}] 2762 i.std[intrinsicKey{"sync/atomic", "SwapUint32"}] = 2763 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}] 2764 i.std[intrinsicKey{"sync/atomic", "SwapUint64"}] = 2765 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}] 2766 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 4}] = 2767 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}] 2768 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 8}] = 2769 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}] 2770 2771 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt32"}] = 2772 i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}] 2773 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt64"}] = 2774 i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}] 2775 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint32"}] = 2776 i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}] 2777 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint64"}] = 2778 i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}] 2779 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 4}] = 2780 i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}] 2781 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 8}] = 2782 i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}] 2783 2784 i.std[intrinsicKey{"sync/atomic", "AddInt32"}] = 2785 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}] 2786 i.std[intrinsicKey{"sync/atomic", "AddInt64"}] = 2787 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2788 i.std[intrinsicKey{"sync/atomic", "AddUint32"}] = 2789 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}] 
2790 i.std[intrinsicKey{"sync/atomic", "AddUint64"}] = 2791 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2792 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 4}] = 2793 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}] 2794 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 8}] = 2795 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2796 2797 /******** math/big ********/ 2798 i.intSized[sizedIntrinsicKey{"math/big", "mulWW", 8}] = 2799 enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2800 return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1]) 2801 }, sys.AMD64) 2802 i.intSized[sizedIntrinsicKey{"math/big", "divWW", 8}] = 2803 enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2804 return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1], args[2]) 2805 }, sys.AMD64) 2806 } 2807 2808 // findIntrinsic returns a function which builds the SSA equivalent of the 2809 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. 2810 func findIntrinsic(sym *Sym) intrinsicBuilder { 2811 if ssa.IntrinsicsDisable { 2812 return nil 2813 } 2814 if sym == nil || sym.Pkg == nil { 2815 return nil 2816 } 2817 if intrinsics == nil { 2818 intrinsicInit() 2819 } 2820 pkg := sym.Pkg.Path 2821 if sym.Pkg == localpkg { 2822 pkg = myimportpath 2823 } 2824 fn := sym.Name 2825 f := intrinsics.std[intrinsicKey{pkg, fn}] 2826 if f != nil { 2827 return f 2828 } 2829 f = intrinsics.intSized[sizedIntrinsicKey{pkg, fn, Widthint}] 2830 if f != nil { 2831 return f 2832 } 2833 return intrinsics.ptrSized[sizedIntrinsicKey{pkg, fn, Widthptr}] 2834 } 2835 2836 func isIntrinsicCall(n *Node) bool { 2837 if n == nil || n.Left == nil { 2838 return false 2839 } 2840 return findIntrinsic(n.Left.Sym) != nil 2841 } 2842 2843 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 2844 func (s *state) intrinsicCall(n *Node) *ssa.Value { 2845 v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) 2846 if ssa.IntrinsicsDebug > 0 { 2847 x := v 2848 if x == nil { 2849 x = s.mem() 2850 } 2851 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { 2852 x = x.Args[0] 2853 } 2854 Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) 2855 } 2856 return v 2857 } 2858 2859 type callArg struct { 2860 offset int64 2861 v *ssa.Value 2862 } 2863 type byOffset []callArg 2864 2865 func (x byOffset) Len() int { return len(x) } 2866 func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 2867 func (x byOffset) Less(i, j int) bool { 2868 return x[i].offset < x[j].offset 2869 } 2870 2871 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. 2872 func (s *state) intrinsicArgs(n *Node) []*ssa.Value { 2873 // This code is complicated because of how walk transforms calls. For a call node, 2874 // each entry in n.List is either an assignment to OINDREGSP which actually 2875 // stores an arg, or an assignment to a temporary which computes an arg 2876 // which is later assigned. 2877 // The args can also be out of order. 2878 // TODO: when walk goes away someday, this code can go away also. 
2879 var args []callArg 2880 temps := map[*Node]*ssa.Value{} 2881 for _, a := range n.List.Slice() { 2882 if a.Op != OAS { 2883 s.Fatalf("non-assignment as a function argument %s", opnames[a.Op]) 2884 } 2885 l, r := a.Left, a.Right 2886 switch l.Op { 2887 case ONAME: 2888 // Evaluate and store to "temporary". 2889 // Walk ensures these temporaries are dead outside of n. 2890 temps[l] = s.expr(r) 2891 case OINDREGSP: 2892 // Store a value to an argument slot. 2893 var v *ssa.Value 2894 if x, ok := temps[r]; ok { 2895 // This is a previously computed temporary. 2896 v = x 2897 } else { 2898 // This is an explicit value; evaluate it. 2899 v = s.expr(r) 2900 } 2901 args = append(args, callArg{l.Xoffset, v}) 2902 default: 2903 s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op]) 2904 } 2905 } 2906 sort.Sort(byOffset(args)) 2907 res := make([]*ssa.Value, len(args)) 2908 for i, a := range args { 2909 res[i] = a.v 2910 } 2911 return res 2912 } 2913 2914 // Calls the function n using the specified call type. 2915 // Returns the address of the return value (or nil if none). 2916 func (s *state) call(n *Node, k callKind) *ssa.Value { 2917 var sym *Sym // target symbol (if static) 2918 var closure *ssa.Value // ptr to closure to run (if dynamic) 2919 var codeptr *ssa.Value // ptr to target code (if dynamic) 2920 var rcvr *ssa.Value // receiver to set 2921 fn := n.Left 2922 switch n.Op { 2923 case OCALLFUNC: 2924 if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC { 2925 sym = fn.Sym 2926 break 2927 } 2928 closure = s.expr(fn) 2929 case OCALLMETH: 2930 if fn.Op != ODOTMETH { 2931 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 2932 } 2933 if k == callNormal { 2934 sym = fn.Sym 2935 break 2936 } 2937 // Make a name n2 for the function. 2938 // fn.Sym might be sync.(*Mutex).Unlock. 2939 // Make a PFUNC node out of that, then evaluate it. 2940 // We get back an SSA value representing &sync.(*Mutex).Unlock·f. 2941 // We can then pass that to defer or go. 2942 n2 := newname(fn.Sym) 2943 n2.Class = PFUNC 2944 n2.Pos = fn.Pos 2945 n2.Type = Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. 2946 closure = s.expr(n2) 2947 // Note: receiver is already assigned in n.List, so we don't 2948 // want to set it here. 2949 case OCALLINTER: 2950 if fn.Op != ODOTINTER { 2951 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 2952 } 2953 i := s.expr(fn.Left) 2954 itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i) 2955 if k != callNormal { 2956 s.nilCheck(itab) 2957 } 2958 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab 2959 itab = s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), itabidx, itab) 2960 if k == callNormal { 2961 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem()) 2962 } else { 2963 closure = itab 2964 } 2965 rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i) 2966 } 2967 dowidth(fn.Type) 2968 stksize := fn.Type.ArgWidth() // includes receiver 2969 2970 // Run all argument assignments. The arg slots have already 2971 // been offset by the appropriate amount (+2*widthptr for go/defer, 2972 // +widthptr for interface calls). 2973 // For OCALLMETH, the receiver is set in these statements. 
2974 s.stmtList(n.List) 2975 2976 // Set receiver (for interface calls) 2977 if rcvr != nil { 2978 argStart := Ctxt.FixedFrameSize() 2979 if k != callNormal { 2980 argStart += int64(2 * Widthptr) 2981 } 2982 addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), argStart, s.sp) 2983 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem()) 2984 } 2985 2986 // Defer/go args 2987 if k != callNormal { 2988 // Write argsize and closure (args to Newproc/Deferproc). 2989 argStart := Ctxt.FixedFrameSize() 2990 argsize := s.constInt32(Types[TUINT32], int32(stksize)) 2991 addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINT32]), argStart, s.sp) 2992 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, addr, argsize, s.mem()) 2993 addr = s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), argStart+int64(Widthptr), s.sp) 2994 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem()) 2995 stksize += 2 * int64(Widthptr) 2996 } 2997 2998 // call target 2999 var call *ssa.Value 3000 switch { 3001 case k == callDefer: 3002 call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem()) 3003 case k == callGo: 3004 call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem()) 3005 case closure != nil: 3006 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem()) 3007 call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem()) 3008 case codeptr != nil: 3009 call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem()) 3010 case sym != nil: 3011 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Linksym(sym), s.mem()) 3012 default: 3013 Fatalf("bad call type %v %v", n.Op, n) 3014 } 3015 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 3016 s.vars[&memVar] = call 3017 3018 // Finish block for defers 3019 if k == callDefer { 3020 b := s.endBlock() 3021 b.Kind = ssa.BlockDefer 3022 b.SetControl(call) 3023 bNext := s.f.NewBlock(ssa.BlockPlain) 3024 b.AddEdgeTo(bNext) 3025 // Add recover edge to exit code. 3026 r := s.f.NewBlock(ssa.BlockPlain) 3027 s.startBlock(r) 3028 s.exit() 3029 b.AddEdgeTo(r) 3030 b.Likely = ssa.BranchLikely 3031 s.startBlock(bNext) 3032 } 3033 3034 res := n.Left.Type.Results() 3035 if res.NumFields() == 0 || k != callNormal { 3036 // call has no return value. Continue with the next statement. 3037 return nil 3038 } 3039 fp := res.Field(0) 3040 return s.entryNewValue1I(ssa.OpOffPtr, ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp) 3041 } 3042 3043 // etypesign returns the signed-ness of e, for integer/pointer etypes. 3044 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 3045 func etypesign(e EType) int8 { 3046 switch e { 3047 case TINT8, TINT16, TINT32, TINT64, TINT: 3048 return -1 3049 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 3050 return +1 3051 } 3052 return 0 3053 } 3054 3055 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node. 3056 // This improves the effectiveness of cse by using the same Aux values for the 3057 // same symbols. 
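// Without this canonicalization, two OpAddr values for the same variable
// could carry distinct (pointer-unequal) Aux symbols and would then never
// compare equal during cse.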
3058 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
3059 switch sym.(type) {
3060 default:
3061 s.Fatalf("sym %v is of unknown type %T", sym, sym)
3062 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
3063 // these are the only valid types
3064 }
3065
3066 if lsym, ok := s.varsyms[n]; ok {
3067 return lsym
3068 } else {
3069 s.varsyms[n] = sym
3070 return sym
3071 }
3072 }
3073
3074 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
3075 // Also returns a bool reporting whether the returned value is "volatile", that is, it
3076 // points to the outargs section and thus the referent will be clobbered by any call.
3077 // The value that the returned Value represents is guaranteed to be non-nil.
3078 // If bounded is true then this address does not require a nil check for its operand
3079 // even if that would otherwise be implied.
3080 func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) {
3081 t := ptrto(n.Type)
3082 switch n.Op {
3083 case ONAME:
3084 switch n.Class {
3085 case PEXTERN:
3086 // global variable
3087 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: Linksym(n.Sym)})
3088 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
3089 // TODO: Make OpAddr use AuxInt as well as Aux.
3090 if n.Xoffset != 0 {
3091 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
3092 }
3093 return v, false
3094 case PPARAM:
3095 // parameter slot
3096 v := s.decladdrs[n]
3097 if v != nil {
3098 return v, false
3099 }
3100 if n == nodfp {
3101 // Special arg that points to the frame pointer (Used by ORECOVER).
3102 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
3103 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false
3104 }
3105 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
3106 return nil, false
3107 case PAUTO:
3108 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
3109 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false
3110 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
3111 // ensure that we reuse symbols for out parameters so 3112 // that cse works on their addresses 3113 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 3114 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 3115 default: 3116 s.Fatalf("variable address class %v not implemented", classnames[n.Class]) 3117 return nil, false 3118 } 3119 case OINDREGSP: 3120 // indirect off REGSP 3121 // used for storing/loading arguments/returns to/from callees 3122 return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true 3123 case OINDEX: 3124 if n.Left.Type.IsSlice() { 3125 a := s.expr(n.Left) 3126 i := s.expr(n.Right) 3127 i = s.extendIndex(i, panicindex) 3128 len := s.newValue1(ssa.OpSliceLen, Types[TINT], a) 3129 if !n.Bounded { 3130 s.boundsCheck(i, len) 3131 } 3132 p := s.newValue1(ssa.OpSlicePtr, t, a) 3133 return s.newValue2(ssa.OpPtrIndex, t, p, i), false 3134 } else { // array 3135 a, isVolatile := s.addr(n.Left, bounded) 3136 i := s.expr(n.Right) 3137 i = s.extendIndex(i, panicindex) 3138 len := s.constInt(Types[TINT], n.Left.Type.NumElem()) 3139 if !n.Bounded { 3140 s.boundsCheck(i, len) 3141 } 3142 return s.newValue2(ssa.OpPtrIndex, ptrto(n.Left.Type.Elem()), a, i), isVolatile 3143 } 3144 case OIND: 3145 return s.exprPtr(n.Left, bounded, n.Pos), false 3146 case ODOT: 3147 p, isVolatile := s.addr(n.Left, bounded) 3148 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile 3149 case ODOTPTR: 3150 p := s.exprPtr(n.Left, bounded, n.Pos) 3151 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false 3152 case OCLOSUREVAR: 3153 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 3154 s.entryNewValue0(ssa.OpGetClosurePtr, ptrto(Types[TUINT8]))), false 3155 case OCONVNOP: 3156 addr, isVolatile := s.addr(n.Left, bounded) 3157 return s.newValue1(ssa.OpCopy, t, addr), isVolatile // ensure that addr has the right type 3158 case OCALLFUNC, OCALLINTER, OCALLMETH: 3159 return s.call(n, callNormal), true 3160 case ODOTTYPE: 3161 v, _ := s.dottype(n, false) 3162 if v.Op != ssa.OpLoad { 3163 s.Fatalf("dottype of non-load") 3164 } 3165 if v.Args[1] != s.mem() { 3166 s.Fatalf("memory no longer live from dottype load") 3167 } 3168 return v.Args[0], false 3169 default: 3170 s.Fatalf("unhandled addr %v", n.Op) 3171 return nil, false 3172 } 3173 } 3174 3175 // canSSA reports whether n is SSA-able. 3176 // n must be an ONAME (or an ODOT sequence with an ONAME base). 3177 func (s *state) canSSA(n *Node) bool { 3178 if Debug['N'] != 0 { 3179 return false 3180 } 3181 for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) { 3182 n = n.Left 3183 } 3184 if n.Op != ONAME { 3185 return false 3186 } 3187 if n.Addrtaken { 3188 return false 3189 } 3190 if n.isParamHeapCopy() { 3191 return false 3192 } 3193 if n.Class == PAUTOHEAP { 3194 Fatalf("canSSA of PAUTOHEAP %v", n) 3195 } 3196 switch n.Class { 3197 case PEXTERN: 3198 return false 3199 case PPARAMOUT: 3200 if hasdefer { 3201 // TODO: handle this case? Named return values must be 3202 // in memory so that the deferred function can see them. 3203 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 3204 // Or maybe not, see issue 18860. Even unnamed return values 3205 // must be written back so if a defer recovers, the caller can see them. 3206 return false 3207 } 3208 if s.cgoUnsafeArgs { 3209 // Cgo effectively takes the address of all result args, 3210 // but the compiler can't see that. 
3211 return false
3212 }
3213 }
3214 if n.Class == PPARAM && n.String() == ".this" {
3215 // wrappers generated by genwrapper need to update
3216 // the .this pointer in place.
3217 // TODO: treat as a PPARAMOUT?
3218 return false
3219 }
3220 return canSSAType(n.Type)
3221 // TODO: try to make more variables SSAable?
3222 }
3223 
3224 // canSSAType reports whether variables of type t are SSA-able.
3225 func canSSAType(t *Type) bool {
3226 dowidth(t)
3227 if t.Width > int64(4*Widthptr) {
3228 // 4*Widthptr is an arbitrary constant. We want it
3229 // to be at least 3*Widthptr so slices can be registerized.
3230 // Too big and we'll introduce too much register pressure.
3231 return false
3232 }
3233 switch t.Etype {
3234 case TARRAY:
3235 // We can't do larger arrays because dynamic indexing is
3236 // not supported on SSA variables.
3237 // TODO: allow if all indexes are constant.
3238 if t.NumElem() == 0 {
3239 return true
3240 }
3241 if t.NumElem() == 1 {
3242 return canSSAType(t.Elem())
3243 }
3244 return false
3245 case TSTRUCT:
3246 if t.NumFields() > ssa.MaxStruct {
3247 return false
3248 }
3249 for _, t1 := range t.Fields().Slice() {
3250 if !canSSAType(t1.Type) {
3251 return false
3252 }
3253 }
3254 return true
3255 default:
3256 return true
3257 }
3258 }
3259 
3260 // exprPtr evaluates n to a pointer and nil-checks it.
3261 func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
3262 p := s.expr(n)
3263 if bounded || n.NonNil {
3264 if s.f.Config.Debug_checknil() && lineno.Line() > 1 {
3265 s.f.Config.Warnl(lineno, "removed nil check")
3266 }
3267 return p
3268 }
3269 s.nilCheck(p)
3270 return p
3271 }
3272 
3273 // nilCheck generates nil pointer checking code.
3274 // Used only for automatically inserted nil checks,
3275 // not for user code like 'x != nil'.
3276 func (s *state) nilCheck(ptr *ssa.Value) {
3277 if disable_checknil != 0 {
3278 return
3279 }
3280 s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
3281 }
3282 
3283 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
3284 // Starts a new block on return.
3285 // idx is already converted to full int width.
3286 func (s *state) boundsCheck(idx, len *ssa.Value) {
3287 if Debug['B'] != 0 {
3288 return
3289 }
3290 
3291 // bounds check
3292 cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
3293 s.check(cmp, panicindex)
3294 }
3295 
3296 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
3297 // Starts a new block on return.
3298 // idx and len are already converted to full int width.
3299 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
3300 if Debug['B'] != 0 {
3301 return
3302 }
3303 
3304 // bounds check
3305 cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
3306 s.check(cmp, panicslice)
3307 }
3308 
3309 // check panics using the given function fn if cmp (a bool) is false.
3310 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
3311 b := s.endBlock()
3312 b.Kind = ssa.BlockIf
3313 b.SetControl(cmp)
3314 b.Likely = ssa.BranchLikely
3315 bNext := s.f.NewBlock(ssa.BlockPlain)
3316 line := s.peekPos()
3317 bPanic := s.panics[funcLine{fn, line}]
3318 if bPanic == nil {
3319 bPanic = s.f.NewBlock(ssa.BlockPlain)
3320 s.panics[funcLine{fn, line}] = bPanic
3321 s.startBlock(bPanic)
3322 // The panic call takes/returns memory to ensure that the right
3323 // memory state is observed if the panic happens.
3324 s.rtcall(fn, false, nil) 3325 } 3326 b.AddEdgeTo(bNext) 3327 b.AddEdgeTo(bPanic) 3328 s.startBlock(bNext) 3329 } 3330 3331 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { 3332 needcheck := true 3333 switch b.Op { 3334 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: 3335 if b.AuxInt != 0 { 3336 needcheck = false 3337 } 3338 } 3339 if needcheck { 3340 // do a size-appropriate check for zero 3341 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type)) 3342 s.check(cmp, panicdivide) 3343 } 3344 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 3345 } 3346 3347 // rtcall issues a call to the given runtime function fn with the listed args. 3348 // Returns a slice of results of the given result types. 3349 // The call is added to the end of the current block. 3350 // If returns is false, the block is marked as an exit block. 3351 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value { 3352 // Write args to the stack 3353 off := Ctxt.FixedFrameSize() 3354 for _, arg := range args { 3355 t := arg.Type 3356 off = Rnd(off, t.Alignment()) 3357 ptr := s.sp 3358 if off != 0 { 3359 ptr = s.newValue1I(ssa.OpOffPtr, t.PtrTo(), off, s.sp) 3360 } 3361 size := t.Size() 3362 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem()) 3363 off += size 3364 } 3365 off = Rnd(off, int64(Widthptr)) 3366 if Thearch.LinkArch.Name == "amd64p32" { 3367 // amd64p32 wants 8-byte alignment of the start of the return values. 3368 off = Rnd(off, 8) 3369 } 3370 3371 // Issue call 3372 call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn, s.mem()) 3373 s.vars[&memVar] = call 3374 3375 if !returns { 3376 // Finish block 3377 b := s.endBlock() 3378 b.Kind = ssa.BlockExit 3379 b.SetControl(call) 3380 call.AuxInt = off - Ctxt.FixedFrameSize() 3381 if len(results) > 0 { 3382 Fatalf("panic call can't have results") 3383 } 3384 return nil 3385 } 3386 3387 // Load results 3388 res := make([]*ssa.Value, len(results)) 3389 for i, t := range results { 3390 off = Rnd(off, t.Alignment()) 3391 ptr := s.sp 3392 if off != 0 { 3393 ptr = s.newValue1I(ssa.OpOffPtr, ptrto(t), off, s.sp) 3394 } 3395 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) 3396 off += t.Size() 3397 } 3398 off = Rnd(off, int64(Widthptr)) 3399 3400 // Remember how much callee stack space we needed. 3401 call.AuxInt = off 3402 3403 return res 3404 } 3405 3406 // insertWBmove inserts the assignment *left = *right including a write barrier. 3407 // t is the type being assigned. 3408 // If right == nil, then we're zeroing *left. 
3409 func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line src.XPos, rightIsVolatile bool) { 3410 // if writeBarrier.enabled { 3411 // typedmemmove(&t, left, right) 3412 // } else { 3413 // *left = *right 3414 // } 3415 // 3416 // or 3417 // 3418 // if writeBarrier.enabled { 3419 // typedmemclr(&t, left) 3420 // } else { 3421 // *left = zeroValue 3422 // } 3423 3424 if s.noWB { 3425 s.Error("write barrier prohibited") 3426 } 3427 if !s.WBPos.IsKnown() { 3428 s.WBPos = left.Pos 3429 } 3430 3431 var val *ssa.Value 3432 if right == nil { 3433 val = s.newValue2I(ssa.OpZeroWB, ssa.TypeMem, sizeAlignAuxInt(t), left, s.mem()) 3434 } else { 3435 var op ssa.Op 3436 if rightIsVolatile { 3437 op = ssa.OpMoveWBVolatile 3438 } else { 3439 op = ssa.OpMoveWB 3440 } 3441 val = s.newValue3I(op, ssa.TypeMem, sizeAlignAuxInt(t), left, right, s.mem()) 3442 } 3443 val.Aux = &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: Linksym(typenamesym(t))} 3444 s.vars[&memVar] = val 3445 3446 // WB ops will be expanded to branches at writebarrier phase. 3447 // To make it easy, we put WB ops at the end of a block, so 3448 // that it does not need to split a block into two parts when 3449 // expanding WB ops. 3450 b := s.f.NewBlock(ssa.BlockPlain) 3451 s.endBlock().AddEdgeTo(b) 3452 s.startBlock(b) 3453 } 3454 3455 // insertWBstore inserts the assignment *left = right including a write barrier. 3456 // t is the type being assigned. 3457 func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line src.XPos, skip skipMask) { 3458 // store scalar fields 3459 // if writeBarrier.enabled { 3460 // writebarrierptr for pointer fields 3461 // } else { 3462 // store pointer fields 3463 // } 3464 3465 if s.noWB { 3466 s.Error("write barrier prohibited") 3467 } 3468 if !s.WBPos.IsKnown() { 3469 s.WBPos = left.Pos 3470 } 3471 s.storeTypeScalars(t, left, right, skip) 3472 s.storeTypePtrsWB(t, left, right) 3473 3474 // WB ops will be expanded to branches at writebarrier phase. 3475 // To make it easy, we put WB ops at the end of a block, so 3476 // that it does not need to split a block into two parts when 3477 // expanding WB ops. 3478 b := s.f.NewBlock(ssa.BlockPlain) 3479 s.endBlock().AddEdgeTo(b) 3480 s.startBlock(b) 3481 } 3482 3483 // do *left = right for all scalar (non-pointer) parts of t. 3484 func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) { 3485 switch { 3486 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): 3487 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem()) 3488 case t.IsPtrShaped(): 3489 // no scalar fields. 
3490 case t.IsString(): 3491 if skip&skipLen != 0 { 3492 return 3493 } 3494 len := s.newValue1(ssa.OpStringLen, Types[TINT], right) 3495 lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left) 3496 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem()) 3497 case t.IsSlice(): 3498 if skip&skipLen == 0 { 3499 len := s.newValue1(ssa.OpSliceLen, Types[TINT], right) 3500 lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left) 3501 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem()) 3502 } 3503 if skip&skipCap == 0 { 3504 cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right) 3505 capAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), 2*s.config.IntSize, left) 3506 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem()) 3507 } 3508 case t.IsInterface(): 3509 // itab field doesn't need a write barrier (even though it is a pointer). 3510 itab := s.newValue1(ssa.OpITab, ptrto(Types[TUINT8]), right) 3511 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem()) 3512 case t.IsStruct(): 3513 n := t.NumFields() 3514 for i := 0; i < n; i++ { 3515 ft := t.FieldType(i) 3516 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3517 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3518 s.storeTypeScalars(ft.(*Type), addr, val, 0) 3519 } 3520 case t.IsArray() && t.NumElem() == 0: 3521 // nothing 3522 case t.IsArray() && t.NumElem() == 1: 3523 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0) 3524 default: 3525 s.Fatalf("bad write barrier type %v", t) 3526 } 3527 } 3528 3529 // do *left = right for all pointer parts of t. 3530 func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) { 3531 switch { 3532 case t.IsPtrShaped(): 3533 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem()) 3534 case t.IsString(): 3535 ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right) 3536 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3537 case t.IsSlice(): 3538 ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right) 3539 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3540 case t.IsInterface(): 3541 // itab field is treated as a scalar. 3542 idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right) 3543 idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINT8]), s.config.PtrSize, left) 3544 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem()) 3545 case t.IsStruct(): 3546 n := t.NumFields() 3547 for i := 0; i < n; i++ { 3548 ft := t.FieldType(i) 3549 if !haspointers(ft.(*Type)) { 3550 continue 3551 } 3552 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3553 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3554 s.storeTypePtrs(ft.(*Type), addr, val) 3555 } 3556 case t.IsArray() && t.NumElem() == 0: 3557 // nothing 3558 case t.IsArray() && t.NumElem() == 1: 3559 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3560 default: 3561 s.Fatalf("bad write barrier type %v", t) 3562 } 3563 } 3564 3565 // do *left = right for all pointer parts of t, with write barriers if necessary. 
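// Illustrative sketch (not compiler code): for a slice assignment *p = s,
// the scalar/pointer split works out to
//
//	p.len = s.len // plain stores, emitted by storeTypeScalars
//	p.cap = s.cap
//	p.ptr = s.ptr // emitted below as OpStoreWB, a store with write barrier
//
// Only the pointer word can create a heap reference the GC must observe, so
// it alone pays for the barrier.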
3566 func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) { 3567 switch { 3568 case t.IsPtrShaped(): 3569 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, right, s.mem()) 3570 case t.IsString(): 3571 ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right) 3572 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3573 case t.IsSlice(): 3574 ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right) 3575 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3576 case t.IsInterface(): 3577 // itab field is treated as a scalar. 3578 idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right) 3579 idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINT8]), s.config.PtrSize, left) 3580 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem()) 3581 case t.IsStruct(): 3582 n := t.NumFields() 3583 for i := 0; i < n; i++ { 3584 ft := t.FieldType(i) 3585 if !haspointers(ft.(*Type)) { 3586 continue 3587 } 3588 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3589 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3590 s.storeTypePtrsWB(ft.(*Type), addr, val) 3591 } 3592 case t.IsArray() && t.NumElem() == 0: 3593 // nothing 3594 case t.IsArray() && t.NumElem() == 1: 3595 s.storeTypePtrsWB(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3596 default: 3597 s.Fatalf("bad write barrier type %v", t) 3598 } 3599 } 3600 3601 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result. 3602 // i,j,k may be nil, in which case they are set to their default value. 3603 // t is a slice, ptr to array, or string type. 3604 func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) { 3605 var elemtype *Type 3606 var ptrtype *Type 3607 var ptr *ssa.Value 3608 var len *ssa.Value 3609 var cap *ssa.Value 3610 zero := s.constInt(Types[TINT], 0) 3611 switch { 3612 case t.IsSlice(): 3613 elemtype = t.Elem() 3614 ptrtype = ptrto(elemtype) 3615 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v) 3616 len = s.newValue1(ssa.OpSliceLen, Types[TINT], v) 3617 cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v) 3618 case t.IsString(): 3619 elemtype = Types[TUINT8] 3620 ptrtype = ptrto(elemtype) 3621 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v) 3622 len = s.newValue1(ssa.OpStringLen, Types[TINT], v) 3623 cap = len 3624 case t.IsPtr(): 3625 if !t.Elem().IsArray() { 3626 s.Fatalf("bad ptr to array in slice %v\n", t) 3627 } 3628 elemtype = t.Elem().Elem() 3629 ptrtype = ptrto(elemtype) 3630 s.nilCheck(v) 3631 ptr = v 3632 len = s.constInt(Types[TINT], t.Elem().NumElem()) 3633 cap = len 3634 default: 3635 s.Fatalf("bad type in slice %v\n", t) 3636 } 3637 3638 // Set default values 3639 if i == nil { 3640 i = zero 3641 } 3642 if j == nil { 3643 j = len 3644 } 3645 if k == nil { 3646 k = cap 3647 } 3648 3649 // Panic if slice indices are not in bounds. 3650 s.sliceBoundsCheck(i, j) 3651 if j != k { 3652 s.sliceBoundsCheck(j, k) 3653 } 3654 if k != cap { 3655 s.sliceBoundsCheck(k, cap) 3656 } 3657 3658 // Generate the following code assuming that indexes are in bounds. 3659 // The masking is to make sure that we don't generate a slice 3660 // that points to the next object in memory. 
3661 // rlen = j - i
3662 // rcap = k - i
3663 // delta = i * elemsize
3664 // rptr = p + delta&mask(rcap)
3665 // result = (SliceMake rptr rlen rcap)
3666 // where mask(x) is 0 if x==0 and -1 if x>0.
3667 subOp := s.ssaOp(OSUB, Types[TINT])
3668 mulOp := s.ssaOp(OMUL, Types[TINT])
3669 andOp := s.ssaOp(OAND, Types[TINT])
3670 rlen := s.newValue2(subOp, Types[TINT], j, i)
3671 var rcap *ssa.Value
3672 switch {
3673 case t.IsString():
3674 // Capacity of the result is unimportant. However, we use
3675 // rcap to test if we've generated a zero-length slice.
3676 // Use length of strings for that.
3677 rcap = rlen
3678 case j == k:
3679 rcap = rlen
3680 default:
3681 rcap = s.newValue2(subOp, Types[TINT], k, i)
3682 }
3683 
3684 var rptr *ssa.Value
3685 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
3686 // No pointer arithmetic necessary.
3687 rptr = ptr
3688 } else {
3689 // delta = # of bytes to offset pointer by.
3690 delta := s.newValue2(mulOp, Types[TINT], i, s.constInt(Types[TINT], elemtype.Width))
3691 // If we're slicing to the point where the capacity is zero,
3692 // zero out the delta.
3693 mask := s.newValue1(ssa.OpSlicemask, Types[TINT], rcap)
3694 delta = s.newValue2(andOp, Types[TINT], delta, mask)
3695 // Compute rptr = ptr + delta
3696 rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
3697 }
3698 
3699 return rptr, rlen, rcap
3700 }
3701 
3702 type u642fcvtTab struct {
3703 geq, cvt2F, and, rsh, or, add ssa.Op
3704 one func(*state, ssa.Type, int64) *ssa.Value
3705 }
3706 
3707 var u64_f64 u642fcvtTab = u642fcvtTab{
3708 geq: ssa.OpGeq64,
3709 cvt2F: ssa.OpCvt64to64F,
3710 and: ssa.OpAnd64,
3711 rsh: ssa.OpRsh64Ux64,
3712 or: ssa.OpOr64,
3713 add: ssa.OpAdd64F,
3714 one: (*state).constInt64,
3715 }
3716 
3717 var u64_f32 u642fcvtTab = u642fcvtTab{
3718 geq: ssa.OpGeq64,
3719 cvt2F: ssa.OpCvt64to32F,
3720 and: ssa.OpAnd64,
3721 rsh: ssa.OpRsh64Ux64,
3722 or: ssa.OpOr64,
3723 add: ssa.OpAdd32F,
3724 one: (*state).constInt64,
3725 }
3726 
3727 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3728 return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
3729 }
3730 
3731 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3732 return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
3733 }
3734 
3735 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3736 // if x >= 0 {
3737 // result = (floatY) x
3738 // } else {
3739 // y = uintX(x) ; y = y & 1
3740 // z = uintX(x)
3741 // z = z >> 1
3742 // z = z | y
3743 // result = floatY(z)
3744 // result = result + result
3745 // }
3746 //
3747 // Code borrowed from old code generator.
3748 // What's going on: a large 64-bit "unsigned" looks like a
3749 // negative number to hardware's integer-to-float
3750 // conversion. However, because the mantissa is only
3751 // 63 bits, we don't need the LSB, so instead we do an
3752 // unsigned right shift (divide by two), convert, and
3753 // double. However, before we do that, we need to be
3754 // sure that we do not lose a "1" if that made the
3755 // difference in the resulting rounding. Therefore, we
3756 // preserve it, and OR (not ADD) it back in. The case
3757 // that matters is when the eleven discarded bits are
3758 // equal to 10000000001; that rounds up, and the 1 cannot
3759 // be lost, else it would round down if the LSB of the
3760 // candidate mantissa is 0.
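//
// A plain-Go rendering of the same trick; an illustrative sketch, not part
// of the compiler (the SSA built below is the real implementation):
//
//	func uint64ToFloat64(x uint64) float64 {
//		if int64(x) >= 0 {
//			return float64(int64(x)) // fits in 63 bits; direct conversion
//		}
//		y := x & 1  // preserve the low bit for rounding
//		z := x >> 1 // halve so the value fits in 63 bits
//		z |= y      // OR (not ADD) the rounding bit back in
//		r := float64(int64(z))
//		return r + r // double to undo the halving
//	}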
3761 cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft)) 3762 b := s.endBlock() 3763 b.Kind = ssa.BlockIf 3764 b.SetControl(cmp) 3765 b.Likely = ssa.BranchLikely 3766 3767 bThen := s.f.NewBlock(ssa.BlockPlain) 3768 bElse := s.f.NewBlock(ssa.BlockPlain) 3769 bAfter := s.f.NewBlock(ssa.BlockPlain) 3770 3771 b.AddEdgeTo(bThen) 3772 s.startBlock(bThen) 3773 a0 := s.newValue1(cvttab.cvt2F, tt, x) 3774 s.vars[n] = a0 3775 s.endBlock() 3776 bThen.AddEdgeTo(bAfter) 3777 3778 b.AddEdgeTo(bElse) 3779 s.startBlock(bElse) 3780 one := cvttab.one(s, ft, 1) 3781 y := s.newValue2(cvttab.and, ft, x, one) 3782 z := s.newValue2(cvttab.rsh, ft, x, one) 3783 z = s.newValue2(cvttab.or, ft, z, y) 3784 a := s.newValue1(cvttab.cvt2F, tt, z) 3785 a1 := s.newValue2(cvttab.add, tt, a, a) 3786 s.vars[n] = a1 3787 s.endBlock() 3788 bElse.AddEdgeTo(bAfter) 3789 3790 s.startBlock(bAfter) 3791 return s.variable(n, n.Type) 3792 } 3793 3794 type u322fcvtTab struct { 3795 cvtI2F, cvtF2F ssa.Op 3796 } 3797 3798 var u32_f64 u322fcvtTab = u322fcvtTab{ 3799 cvtI2F: ssa.OpCvt32to64F, 3800 cvtF2F: ssa.OpCopy, 3801 } 3802 3803 var u32_f32 u322fcvtTab = u322fcvtTab{ 3804 cvtI2F: ssa.OpCvt32to32F, 3805 cvtF2F: ssa.OpCvt64Fto32F, 3806 } 3807 3808 func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3809 return s.uint32Tofloat(&u32_f64, n, x, ft, tt) 3810 } 3811 3812 func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3813 return s.uint32Tofloat(&u32_f32, n, x, ft, tt) 3814 } 3815 3816 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3817 // if x >= 0 { 3818 // result = floatY(x) 3819 // } else { 3820 // result = floatY(float64(x) + (1<<32)) 3821 // } 3822 cmp := s.newValue2(ssa.OpGeq32, Types[TBOOL], x, s.zeroVal(ft)) 3823 b := s.endBlock() 3824 b.Kind = ssa.BlockIf 3825 b.SetControl(cmp) 3826 b.Likely = ssa.BranchLikely 3827 3828 bThen := s.f.NewBlock(ssa.BlockPlain) 3829 bElse := s.f.NewBlock(ssa.BlockPlain) 3830 bAfter := s.f.NewBlock(ssa.BlockPlain) 3831 3832 b.AddEdgeTo(bThen) 3833 s.startBlock(bThen) 3834 a0 := s.newValue1(cvttab.cvtI2F, tt, x) 3835 s.vars[n] = a0 3836 s.endBlock() 3837 bThen.AddEdgeTo(bAfter) 3838 3839 b.AddEdgeTo(bElse) 3840 s.startBlock(bElse) 3841 a1 := s.newValue1(ssa.OpCvt32to64F, Types[TFLOAT64], x) 3842 twoToThe32 := s.constFloat64(Types[TFLOAT64], float64(1<<32)) 3843 a2 := s.newValue2(ssa.OpAdd64F, Types[TFLOAT64], a1, twoToThe32) 3844 a3 := s.newValue1(cvttab.cvtF2F, tt, a2) 3845 3846 s.vars[n] = a3 3847 s.endBlock() 3848 bElse.AddEdgeTo(bAfter) 3849 3850 s.startBlock(bAfter) 3851 return s.variable(n, n.Type) 3852 } 3853 3854 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
3855 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { 3856 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { 3857 s.Fatalf("node must be a map or a channel") 3858 } 3859 // if n == nil { 3860 // return 0 3861 // } else { 3862 // // len 3863 // return *((*int)n) 3864 // // cap 3865 // return *(((*int)n)+1) 3866 // } 3867 lenType := n.Type 3868 nilValue := s.constNil(Types[TUINTPTR]) 3869 cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue) 3870 b := s.endBlock() 3871 b.Kind = ssa.BlockIf 3872 b.SetControl(cmp) 3873 b.Likely = ssa.BranchUnlikely 3874 3875 bThen := s.f.NewBlock(ssa.BlockPlain) 3876 bElse := s.f.NewBlock(ssa.BlockPlain) 3877 bAfter := s.f.NewBlock(ssa.BlockPlain) 3878 3879 // length/capacity of a nil map/chan is zero 3880 b.AddEdgeTo(bThen) 3881 s.startBlock(bThen) 3882 s.vars[n] = s.zeroVal(lenType) 3883 s.endBlock() 3884 bThen.AddEdgeTo(bAfter) 3885 3886 b.AddEdgeTo(bElse) 3887 s.startBlock(bElse) 3888 if n.Op == OLEN { 3889 // length is stored in the first word for map/chan 3890 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem()) 3891 } else if n.Op == OCAP { 3892 // capacity is stored in the second word for chan 3893 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) 3894 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem()) 3895 } else { 3896 s.Fatalf("op must be OLEN or OCAP") 3897 } 3898 s.endBlock() 3899 bElse.AddEdgeTo(bAfter) 3900 3901 s.startBlock(bAfter) 3902 return s.variable(n, lenType) 3903 } 3904 3905 type f2uCvtTab struct { 3906 ltf, cvt2U, subf, or ssa.Op 3907 floatValue func(*state, ssa.Type, float64) *ssa.Value 3908 intValue func(*state, ssa.Type, int64) *ssa.Value 3909 cutoff uint64 3910 } 3911 3912 var f32_u64 f2uCvtTab = f2uCvtTab{ 3913 ltf: ssa.OpLess32F, 3914 cvt2U: ssa.OpCvt32Fto64, 3915 subf: ssa.OpSub32F, 3916 or: ssa.OpOr64, 3917 floatValue: (*state).constFloat32, 3918 intValue: (*state).constInt64, 3919 cutoff: 9223372036854775808, 3920 } 3921 3922 var f64_u64 f2uCvtTab = f2uCvtTab{ 3923 ltf: ssa.OpLess64F, 3924 cvt2U: ssa.OpCvt64Fto64, 3925 subf: ssa.OpSub64F, 3926 or: ssa.OpOr64, 3927 floatValue: (*state).constFloat64, 3928 intValue: (*state).constInt64, 3929 cutoff: 9223372036854775808, 3930 } 3931 3932 var f32_u32 f2uCvtTab = f2uCvtTab{ 3933 ltf: ssa.OpLess32F, 3934 cvt2U: ssa.OpCvt32Fto32, 3935 subf: ssa.OpSub32F, 3936 or: ssa.OpOr32, 3937 floatValue: (*state).constFloat32, 3938 intValue: func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 3939 cutoff: 2147483648, 3940 } 3941 3942 var f64_u32 f2uCvtTab = f2uCvtTab{ 3943 ltf: ssa.OpLess64F, 3944 cvt2U: ssa.OpCvt64Fto32, 3945 subf: ssa.OpSub64F, 3946 or: ssa.OpOr32, 3947 floatValue: (*state).constFloat64, 3948 intValue: func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 3949 cutoff: 2147483648, 3950 } 3951 3952 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3953 return s.floatToUint(&f32_u64, n, x, ft, tt) 3954 } 3955 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3956 return s.floatToUint(&f64_u64, n, x, ft, tt) 3957 } 3958 3959 func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3960 return s.floatToUint(&f32_u32, n, x, ft, tt) 3961 } 3962 3963 func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3964 return s.floatToUint(&f64_u32, n, x, ft, tt) 3965 } 3966 3967 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) 
*ssa.Value { 3968 // cutoff:=1<<(intY_Size-1) 3969 // if x < floatX(cutoff) { 3970 // result = uintY(x) 3971 // } else { 3972 // y = x - floatX(cutoff) 3973 // z = uintY(y) 3974 // result = z | -(cutoff) 3975 // } 3976 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) 3977 cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, cutoff) 3978 b := s.endBlock() 3979 b.Kind = ssa.BlockIf 3980 b.SetControl(cmp) 3981 b.Likely = ssa.BranchLikely 3982 3983 bThen := s.f.NewBlock(ssa.BlockPlain) 3984 bElse := s.f.NewBlock(ssa.BlockPlain) 3985 bAfter := s.f.NewBlock(ssa.BlockPlain) 3986 3987 b.AddEdgeTo(bThen) 3988 s.startBlock(bThen) 3989 a0 := s.newValue1(cvttab.cvt2U, tt, x) 3990 s.vars[n] = a0 3991 s.endBlock() 3992 bThen.AddEdgeTo(bAfter) 3993 3994 b.AddEdgeTo(bElse) 3995 s.startBlock(bElse) 3996 y := s.newValue2(cvttab.subf, ft, x, cutoff) 3997 y = s.newValue1(cvttab.cvt2U, tt, y) 3998 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) 3999 a1 := s.newValue2(cvttab.or, tt, y, z) 4000 s.vars[n] = a1 4001 s.endBlock() 4002 bElse.AddEdgeTo(bAfter) 4003 4004 s.startBlock(bAfter) 4005 return s.variable(n, n.Type) 4006 } 4007 4008 // dottype generates SSA for a type assertion node. 4009 // commaok indicates whether to panic or return a bool. 4010 // If commaok is false, resok will be nil. 4011 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { 4012 iface := s.expr(n.Left) // input interface 4013 target := s.expr(typename(n.Type)) // target type 4014 byteptr := ptrto(Types[TUINT8]) 4015 4016 if n.Type.IsInterface() { 4017 if n.Type.IsEmptyInterface() { 4018 // Converting to an empty interface. 4019 // Input could be an empty or nonempty interface. 4020 if Debug_typeassert > 0 { 4021 Warnl(n.Pos, "type assertion inlined") 4022 } 4023 4024 // Get itab/type field from input. 4025 itab := s.newValue1(ssa.OpITab, byteptr, iface) 4026 // Conversion succeeds iff that field is not nil. 4027 cond := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], itab, s.constNil(byteptr)) 4028 4029 if n.Left.Type.IsEmptyInterface() && commaok { 4030 // Converting empty interface to empty interface with ,ok is just a nil check. 4031 return iface, cond 4032 } 4033 4034 // Branch on nilness. 4035 b := s.endBlock() 4036 b.Kind = ssa.BlockIf 4037 b.SetControl(cond) 4038 b.Likely = ssa.BranchLikely 4039 bOk := s.f.NewBlock(ssa.BlockPlain) 4040 bFail := s.f.NewBlock(ssa.BlockPlain) 4041 b.AddEdgeTo(bOk) 4042 b.AddEdgeTo(bFail) 4043 4044 if !commaok { 4045 // On failure, panic by calling panicnildottype. 4046 s.startBlock(bFail) 4047 s.rtcall(panicnildottype, false, nil, target) 4048 4049 // On success, return (perhaps modified) input interface. 4050 s.startBlock(bOk) 4051 if n.Left.Type.IsEmptyInterface() { 4052 res = iface // Use input interface unchanged. 4053 return 4054 } 4055 // Load type out of itab, build interface with existing idata. 4056 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4057 typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4058 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4059 res = s.newValue2(ssa.OpIMake, n.Type, typ, idata) 4060 return 4061 } 4062 4063 s.startBlock(bOk) 4064 // nonempty -> empty 4065 // Need to load type from itab 4066 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4067 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4068 s.endBlock() 4069 4070 // itab is nil, might as well use that as the nil result. 4071 s.startBlock(bFail) 4072 s.vars[&typVar] = itab 4073 s.endBlock() 4074 4075 // Merge point. 
4076 bEnd := s.f.NewBlock(ssa.BlockPlain)
4077 bOk.AddEdgeTo(bEnd)
4078 bFail.AddEdgeTo(bEnd)
4079 s.startBlock(bEnd)
4080 idata := s.newValue1(ssa.OpIData, n.Type, iface)
4081 res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
4082 resok = cond
4083 delete(s.vars, &typVar)
4084 return
4085 }
4086 // Converting to a nonempty interface needs a runtime call.
4087 if Debug_typeassert > 0 {
4088 Warnl(n.Pos, "type assertion not inlined")
4089 }
4090 if n.Left.Type.IsEmptyInterface() {
4091 if commaok {
4092 call := s.rtcall(assertE2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface)
4093 return call[0], call[1]
4094 }
4095 return s.rtcall(assertE2I, true, []*Type{n.Type}, target, iface)[0], nil
4096 }
4097 if commaok {
4098 call := s.rtcall(assertI2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface)
4099 return call[0], call[1]
4100 }
4101 return s.rtcall(assertI2I, true, []*Type{n.Type}, target, iface)[0], nil
4102 }
4103 
4104 if Debug_typeassert > 0 {
4105 Warnl(n.Pos, "type assertion inlined")
4106 }
4107 
4108 // Converting to a concrete type.
4109 direct := isdirectiface(n.Type)
4110 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
4111 
4112 
4113 
4114 var targetITab *ssa.Value
4115 if n.Left.Type.IsEmptyInterface() {
4116 // Looking for pointer to target type.
4117 targetITab = target
4118 } else {
4119 // Looking for pointer to itab for target type and source interface.
4120 targetITab = s.expr(itabname(n.Type, n.Left.Type))
4121 }
4122 
4123 var tmp *Node // temporary for use with large types
4124 var addr *ssa.Value // address of tmp
4125 if commaok && !canSSAType(n.Type) {
4126 // unSSAable type, use temporary.
4127 // TODO: get rid of some of these temporaries.
4128 tmp = temp(n.Type)
4129 addr, _ = s.addr(tmp, false)
4130 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
4131 }
4132 
4133 cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], itab, targetITab)
4134 b := s.endBlock()
4135 b.Kind = ssa.BlockIf
4136 b.SetControl(cond)
4137 b.Likely = ssa.BranchLikely
4138 
4139 bOk := s.f.NewBlock(ssa.BlockPlain)
4140 bFail := s.f.NewBlock(ssa.BlockPlain)
4141 b.AddEdgeTo(bOk)
4142 b.AddEdgeTo(bFail)
4143 
4144 if !commaok {
4145 // on failure, panic by calling panicdottype
4146 s.startBlock(bFail)
4147 taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: Linksym(typenamesym(n.Left.Type))}, s.sb)
4148 if n.Left.Type.IsEmptyInterface() {
4149 s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
4150 } else {
4151 s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
4152 }
4153 
4154 // on success, return data from interface
4155 s.startBlock(bOk)
4156 if direct {
4157 return s.newValue1(ssa.OpIData, n.Type, iface), nil
4158 }
4159 p := s.newValue1(ssa.OpIData, ptrto(n.Type), iface)
4160 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil
4161 }
4162 
4163 // commaok is the more complicated case because we have
4164 // a control flow merge point.
4165 bEnd := s.f.NewBlock(ssa.BlockPlain)
4166 // Note that we need a new valVar each time (unlike okVar where we can
4167 // reuse the variable) because it might have a different type every time.
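// Illustrative sketch (not compiler code) of the expansion built below for
// v, ok := i.(T) with a concrete T; itabWord, dataWord, and zeroT are
// pseudo-names for values computed by the surrounding code:
//
//	if itabWord(i) == targetITab {
//		v, ok = dataWord(i), true // used directly, loaded, or copied to tmp
//	} else {
//		v, ok = zeroT, false
//	}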
4168 valVar := &Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "val"}} 4169 4170 // type assertion succeeded 4171 s.startBlock(bOk) 4172 if tmp == nil { 4173 if direct { 4174 s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface) 4175 } else { 4176 p := s.newValue1(ssa.OpIData, ptrto(n.Type), iface) 4177 s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 4178 } 4179 } else { 4180 p := s.newValue1(ssa.OpIData, ptrto(n.Type), iface) 4181 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(n.Type), addr, p, s.mem()) 4182 } 4183 s.vars[&okVar] = s.constBool(true) 4184 s.endBlock() 4185 bOk.AddEdgeTo(bEnd) 4186 4187 // type assertion failed 4188 s.startBlock(bFail) 4189 if tmp == nil { 4190 s.vars[valVar] = s.zeroVal(n.Type) 4191 } else { 4192 s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(n.Type), addr, s.mem()) 4193 } 4194 s.vars[&okVar] = s.constBool(false) 4195 s.endBlock() 4196 bFail.AddEdgeTo(bEnd) 4197 4198 // merge point 4199 s.startBlock(bEnd) 4200 if tmp == nil { 4201 res = s.variable(valVar, n.Type) 4202 delete(s.vars, valVar) 4203 } else { 4204 res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 4205 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem()) 4206 } 4207 resok = s.variable(&okVar, Types[TBOOL]) 4208 delete(s.vars, &okVar) 4209 return res, resok 4210 } 4211 4212 // checkgoto checks that a goto from from to to does not 4213 // jump into a block or jump over variable declarations. 4214 func (s *state) checkgoto(from *Node, to *Node) { 4215 if from.Op != OGOTO || to.Op != OLABEL { 4216 Fatalf("bad from/to in checkgoto: %v -> %v", from, to) 4217 } 4218 4219 // from and to's Sym fields record dclstack's value at their 4220 // position, which implicitly encodes their block nesting 4221 // level and variable declaration position within that block. 4222 // 4223 // For valid gotos, to.Sym will be a tail of from.Sym. 4224 // Otherwise, any link in to.Sym not also in from.Sym 4225 // indicates a block/declaration being jumped into/over. 4226 // 4227 // TODO(mdempsky): We should only complain about jumping over 4228 // variable declarations, but currently we reject type and 4229 // constant declarations too (#8042). 4230 4231 if from.Sym == to.Sym { 4232 return 4233 } 4234 4235 nf := dcldepth(from.Sym) 4236 nt := dcldepth(to.Sym) 4237 4238 // Unwind from.Sym so it's no longer than to.Sym. It's okay to 4239 // jump out of blocks or backwards past variable declarations. 4240 fs := from.Sym 4241 for ; nf > nt; nf-- { 4242 fs = fs.Link 4243 } 4244 4245 if fs == to.Sym { 4246 return 4247 } 4248 4249 // Decide what to complain about. Unwind to.Sym until where it 4250 // forked from from.Sym, and keep track of the innermost block 4251 // and declaration we jumped into/over. 4252 var block *Sym 4253 var dcl *Sym 4254 4255 // If to.Sym is longer, unwind until it's the same length. 4256 ts := to.Sym 4257 for ; nt > nf; nt-- { 4258 if ts.Pkg == nil { 4259 block = ts 4260 } else { 4261 dcl = ts 4262 } 4263 ts = ts.Link 4264 } 4265 4266 // Same length; unwind until we find their common ancestor. 4267 for ts != fs { 4268 if ts.Pkg == nil { 4269 block = ts 4270 } else { 4271 dcl = ts 4272 } 4273 ts = ts.Link 4274 fs = fs.Link 4275 } 4276 4277 // Prefer to complain about 'into block' over declarations. 
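// For example (a sketch of user code this check rejects):
//
//	goto L // error: goto L jumps over declaration of x
//	x := 1
//	L:
//	_ = x
//
//	goto M // error: goto M jumps into block
//	{
//	M:
//	}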
4278 lno := from.Left.Pos 4279 if block != nil { 4280 yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno)) 4281 } else { 4282 yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno)) 4283 } 4284 } 4285 4286 // dcldepth returns the declaration depth for a dclstack Sym; that is, 4287 // the sum of the block nesting level and the number of declarations 4288 // in scope. 4289 func dcldepth(s *Sym) int { 4290 n := 0 4291 for ; s != nil; s = s.Link { 4292 n++ 4293 } 4294 return n 4295 } 4296 4297 // variable returns the value of a variable at the current location. 4298 func (s *state) variable(name *Node, t ssa.Type) *ssa.Value { 4299 v := s.vars[name] 4300 if v != nil { 4301 return v 4302 } 4303 v = s.fwdVars[name] 4304 if v != nil { 4305 return v 4306 } 4307 4308 if s.curBlock == s.f.Entry { 4309 // No variable should be live at entry. 4310 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v) 4311 } 4312 // Make a FwdRef, which records a value that's live on block input. 4313 // We'll find the matching definition as part of insertPhis. 4314 v = s.newValue0A(ssa.OpFwdRef, t, name) 4315 s.fwdVars[name] = v 4316 s.addNamedValue(name, v) 4317 return v 4318 } 4319 4320 func (s *state) mem() *ssa.Value { 4321 return s.variable(&memVar, ssa.TypeMem) 4322 } 4323 4324 func (s *state) addNamedValue(n *Node, v *ssa.Value) { 4325 if n.Class == Pxxx { 4326 // Don't track our dummy nodes (&memVar etc.). 4327 return 4328 } 4329 if n.IsAutoTmp() { 4330 // Don't track temporary variables. 4331 return 4332 } 4333 if n.Class == PPARAMOUT { 4334 // Don't track named output values. This prevents return values 4335 // from being assigned too early. See #14591 and #14762. TODO: allow this. 4336 return 4337 } 4338 if n.Class == PAUTO && n.Xoffset != 0 { 4339 s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) 4340 } 4341 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} 4342 values, ok := s.f.NamedValues[loc] 4343 if !ok { 4344 s.f.Names = append(s.f.Names, loc) 4345 } 4346 s.f.NamedValues[loc] = append(values, v) 4347 } 4348 4349 // Branch is an unresolved branch. 4350 type Branch struct { 4351 P *obj.Prog // branch instruction 4352 B *ssa.Block // target 4353 } 4354 4355 // SSAGenState contains state needed during Prog generation. 4356 type SSAGenState struct { 4357 // Branches remembers all the branch instructions we've seen 4358 // and where they would like to go. 4359 Branches []Branch 4360 4361 // bstart remembers where each block starts (indexed by block ID) 4362 bstart []*obj.Prog 4363 4364 // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 4365 SSEto387 map[int16]int16 4366 // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. 4367 ScratchFpMem *Node 4368 } 4369 4370 // Pc returns the current Prog. 4371 func (s *SSAGenState) Pc() *obj.Prog { 4372 return pc 4373 } 4374 4375 // SetPos sets the current source position. 4376 func (s *SSAGenState) SetPos(pos src.XPos) { 4377 lineno = pos 4378 } 4379 4380 // genssa appends entries to ptxt for each instruction in f. 4381 // gcargs and gclocals are filled in with pointer maps for the frame. 4382 func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) { 4383 var s SSAGenState 4384 4385 e := f.Config.Frontend().(*ssaExport) 4386 4387 // Remember where each block starts. 
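// Branch resolution is two-phase (a sketch of the flow below): while blocks
// are emitted, each block records its first Prog in s.bstart and every
// branch records its desired target block; after all blocks have been
// emitted, the branches are patched:
//
//	s.bstart[b.ID] = pc             // when block b starts
//	br.P.To.Val = s.bstart[br.B.ID] // once every block's start is known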
4388 s.bstart = make([]*obj.Prog, f.NumBlocks())
4389 
4390 var valueProgs map[*obj.Prog]*ssa.Value
4391 var blockProgs map[*obj.Prog]*ssa.Block
4392 var logProgs = e.log
4393 if logProgs {
4394 valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
4395 blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
4396 f.Logf("genssa %s\n", f.Name)
4397 blockProgs[pc] = f.Blocks[0]
4398 }
4399 
4400 if Thearch.Use387 {
4401 s.SSEto387 = map[int16]int16{}
4402 }
4403 
4404 s.ScratchFpMem = scratchFpMem
4405 scratchFpMem = nil
4406 
4407 // Emit basic blocks
4408 for i, b := range f.Blocks {
4409 s.bstart[b.ID] = pc
4410 // Emit values in block
4411 Thearch.SSAMarkMoves(&s, b)
4412 for _, v := range b.Values {
4413 x := pc
4414 Thearch.SSAGenValue(&s, v)
4415 if logProgs {
4416 for ; x != pc; x = x.Link {
4417 valueProgs[x] = v
4418 }
4419 }
4420 }
4421 // Emit control flow instructions for block
4422 var next *ssa.Block
4423 if i < len(f.Blocks)-1 && Debug['N'] == 0 {
4424 // If -N, leave next==nil so every block with successors
4425 // ends in a JMP (except call blocks - plive doesn't like
4426 // select{send,recv} followed by a JMP call). Helps keep
4427 // line numbers for otherwise empty blocks.
4428 next = f.Blocks[i+1]
4429 }
4430 x := pc
4431 Thearch.SSAGenBlock(&s, b, next)
4432 if logProgs {
4433 for ; x != pc; x = x.Link {
4434 blockProgs[x] = b
4435 }
4436 }
4437 }
4438 
4439 // Resolve branches
4440 for _, br := range s.Branches {
4441 br.P.To.Val = s.bstart[br.B.ID]
4442 }
4443 
4444 if logProgs {
4445 for p := ptxt; p != nil; p = p.Link {
4446 var s string
4447 if v, ok := valueProgs[p]; ok {
4448 s = v.String()
4449 } else if b, ok := blockProgs[p]; ok {
4450 s = b.String()
4451 } else {
4452 s = " " // most value and branch strings are 2-3 characters long
4453 }
4454 f.Logf("%s\t%s\n", s, p)
4455 }
4456 if f.Config.HTML != nil {
4457 // LineHist is defunct now - this code won't do
4458 // anything.
4459 // TODO: fix this (ideally without a global variable)
4460 // saved := ptxt.Ctxt.LineHist.PrintFilenameOnly
4461 // ptxt.Ctxt.LineHist.PrintFilenameOnly = true
4462 var buf bytes.Buffer
4463 buf.WriteString("<code>")
4464 buf.WriteString("<dl class=\"ssa-gen\">")
4465 for p := ptxt; p != nil; p = p.Link {
4466 buf.WriteString("<dt class=\"ssa-prog-src\">")
4467 if v, ok := valueProgs[p]; ok {
4468 buf.WriteString(v.HTML())
4469 } else if b, ok := blockProgs[p]; ok {
4470 buf.WriteString(b.HTML())
4471 }
4472 buf.WriteString("</dt>")
4473 buf.WriteString("<dd class=\"ssa-prog\">")
4474 buf.WriteString(html.EscapeString(p.String()))
4475 buf.WriteString("</dd>")
4476 
4477 }
4478 buf.WriteString("</dl>")
4479 buf.WriteString("</code>")
4480 f.Config.HTML.WriteColumn("genssa", buf.String())
4481 // ptxt.Ctxt.LineHist.PrintFilenameOnly = saved
4482 }
4483 }
4484 
4485 // Generate gc bitmaps.
4486 liveness(Curfn, ptxt, gcargs, gclocals)
4487 
4488 // Add frame prologue. Zero ambiguously live variables.
4489 Thearch.Defframe(ptxt)
4490 if Debug['f'] != 0 {
4491 frame(0)
4492 }
4493 
4494 // Remove leftover instrumentation from the instruction stream.
4495 removevardef(ptxt) 4496 4497 f.Config.HTML.Close() 4498 f.Config.HTML = nil 4499 } 4500 4501 type FloatingEQNEJump struct { 4502 Jump obj.As 4503 Index int 4504 } 4505 4506 func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch { 4507 p := Prog(jumps.Jump) 4508 p.To.Type = obj.TYPE_BRANCH 4509 to := jumps.Index 4510 branches = append(branches, Branch{p, b.Succs[to].Block()}) 4511 if to == 1 { 4512 likely = -likely 4513 } 4514 // liblink reorders the instruction stream as it sees fit. 4515 // Pass along what we know so liblink can make use of it. 4516 // TODO: Once we've fully switched to SSA, 4517 // make liblink leave our output alone. 4518 switch likely { 4519 case ssa.BranchUnlikely: 4520 p.From.Type = obj.TYPE_CONST 4521 p.From.Offset = 0 4522 case ssa.BranchLikely: 4523 p.From.Type = obj.TYPE_CONST 4524 p.From.Offset = 1 4525 } 4526 return branches 4527 } 4528 4529 func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) { 4530 likely := b.Likely 4531 switch next { 4532 case b.Succs[0].Block(): 4533 s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches) 4534 s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches) 4535 case b.Succs[1].Block(): 4536 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4537 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4538 default: 4539 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4540 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4541 q := Prog(obj.AJMP) 4542 q.To.Type = obj.TYPE_BRANCH 4543 s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) 4544 } 4545 } 4546 4547 func AuxOffset(v *ssa.Value) (offset int64) { 4548 if v.Aux == nil { 4549 return 0 4550 } 4551 switch sym := v.Aux.(type) { 4552 4553 case *ssa.AutoSymbol: 4554 n := sym.Node.(*Node) 4555 return n.Xoffset 4556 } 4557 return 0 4558 } 4559 4560 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a. 4561 func AddAux(a *obj.Addr, v *ssa.Value) { 4562 AddAux2(a, v, v.AuxInt) 4563 } 4564 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { 4565 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR { 4566 v.Fatalf("bad AddAux addr %v", a) 4567 } 4568 // add integer offset 4569 a.Offset += offset 4570 4571 // If no additional symbol offset, we're done. 4572 if v.Aux == nil { 4573 return 4574 } 4575 // Add symbol's offset from its base register. 4576 switch sym := v.Aux.(type) { 4577 case *ssa.ExternSymbol: 4578 a.Name = obj.NAME_EXTERN 4579 a.Sym = sym.Sym 4580 case *ssa.ArgSymbol: 4581 n := sym.Node.(*Node) 4582 a.Name = obj.NAME_PARAM 4583 a.Node = n 4584 a.Sym = Linksym(n.Orig.Sym) 4585 a.Offset += n.Xoffset 4586 case *ssa.AutoSymbol: 4587 n := sym.Node.(*Node) 4588 a.Name = obj.NAME_AUTO 4589 a.Node = n 4590 a.Sym = Linksym(n.Sym) 4591 a.Offset += n.Xoffset 4592 default: 4593 v.Fatalf("aux in %s not implemented %#v", v, v.Aux) 4594 } 4595 } 4596 4597 // sizeAlignAuxInt returns an AuxInt encoding the size and alignment of type t. 4598 func sizeAlignAuxInt(t *Type) int64 { 4599 return ssa.MakeSizeAndAlign(t.Size(), t.Alignment()).Int64() 4600 } 4601 4602 // extendIndex extends v to a full int width. 4603 // panic using the given function if v does not fit in an int (only on 32-bit archs). 
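// Illustrative sketch (not compiler code): on a 32-bit arch, an int64 index
// i is conceptually lowered to
//
//	if uint32(i>>32) != 0 {
//		panicfn() // the high word must be zero for i to fit in an int
//	}
//	idx := int32(i)
//
// On 64-bit archs, narrower indexes are simply sign- or zero-extended.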
4604 func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value { 4605 size := v.Type.Size() 4606 if size == s.config.IntSize { 4607 return v 4608 } 4609 if size > s.config.IntSize { 4610 // truncate 64-bit indexes on 32-bit pointer archs. Test the 4611 // high word and branch to out-of-bounds failure if it is not 0. 4612 if Debug['B'] == 0 { 4613 hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v) 4614 cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0)) 4615 s.check(cmp, panicfn) 4616 } 4617 return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v) 4618 } 4619 4620 // Extend value to the required size 4621 var op ssa.Op 4622 if v.Type.IsSigned() { 4623 switch 10*size + s.config.IntSize { 4624 case 14: 4625 op = ssa.OpSignExt8to32 4626 case 18: 4627 op = ssa.OpSignExt8to64 4628 case 24: 4629 op = ssa.OpSignExt16to32 4630 case 28: 4631 op = ssa.OpSignExt16to64 4632 case 48: 4633 op = ssa.OpSignExt32to64 4634 default: 4635 s.Fatalf("bad signed index extension %s", v.Type) 4636 } 4637 } else { 4638 switch 10*size + s.config.IntSize { 4639 case 14: 4640 op = ssa.OpZeroExt8to32 4641 case 18: 4642 op = ssa.OpZeroExt8to64 4643 case 24: 4644 op = ssa.OpZeroExt16to32 4645 case 28: 4646 op = ssa.OpZeroExt16to64 4647 case 48: 4648 op = ssa.OpZeroExt32to64 4649 default: 4650 s.Fatalf("bad unsigned index extension %s", v.Type) 4651 } 4652 } 4653 return s.newValue1(op, Types[TINT], v) 4654 } 4655 4656 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values. 4657 // Called during ssaGenValue. 4658 func CheckLoweredPhi(v *ssa.Value) { 4659 if v.Op != ssa.OpPhi { 4660 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString()) 4661 } 4662 if v.Type.IsMemory() { 4663 return 4664 } 4665 f := v.Block.Func 4666 loc := f.RegAlloc[v.ID] 4667 for _, a := range v.Args { 4668 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? 4669 v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func) 4670 } 4671 } 4672 } 4673 4674 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block. 4675 // The output of LoweredGetClosurePtr is generally hardwired to the correct register. 4676 // That register contains the closure pointer on closure entry. 4677 func CheckLoweredGetClosurePtr(v *ssa.Value) { 4678 entry := v.Block.Func.Entry 4679 if entry != v.Block || entry.Values[0] != v { 4680 Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) 4681 } 4682 } 4683 4684 // KeepAlive marks the variable referenced by OpKeepAlive as live. 4685 // Called during ssaGenValue. 4686 func KeepAlive(v *ssa.Value) { 4687 if v.Op != ssa.OpKeepAlive { 4688 v.Fatalf("KeepAlive called with non-KeepAlive value: %v", v.LongString()) 4689 } 4690 if !v.Args[0].Type.IsPtrShaped() { 4691 v.Fatalf("keeping non-pointer alive %v", v.Args[0]) 4692 } 4693 n, _ := AutoVar(v.Args[0]) 4694 if n == nil { 4695 v.Fatalf("KeepAlive with non-spilled value %s %s", v, v.Args[0]) 4696 } 4697 // Note: KeepAlive arg may be a small part of a larger variable n. We keep the 4698 // whole variable n alive at this point. (Typically, this happens when 4699 // we are requested to keep the idata portion of an interface{} alive, and 4700 // we end up keeping the whole interface{} alive. That's ok.) 4701 Gvarlive(n) 4702 } 4703 4704 // AutoVar returns a *Node and int64 representing the auto variable and offset within it 4705 // where v should be spilled. 
4706 func AutoVar(v *ssa.Value) (*Node, int64) { 4707 loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) 4708 if v.Type.Size() > loc.Type.Size() { 4709 v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) 4710 } 4711 return loc.N.(*Node), loc.Off 4712 } 4713 4714 func AddrAuto(a *obj.Addr, v *ssa.Value) { 4715 n, off := AutoVar(v) 4716 a.Type = obj.TYPE_MEM 4717 a.Node = n 4718 a.Sym = Linksym(n.Sym) 4719 a.Offset = n.Xoffset + off 4720 if n.Class == PPARAM || n.Class == PPARAMOUT { 4721 a.Name = obj.NAME_PARAM 4722 } else { 4723 a.Name = obj.NAME_AUTO 4724 } 4725 } 4726 4727 func (s *SSAGenState) AddrScratch(a *obj.Addr) { 4728 if s.ScratchFpMem == nil { 4729 panic("no scratch memory available; forgot to declare usesScratch for Op?") 4730 } 4731 a.Type = obj.TYPE_MEM 4732 a.Name = obj.NAME_AUTO 4733 a.Node = s.ScratchFpMem 4734 a.Sym = Linksym(s.ScratchFpMem.Sym) 4735 a.Reg = int16(Thearch.REGSP) 4736 a.Offset = s.ScratchFpMem.Xoffset 4737 } 4738 4739 // fieldIdx finds the index of the field referred to by the ODOT node n. 4740 func fieldIdx(n *Node) int { 4741 t := n.Left.Type 4742 f := n.Sym 4743 if !t.IsStruct() { 4744 panic("ODOT's LHS is not a struct") 4745 } 4746 4747 var i int 4748 for _, t1 := range t.Fields().Slice() { 4749 if t1.Sym != f { 4750 i++ 4751 continue 4752 } 4753 if t1.Offset != n.Xoffset { 4754 panic("field offset doesn't match") 4755 } 4756 return i 4757 } 4758 panic(fmt.Sprintf("can't find field in expr %v\n", n)) 4759 4760 // TODO: keep the result of this function somewhere in the ODOT Node 4761 // so we don't have to recompute it each time we need it. 4762 } 4763 4764 // ssaExport exports a bunch of compiler services for the ssa backend. 4765 type ssaExport struct { 4766 log bool 4767 } 4768 4769 func (s *ssaExport) TypeBool() ssa.Type { return Types[TBOOL] } 4770 func (s *ssaExport) TypeInt8() ssa.Type { return Types[TINT8] } 4771 func (s *ssaExport) TypeInt16() ssa.Type { return Types[TINT16] } 4772 func (s *ssaExport) TypeInt32() ssa.Type { return Types[TINT32] } 4773 func (s *ssaExport) TypeInt64() ssa.Type { return Types[TINT64] } 4774 func (s *ssaExport) TypeUInt8() ssa.Type { return Types[TUINT8] } 4775 func (s *ssaExport) TypeUInt16() ssa.Type { return Types[TUINT16] } 4776 func (s *ssaExport) TypeUInt32() ssa.Type { return Types[TUINT32] } 4777 func (s *ssaExport) TypeUInt64() ssa.Type { return Types[TUINT64] } 4778 func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] } 4779 func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] } 4780 func (s *ssaExport) TypeInt() ssa.Type { return Types[TINT] } 4781 func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] } 4782 func (s *ssaExport) TypeString() ssa.Type { return Types[TSTRING] } 4783 func (s *ssaExport) TypeBytePtr() ssa.Type { return ptrto(Types[TUINT8]) } 4784 4785 // StringData returns a symbol (a *Sym wrapped in an interface) which 4786 // is the data component of a global string constant containing s. 4787 func (*ssaExport) StringData(s string) interface{} { 4788 // TODO: is idealstring correct? It might not matter... 
4789 data := stringsym(s) 4790 return &ssa.ExternSymbol{Typ: idealstring, Sym: data} 4791 } 4792 4793 func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode { 4794 n := temp(t.(*Type)) // Note: adds new auto to Curfn.Func.Dcl list 4795 return n 4796 } 4797 4798 func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4799 n := name.N.(*Node) 4800 ptrType := ptrto(Types[TUINT8]) 4801 lenType := Types[TINT] 4802 if n.Class == PAUTO && !n.Addrtaken { 4803 // Split this string up into two separate variables. 4804 p := e.namedAuto(n.Sym.Name+".ptr", ptrType) 4805 l := e.namedAuto(n.Sym.Name+".len", lenType) 4806 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0} 4807 } 4808 // Return the two parts of the larger variable. 4809 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)} 4810 } 4811 4812 func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4813 n := name.N.(*Node) 4814 t := ptrto(Types[TUINT8]) 4815 if n.Class == PAUTO && !n.Addrtaken { 4816 // Split this interface up into two separate variables. 4817 f := ".itab" 4818 if n.Type.IsEmptyInterface() { 4819 f = ".type" 4820 } 4821 c := e.namedAuto(n.Sym.Name+f, t) 4822 d := e.namedAuto(n.Sym.Name+".data", t) 4823 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4824 } 4825 // Return the two parts of the larger variable. 4826 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)} 4827 } 4828 4829 func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { 4830 n := name.N.(*Node) 4831 ptrType := ptrto(name.Type.ElemType().(*Type)) 4832 lenType := Types[TINT] 4833 if n.Class == PAUTO && !n.Addrtaken { 4834 // Split this slice up into three separate variables. 4835 p := e.namedAuto(n.Sym.Name+".ptr", ptrType) 4836 l := e.namedAuto(n.Sym.Name+".len", lenType) 4837 c := e.namedAuto(n.Sym.Name+".cap", lenType) 4838 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0} 4839 } 4840 // Return the three parts of the larger variable. 4841 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, 4842 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}, 4843 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)} 4844 } 4845 4846 func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4847 n := name.N.(*Node) 4848 s := name.Type.Size() / 2 4849 var t *Type 4850 if s == 8 { 4851 t = Types[TFLOAT64] 4852 } else { 4853 t = Types[TFLOAT32] 4854 } 4855 if n.Class == PAUTO && !n.Addrtaken { 4856 // Split this complex up into two separate variables. 4857 c := e.namedAuto(n.Sym.Name+".real", t) 4858 d := e.namedAuto(n.Sym.Name+".imag", t) 4859 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4860 } 4861 // Return the two parts of the larger variable. 4862 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s} 4863 } 4864 4865 func (e *ssaExport) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4866 n := name.N.(*Node) 4867 var t *Type 4868 if name.Type.IsSigned() { 4869 t = Types[TINT32] 4870 } else { 4871 t = Types[TUINT32] 4872 } 4873 if n.Class == PAUTO && !n.Addrtaken { 4874 // Split this int64 up into two separate variables. 
4875 h := e.namedAuto(n.Sym.Name+".hi", t) 4876 l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32]) 4877 return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0} 4878 } 4879 // Return the two parts of the larger variable. 4880 if Thearch.LinkArch.ByteOrder == binary.BigEndian { 4881 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off + 4} 4882 } 4883 return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off} 4884 } 4885 4886 func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { 4887 n := name.N.(*Node) 4888 st := name.Type 4889 ft := st.FieldType(i) 4890 if n.Class == PAUTO && !n.Addrtaken { 4891 // Note: the _ field may appear several times. But 4892 // have no fear, identically-named but distinct Autos are 4893 // ok, albeit maybe confusing for a debugger. 4894 x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft) 4895 return ssa.LocalSlot{N: x, Type: ft, Off: 0} 4896 } 4897 return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)} 4898 } 4899 4900 func (e *ssaExport) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { 4901 n := name.N.(*Node) 4902 at := name.Type 4903 if at.NumElem() != 1 { 4904 Fatalf("bad array size") 4905 } 4906 et := at.ElemType() 4907 if n.Class == PAUTO && !n.Addrtaken { 4908 x := e.namedAuto(n.Sym.Name+"[0]", et) 4909 return ssa.LocalSlot{N: x, Type: et, Off: 0} 4910 } 4911 return ssa.LocalSlot{N: n, Type: et, Off: name.Off} 4912 } 4913 4914 // namedAuto returns a new AUTO variable with the given name and type. 4915 // These are exposed to the debugger. 4916 func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode { 4917 t := typ.(*Type) 4918 s := &Sym{Name: name, Pkg: localpkg} 4919 n := nod(ONAME, nil, nil) 4920 s.Def = n 4921 s.Def.Used = true 4922 n.Sym = s 4923 n.Type = t 4924 n.Class = PAUTO 4925 n.Addable = true 4926 n.Ullman = 1 4927 n.Esc = EscNever 4928 n.Xoffset = 0 4929 n.Name.Curfn = Curfn 4930 Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) 4931 4932 dowidth(t) 4933 return n 4934 } 4935 4936 func (e *ssaExport) CanSSA(t ssa.Type) bool { 4937 return canSSAType(t.(*Type)) 4938 } 4939 4940 func (e *ssaExport) Line(pos src.XPos) string { 4941 return linestr(pos) 4942 } 4943 4944 // Log logs a message from the compiler. 4945 func (e *ssaExport) Logf(msg string, args ...interface{}) { 4946 if e.log { 4947 fmt.Printf(msg, args...) 4948 } 4949 } 4950 4951 func (e *ssaExport) Log() bool { 4952 return e.log 4953 } 4954 4955 // Fatal reports a compiler error and exits. 4956 func (e *ssaExport) Fatalf(pos src.XPos, msg string, args ...interface{}) { 4957 lineno = pos 4958 Fatalf(msg, args...) 4959 } 4960 4961 // Warnl reports a "warning", which is usually flag-triggered 4962 // logging output for the benefit of tests. 4963 func (e *ssaExport) Warnl(pos src.XPos, fmt_ string, args ...interface{}) { 4964 Warnl(pos, fmt_, args...) 4965 } 4966 4967 func (e *ssaExport) Debug_checknil() bool { 4968 return Debug_checknil != 0 4969 } 4970 4971 func (e *ssaExport) Debug_wb() bool { 4972 return Debug_wb != 0 4973 } 4974 4975 func (e *ssaExport) Syslook(name string) *obj.LSym { 4976 return Linksym(syslook(name).Sym) 4977 } 4978 4979 func (n *Node) Typ() ssa.Type { 4980 return n.Type 4981 }
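// Illustrative sketch (not compiler code): the Split* helpers above decompose
// a composite variable into scalar parts that SSA can track independently.
// For example, for a string local s, SplitString returns either two fresh
// autos (s.ptr *uint8 and s.len int, when s is an unaddressed PAUTO) or two
// slots within the original variable at offsets 0 and Widthptr.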