github.com/dannin/go@v0.0.0-20161031215817-d35dfd405eaa/src/cmd/compile/internal/gc/ssa.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"fmt"
	"html"
	"os"

	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaExp ssaExport

func initssa() *ssa.Config {
	if ssaConfig == nil {
		ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
		if Thearch.LinkArch.Name == "386" {
			ssaConfig.Set387(Thearch.Use387)
		}
	}
	ssaConfig.HTML = nil
	return ssaConfig
}

// buildssa builds an SSA function.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Lineno)
	defer s.popLine()

	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	if fn.Func.Pragma&Nowritebarrier != 0 {
		s.noWB = true
	}
	defer func() {
		if s.WBLineno != 0 {
			fn.Func.WBLineno = s.WBLineno
		}
	}()
	// TODO(khr): build config just once at the start of the compiler binary

	ssaExp.log = printssa

	s.config = initssa()
	s.f = s.config.NewFunc()
	s.f.Name = name
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}
	s.config.DebugTest = s.config.DebugHashMatch("GOSSAHASH", name)

	if name == os.Getenv("GOSSAFUNC") {
		// TODO: tempfile? it is handy to have the location
		// of this file be stable, so you can just reload in the browser.
		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, ptrto(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %s unimplemented", classnames[n.Class])
		}
	}

	// Populate arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class != PPARAM {
			continue
		}
		var v *ssa.Value
		if s.canSSA(n) {
			v = s.newValue0A(ssa.OpArg, n.Type, n)
		} else {
			// Not SSAable. Load it.
			v = s.newValue2(ssa.OpLoad, n.Type, s.decladdrs[n], s.startmem)
		}
		s.vars[n] = v
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	// Check that we used all labels
	for name, lab := range s.labels {
		if !lab.used() && !lab.reported && !lab.defNode.Used {
			yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
			lab.reported = true
		}
		if lab.used() && !lab.defined() && !lab.reported {
			yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
			lab.reported = true
		}
	}

	// Check any forward gotos. Non-forward gotos have already been checked.
	for _, n := range s.fwdGotos {
		lab := s.labels[n.Left.Sym.Name]
		// If the label is undefined, we have already printed an error.
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		}
	}

	if nerrors > 0 {
		s.f.Free()
		return nil
	}

	s.insertPhis()

	// Don't carry a reference to this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	return s.f
}

type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// gotos that jump forward; required for deferred checkgoto calls
	fwdGotos []*Node
	// Code that must precede any return
	// (e.g., copying the value of a heap-escaped paramout back to the true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []int32

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	// A dummy value used during phi construction.
	placeholder *ssa.Value

	cgoUnsafeArgs bool
	noWB          bool
	WBLineno      int32 // line number of first write barrier. 0=no write barriers
}

type funcLine struct {
	f    *Node
	line int32
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
	defNode        *Node      // label definition Node (OLABEL)
	// Label use Node (OGOTO, OBREAK, OCONTINUE).
	// Used only for error detection and reporting.
	// There might be multiple uses, but we only need to track one.
	useNode  *Node
	reported bool // reported indicates whether an error has already been reported for this label
}

// defined reports whether the label has a definition (OLABEL node).
func (l *ssaLabel) defined() bool { return l.defNode != nil }

// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
func (l *ssaLabel) used() bool { return l.useNode != nil }

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{})              { s.config.Logf(msg, args...) }
func (s *state) Log() bool                                         { return s.config.Log() }
func (s *state) Fatalf(msg string, args ...interface{})            { s.config.Fatalf(s.peekLine(), msg, args...) }
func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) }
func (s *state) Debug_checknil() bool                              { return s.config.Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
	idataVar  = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}}
	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	b.Line = s.peekLine()
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line int32) {
	if line == 0 {
		// the frontend may emit a node with a missing line number;
		// use the parent line number in this case.
		line = s.peekLine()
		if Debug['K'] != 0 {
			Warn("buildssa: line 0")
		}
	}
	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekLine peeks at the top of the line number stack.
func (s *state) peekLine() int32 {
	return s.line[len(s.line)-1]
}

func (s *state) Error(msg string, args ...interface{}) {
	yyerrorl(s.peekLine(), msg, args...)
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekLine(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekLine(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekLine(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t ssa.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekLine(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekLine(), op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekLine(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekLine(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekLine(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekLine(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekLine(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekLine(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekLine(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekLine(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekLine(), t, c)
}
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}

// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Lineno)
	defer s.popLine()

	// If s.curBlock is nil, then we're about to generate dead code.
	// We can't just short-circuit here, though,
	// because we check labels and gotos as part of SSA generation.
	// Provide a block for the dead code so that we don't have
	// to add special cases everywhere else.
	if s.curBlock == nil {
		dead := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(dead)
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "selectgo" || fn == "block") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno, 0, false)
		s.assign(n.List.Second(), resok, false, false, n.Lineno, 0, false)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		// Make a fake node to mimic loading return value, ONLY for write barrier test.
		// This is future-proofing against non-scalar 2-result intrinsics.
		// Currently we only have scalar ones, which result in no write barrier.
		fakeret := &Node{Op: OINDREGSP}
		s.assign(n.List.First(), v1, needwritebarrier(n.List.First(), fakeret), false, n.Lineno, 0, false)
		s.assign(n.List.Second(), v2, needwritebarrier(n.List.Second(), fakeret), false, n.Lineno, 0, false)
		return

	case ODCL:
		if n.Left.Class == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym

		if isblanksym(sym) {
			// Empty identifier is valid but useless.
			// See issues 11589, 11593.
			return
		}

		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.Name.Defn; ctl != nil {
			switch ctl.Op {
			case OFOR, OSWITCH, OSELECT:
				s.labeledNodes[ctl] = lab
			}
		}

		if !lab.defined() {
			lab.defNode = n
		} else {
			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
			lab.reported = true
		}
		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// go to that label (we pretend "label:" is preceded by "goto label")
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		if !lab.used() {
			lab.useNode = n
		}

		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		} else {
			s.fwdGotos = append(s.fwdGotos, n)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

	case OAS, OASWB:
		// Check whether we can generate static data rather than code.
		// If so, ignore n and defer data generation until codegen.
		// Failure to do this causes writes to readonly symbols.
		if gen_as_init(n, true) {
			var data []*Node
			if s.f.StaticData != nil {
				data = s.f.StaticData.([]*Node)
			}
			s.f.StaticData = append(data, n)
			return
		}

		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			// VARDEF x
			// COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		var t *Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) {
					if !s.canSSA(n.Left) {
						if Debug_append > 0 {
							Warnl(n.Lineno, "append: len-only update")
						}
						s.append(rhs, true)
						return
					} else {
						if Debug_append > 0 { // replicating old diagnostic message
							Warnl(n.Lineno, "append: len-only update (in local slice)")
						}
					}
				}
			}
		}
		var r *ssa.Value
		var isVolatile bool
		needwb := n.Op == OASWB
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r, isVolatile = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}
		if rhs != nil && rhs.Op == OAPPEND && needwritebarrier(n.Left, rhs) {
			// The frontend gets rid of the write barrier to enable the special OAPPEND
			// handling above, but since this is not a special case, we need it.
			// TODO: just add a ptr graying to the end of growslice?
			// TODO: check whether we need to provide special handling and a write barrier
			// for ODOTTYPE and ORECV also.
			// They get similar wb-removal treatment in walk.go:OAS.
			needwb = true
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			// tmp = len(*p)
			// (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, needwb, deref, n.Lineno, skip, isVolatile)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Left.Sym

	case OCONTINUE, OBREAK:
		var op string
		var to *ssa.Block
		switch n.Op {
		case OCONTINUE:
			op = "continue"
			to = s.continueTo
		case OBREAK:
			op = "break"
			to = s.breakTo
		}
		if n.Left == nil {
			// plain break/continue
			if to == nil {
				s.Error("%s is not in a loop", op)
				return
			}
			// nothing to do; "to" is already the correct target
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			if !lab.used() {
				lab.useNode = n.Left
			}
			if !lab.defined() {
				s.Error("%s label not defined: %v", op, sym)
				lab.reported = true
				return
			}
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
			if to == nil {
				// Valid label but not usable with a break/continue here, e.g.:
				// for {
				// 	continue abc
				// }
				// abc:
				// for {}
				s.Error("invalid %s label %v", op, sym)
				lab.reported = true
				return
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)

	case OFOR:
		// OFOR: for Ninit; Left; Right { Nbody }
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test
		b := s.endBlock()
		b.AddEdgeTo(bCond)

		// generate code to test condition
		s.startBlock(bCond)
		if n.Left != nil {
			s.condBranch(n.Left, bBody, bEnd, 1)
		} else {
			b := s.endBlock()
			b.Kind = ssa.BlockPlain
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}
		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// OSWITCH never falls through (s.curBlock == nil here).
		// OSELECT does not fall through if we're calling selectgo.
		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
		// In those latter cases, go to the code after the select.
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	case OSQRT:
		s.expr(n.Left)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}

type opAndType struct {
	op    Op
	etype EType
}

// opToSSA maps a front-end operator and concrete operand etype to the corresponding SSA opcode.
var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}:    ssa.OpMul8,
	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
	opAndType{OMUL, TINT16}:   ssa.OpMul16,
	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
	opAndType{OMUL, TINT32}:   ssa.OpMul32,
	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
	opAndType{OMUL, TINT64}:   ssa.OpMul64,
	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{OHMUL, TINT8}:   ssa.OpHmul8,
	opAndType{OHMUL, TUINT8}:  ssa.OpHmul8u,
	opAndType{OHMUL, TINT16}:  ssa.OpHmul16,
	opAndType{OHMUL, TUINT16}: ssa.OpHmul16u,
	opAndType{OHMUL, TINT32}:  ssa.OpHmul32,
	opAndType{OHMUL, TUINT32}: ssa.OpHmul32u,

	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}:   ssa.OpMod8,
	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
	opAndType{OMOD, TINT16}:  ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}:  ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}:  ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}:   ssa.OpAnd8,
	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
	opAndType{OAND, TINT16}:  ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}:  ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}:  ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}:   ssa.OpOr8,
	opAndType{OOR, TUINT8}:  ssa.OpOr8,
	opAndType{OOR, TINT16}:  ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}:  ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}:  ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}:   ssa.OpXor8,
	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
	opAndType{OXOR, TINT16}:  ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}:  ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}:  ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
	opAndType{OEQ, TINT8}:      ssa.OpEq8,
	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
	opAndType{OEQ, TINT16}:     ssa.OpEq16,
	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
	opAndType{OEQ, TINT32}:     ssa.OpEq32,
	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
	opAndType{OEQ, TINT64}:     ssa.OpEq64,
	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
	opAndType{OEQ, TPTR32}:     ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}:     ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,

	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
	opAndType{ONE, TINT8}:      ssa.OpNeq8,
	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
	opAndType{ONE, TINT16}:     ssa.OpNeq16,
	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
	opAndType{ONE, TINT32}:     ssa.OpNeq32,
	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
	opAndType{ONE, TINT64}:     ssa.OpNeq64,
	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
	opAndType{ONE, TPTR32}:     ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}:     ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,

	opAndType{OLT, TINT8}:    ssa.OpLess8,
	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
	opAndType{OLT, TINT16}:   ssa.OpLess16,
	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
	opAndType{OLT, TINT32}:   ssa.OpLess32,
	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
	opAndType{OLT, TINT64}:   ssa.OpLess64,
	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}:  ssa.OpGreater8,
	opAndType{OGT, TUINT8}: ssa.OpGreater8U,
	opAndType{OGT, TINT16}: ssa.OpGreater16,
	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
	opAndType{OGT, TINT32}:   ssa.OpGreater32,
	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
	opAndType{OGT, TINT64}:   ssa.OpGreater64,
	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}:    ssa.OpLeq8,
	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
	opAndType{OLE, TINT16}:   ssa.OpLeq16,
	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
	opAndType{OLE, TINT32}:   ssa.OpLeq32,
	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
	opAndType{OLE, TINT64}:   ssa.OpLeq64,
	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}:    ssa.OpGeq8,
	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
	opAndType{OGE, TINT16}:   ssa.OpGeq16,
	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
	opAndType{OGE, TINT32}:   ssa.OpGeq32,
	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
	opAndType{OGE, TINT64}:   ssa.OpGeq64,
	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,

	opAndType{OLROT, TUINT8}:  ssa.OpLrot8,
	opAndType{OLROT, TUINT16}: ssa.OpLrot16,
	opAndType{OLROT, TUINT32}: ssa.OpLrot32,
	opAndType{OLROT, TUINT64}: ssa.OpLrot64,

	opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt,
}

// concreteEtype returns a fixed-width etype for t, resolving the
// platform-sized types TINT, TUINT, and TUINTPTR.
func (s *state) concreteEtype(t *Type) EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.IntSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.IntSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}

// ssaOp returns the SSA opcode implementing op on operands of type t.
func (s *state) ssaOp(op Op, t *Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}

func floatForComplex(t *Type) *Type {
	if t.Size() == 8 {
		return Types[TFLOAT32]
	} else {
		return Types[TFLOAT64]
	}
}

type opAndTwoTypes struct {
	op     Op
	etype1 EType
	etype2 EType
}

type twoTypes struct {
	etype1 EType
	etype2 EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType EType
}

var fpConvOpToSSA = map[twoTypes]twoOpsAndType{

	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead

	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}

// This map is used only on 32-bit archs and only records the entries that differ there:
// on a 32-bit arch, don't use the int64<->float conversions for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}

var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}

// ssaShiftOp returns the SSA opcode implementing the shift op for an operand
// of type t shifted by an amount of type u.
func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}

// ssaRotateOp returns the SSA opcode implementing the rotate op for an operand of type t.
func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype1}]
	if !ok {
		s.Fatalf("unhandled rotate op %v etype=%s", op, etype1)
	}
	return x
}

// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Lineno)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OARRAYBYTESTRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice)
		len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTRARRAYBYTETMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), str)
		len := s.newValue1(ssa.OpStringLen, Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Left.Sym})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym)
			aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym}
			return s.entryNewValue1A(ssa.OpAddr, ptrto(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr, _ := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr, _ := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}

		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}

		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if Thearch.LinkArch.Name == "arm64" {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if !ok {
				s.Fatalf("weird float conversion %v -> %v", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValue1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValue1(op1, n.Type, x)
				}
				return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
1676 if ft.IsInteger() { 1677 // therefore tt is float32 or float64, and ft is also unsigned 1678 if tt.Size() == 4 { 1679 return s.uint64Tofloat32(n, x, ft, tt) 1680 } 1681 if tt.Size() == 8 { 1682 return s.uint64Tofloat64(n, x, ft, tt) 1683 } 1684 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt) 1685 } 1686 // therefore ft is float32 or float64, and tt is unsigned integer 1687 if ft.Size() == 4 { 1688 return s.float32ToUint64(n, x, ft, tt) 1689 } 1690 if ft.Size() == 8 { 1691 return s.float64ToUint64(n, x, ft, tt) 1692 } 1693 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) 1694 return nil 1695 } 1696 1697 if ft.IsComplex() && tt.IsComplex() { 1698 var op ssa.Op 1699 if ft.Size() == tt.Size() { 1700 op = ssa.OpCopy 1701 } else if ft.Size() == 8 && tt.Size() == 16 { 1702 op = ssa.OpCvt32Fto64F 1703 } else if ft.Size() == 16 && tt.Size() == 8 { 1704 op = ssa.OpCvt64Fto32F 1705 } else { 1706 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1707 } 1708 ftp := floatForComplex(ft) 1709 ttp := floatForComplex(tt) 1710 return s.newValue2(ssa.OpComplexMake, tt, 1711 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), 1712 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) 1713 } 1714 1715 s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) 1716 return nil 1717 1718 case ODOTTYPE: 1719 res, _ := s.dottype(n, false) 1720 return res 1721 1722 // binary ops 1723 case OLT, OEQ, ONE, OLE, OGE, OGT: 1724 a := s.expr(n.Left) 1725 b := s.expr(n.Right) 1726 if n.Left.Type.IsComplex() { 1727 pt := floatForComplex(n.Left.Type) 1728 op := s.ssaOp(OEQ, pt) 1729 r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) 1730 i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) 1731 c := s.newValue2(ssa.OpAndB, Types[TBOOL], r, i) 1732 switch n.Op { 1733 case OEQ: 1734 return c 1735 case ONE: 1736 return s.newValue1(ssa.OpNot, Types[TBOOL], c) 1737 default: 1738 s.Fatalf("ordered complex compare %v", n.Op) 1739 } 1740 } 1741 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b) 1742 case OMUL: 1743 a := s.expr(n.Left) 1744 b := s.expr(n.Right) 1745 if n.Type.IsComplex() { 1746 mulop := ssa.OpMul64F 1747 addop := ssa.OpAdd64F 1748 subop := ssa.OpSub64F 1749 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1750 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1751 1752 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1753 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1754 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1755 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1756 1757 if pt != wt { // Widen for calculation 1758 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1759 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1760 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1761 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1762 } 1763 1764 xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1765 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal)) 1766 1767 if pt != wt { // Narrow to store back 1768 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1769 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1770 } 1771 1772 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1773 } 1774 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 
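// The complex multiply above follows the schoolbook formula
//   (a+bi) * (c+di) = (ac - bd) + (ad + bc)i
// e.g. (1+2i)*(3+4i) = -5+10i, with the parts widened to float64 for the
// intermediate products so complex64 multiplication loses less to cancelation.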
1775 1776 case ODIV: 1777 a := s.expr(n.Left) 1778 b := s.expr(n.Right) 1779 if n.Type.IsComplex() { 1780 // TODO this is not executed because the front-end substitutes a runtime call. 1781 // That probably ought to change; with modest optimization the widen/narrow 1782 // conversions could all be elided in larger expression trees. 1783 mulop := ssa.OpMul64F 1784 addop := ssa.OpAdd64F 1785 subop := ssa.OpSub64F 1786 divop := ssa.OpDiv64F 1787 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1788 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1789 1790 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1791 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1792 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1793 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1794 1795 if pt != wt { // Widen for calculation 1796 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1797 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1798 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1799 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1800 } 1801 1802 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag)) 1803 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1804 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag)) 1805 1806 // TODO not sure if this is best done in wide precision or narrow 1807 // Double-rounding might be an issue. 1808 // Note that the pre-SSA implementation does the entire calculation 1809 // in wide format, so wide is compatible. 1810 xreal = s.newValue2(divop, wt, xreal, denom) 1811 ximag = s.newValue2(divop, wt, ximag, denom) 1812 1813 if pt != wt { // Narrow to store back 1814 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1815 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1816 } 1817 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1818 } 1819 if n.Type.IsFloat() { 1820 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1821 } 1822 return s.intDivide(n, a, b) 1823 case OMOD: 1824 a := s.expr(n.Left) 1825 b := s.expr(n.Right) 1826 return s.intDivide(n, a, b) 1827 case OADD, OSUB: 1828 a := s.expr(n.Left) 1829 b := s.expr(n.Right) 1830 if n.Type.IsComplex() { 1831 pt := floatForComplex(n.Type) 1832 op := s.ssaOp(n.Op, pt) 1833 return s.newValue2(ssa.OpComplexMake, n.Type, 1834 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1835 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1836 } 1837 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1838 case OAND, OOR, OHMUL, OXOR: 1839 a := s.expr(n.Left) 1840 b := s.expr(n.Right) 1841 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1842 case OLSH, ORSH: 1843 a := s.expr(n.Left) 1844 b := s.expr(n.Right) 1845 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1846 case OLROT: 1847 a := s.expr(n.Left) 1848 i := n.Right.Int64() 1849 if i <= 0 || i >= n.Type.Size()*8 { 1850 s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i) 1851 } 1852 return s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a) 1853 case OANDAND, OOROR: 1854 // To implement OANDAND (and OOROR), we introduce a 1855 // new temporary variable to hold the result. 
The 1856 // variable is associated with the OANDAND node in the 1857 // s.vars table (normally variables are only 1858 // associated with ONAME nodes). We convert 1859 // A && B 1860 // to 1861 // var = A 1862 // if var { 1863 // var = B 1864 // } 1865 // Using var in the subsequent block introduces the 1866 // necessary phi variable. 1867 el := s.expr(n.Left) 1868 s.vars[n] = el 1869 1870 b := s.endBlock() 1871 b.Kind = ssa.BlockIf 1872 b.SetControl(el) 1873 // In theory, we should set b.Likely here based on context. 1874 // However, gc only gives us likeliness hints 1875 // in a single place, for plain OIF statements, 1876 // and passing around context is finnicky, so don't bother for now. 1877 1878 bRight := s.f.NewBlock(ssa.BlockPlain) 1879 bResult := s.f.NewBlock(ssa.BlockPlain) 1880 if n.Op == OANDAND { 1881 b.AddEdgeTo(bRight) 1882 b.AddEdgeTo(bResult) 1883 } else if n.Op == OOROR { 1884 b.AddEdgeTo(bResult) 1885 b.AddEdgeTo(bRight) 1886 } 1887 1888 s.startBlock(bRight) 1889 er := s.expr(n.Right) 1890 s.vars[n] = er 1891 1892 b = s.endBlock() 1893 b.AddEdgeTo(bResult) 1894 1895 s.startBlock(bResult) 1896 return s.variable(n, Types[TBOOL]) 1897 case OCOMPLEX: 1898 r := s.expr(n.Left) 1899 i := s.expr(n.Right) 1900 return s.newValue2(ssa.OpComplexMake, n.Type, r, i) 1901 1902 // unary ops 1903 case OMINUS: 1904 a := s.expr(n.Left) 1905 if n.Type.IsComplex() { 1906 tp := floatForComplex(n.Type) 1907 negop := s.ssaOp(n.Op, tp) 1908 return s.newValue2(ssa.OpComplexMake, n.Type, 1909 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), 1910 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) 1911 } 1912 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1913 case ONOT, OCOM, OSQRT: 1914 a := s.expr(n.Left) 1915 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1916 case OIMAG, OREAL: 1917 a := s.expr(n.Left) 1918 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a) 1919 case OPLUS: 1920 return s.expr(n.Left) 1921 1922 case OADDR: 1923 a, _ := s.addr(n.Left, n.Bounded) 1924 // Note we know the volatile result is false because you can't write &f() in Go. 1925 return a 1926 1927 case OINDREGSP: 1928 addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(n.Type), n.Xoffset, s.sp) 1929 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1930 1931 case OIND: 1932 p := s.exprPtr(n.Left, false, n.Lineno) 1933 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1934 1935 case ODOT: 1936 t := n.Left.Type 1937 if canSSAType(t) { 1938 v := s.expr(n.Left) 1939 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v) 1940 } 1941 p, _ := s.addr(n, false) 1942 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1943 1944 case ODOTPTR: 1945 p := s.exprPtr(n.Left, false, n.Lineno) 1946 p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p) 1947 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1948 1949 case OINDEX: 1950 switch { 1951 case n.Left.Type.IsString(): 1952 if n.Bounded && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) { 1953 // Replace "abc"[1] with 'b'. 1954 // Delayed until now because "abc"[1] is not an ideal constant. 1955 // See test/fixedbugs/issue11370.go. 
1956 return s.newValue0I(ssa.OpConst8, Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()]))) 1957 } 1958 a := s.expr(n.Left) 1959 i := s.expr(n.Right) 1960 i = s.extendIndex(i, panicindex) 1961 if !n.Bounded { 1962 len := s.newValue1(ssa.OpStringLen, Types[TINT], a) 1963 s.boundsCheck(i, len) 1964 } 1965 ptrtyp := ptrto(Types[TUINT8]) 1966 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 1967 if Isconst(n.Right, CTINT) { 1968 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 1969 } else { 1970 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 1971 } 1972 return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem()) 1973 case n.Left.Type.IsSlice(): 1974 p, _ := s.addr(n, false) 1975 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1976 case n.Left.Type.IsArray(): 1977 if bound := n.Left.Type.NumElem(); bound <= 1 { 1978 // SSA can handle arrays of length at most 1. 1979 a := s.expr(n.Left) 1980 i := s.expr(n.Right) 1981 if bound == 0 { 1982 // Bounds check will never succeed. Might as well 1983 // use constants for the bounds check. 1984 z := s.constInt(Types[TINT], 0) 1985 s.boundsCheck(z, z) 1986 // The return value won't be live, return junk. 1987 return s.newValue0(ssa.OpUnknown, n.Type) 1988 } 1989 i = s.extendIndex(i, panicindex) 1990 s.boundsCheck(i, s.constInt(Types[TINT], bound)) 1991 return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) 1992 } 1993 p, _ := s.addr(n, false) 1994 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1995 default: 1996 s.Fatalf("bad type for index %v", n.Left.Type) 1997 return nil 1998 } 1999 2000 case OLEN, OCAP: 2001 switch { 2002 case n.Left.Type.IsSlice(): 2003 op := ssa.OpSliceLen 2004 if n.Op == OCAP { 2005 op = ssa.OpSliceCap 2006 } 2007 return s.newValue1(op, Types[TINT], s.expr(n.Left)) 2008 case n.Left.Type.IsString(): // string; not reachable for OCAP 2009 return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left)) 2010 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 2011 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 2012 default: // array 2013 return s.constInt(Types[TINT], n.Left.Type.NumElem()) 2014 } 2015 2016 case OSPTR: 2017 a := s.expr(n.Left) 2018 if n.Left.Type.IsSlice() { 2019 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 2020 } else { 2021 return s.newValue1(ssa.OpStringPtr, n.Type, a) 2022 } 2023 2024 case OITAB: 2025 a := s.expr(n.Left) 2026 return s.newValue1(ssa.OpITab, n.Type, a) 2027 2028 case OIDATA: 2029 a := s.expr(n.Left) 2030 return s.newValue1(ssa.OpIData, n.Type, a) 2031 2032 case OEFACE: 2033 tab := s.expr(n.Left) 2034 data := s.expr(n.Right) 2035 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2036 2037 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2038 v := s.expr(n.Left) 2039 var i, j, k *ssa.Value 2040 low, high, max := n.SliceBounds() 2041 if low != nil { 2042 i = s.extendIndex(s.expr(low), panicslice) 2043 } 2044 if high != nil { 2045 j = s.extendIndex(s.expr(high), panicslice) 2046 } 2047 if max != nil { 2048 k = s.extendIndex(s.expr(max), panicslice) 2049 } 2050 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2051 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2052 2053 case OSLICESTR: 2054 v := s.expr(n.Left) 2055 var i, j *ssa.Value 2056 low, high, _ := n.SliceBounds() 2057 if low != nil { 2058 i = s.extendIndex(s.expr(low), panicslice) 2059 } 2060 if high != nil { 2061 j = s.extendIndex(s.expr(high), panicslice) 2062 } 2063 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 2064 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2065 2066 case 
OCALLFUNC: 2067 if isIntrinsicCall(n) { 2068 return s.intrinsicCall(n) 2069 } 2070 fallthrough 2071 2072 case OCALLINTER, OCALLMETH: 2073 a := s.call(n, callNormal) 2074 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2075 2076 case OGETG: 2077 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2078 2079 case OAPPEND: 2080 return s.append(n, false) 2081 2082 default: 2083 s.Fatalf("unhandled expr %v", n.Op) 2084 return nil 2085 } 2086 } 2087 2088 // append converts an OAPPEND node to SSA. 2089 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2090 // adds it to s, and returns the Value. 2091 // If inplace is true, it writes the result of the OAPPEND expression n 2092 // back to the slice being appended to, and returns nil. 2093 // inplace MUST be set to false if the slice can be SSA'd. 2094 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2095 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2096 // 2097 // ptr, len, cap := s 2098 // newlen := len + 3 2099 // if newlen > cap { 2100 // ptr, len, cap = growslice(s, newlen) 2101 // newlen = len + 3 // recalculate to avoid a spill 2102 // } 2103 // // with write barriers, if needed: 2104 // *(ptr+len) = e1 2105 // *(ptr+len+1) = e2 2106 // *(ptr+len+2) = e3 2107 // return makeslice(ptr, newlen, cap) 2108 // 2109 // 2110 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2111 // 2112 // a := &s 2113 // ptr, len, cap := s 2114 // newlen := len + 3 2115 // if newlen > cap { 2116 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2117 // vardef(a) // if necessary, advise liveness we are writing a new a 2118 // *a.cap = newcap // write before ptr to avoid a spill 2119 // *a.ptr = newptr // with write barrier 2120 // } 2121 // newlen = len + 3 // recalculate to avoid a spill 2122 // *a.len = newlen 2123 // // with write barriers, if needed: 2124 // *(ptr+len) = e1 2125 // *(ptr+len+1) = e2 2126 // *(ptr+len+2) = e3 2127 2128 et := n.Type.Elem() 2129 pt := ptrto(et) 2130 2131 // Evaluate slice 2132 sn := n.List.First() // the slice node is the first in the list 2133 2134 var slice, addr *ssa.Value 2135 if inplace { 2136 addr, _ = s.addr(sn, false) 2137 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2138 } else { 2139 slice = s.expr(sn) 2140 } 2141 2142 // Allocate new blocks 2143 grow := s.f.NewBlock(ssa.BlockPlain) 2144 assign := s.f.NewBlock(ssa.BlockPlain) 2145 2146 // Decide if we need to grow 2147 nargs := int64(n.List.Len() - 1) 2148 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2149 l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice) 2150 c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice) 2151 nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2152 2153 cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c) 2154 s.vars[&ptrVar] = p 2155 2156 if !inplace { 2157 s.vars[&newlenVar] = nl 2158 s.vars[&capVar] = c 2159 } else { 2160 s.vars[&lenVar] = l 2161 } 2162 2163 b := s.endBlock() 2164 b.Kind = ssa.BlockIf 2165 b.Likely = ssa.BranchUnlikely 2166 b.SetControl(cmp) 2167 b.AddEdgeTo(grow) 2168 b.AddEdgeTo(assign) 2169 2170 // Call growslice 2171 s.startBlock(grow) 2172 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(n.Type.Elem())}, s.sb) 2173 2174 r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) 2175 2176 if inplace { 2177 if sn.Op == ONAME { 2178 // Tell liveness we're about to build a new slice 
2179 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem()) 2180 } 2181 capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(array_cap), addr) 2182 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem()) 2183 if ssa.IsStackAddr(addr) { 2184 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, pt.Size(), addr, r[0], s.mem()) 2185 } else { 2186 s.insertWBstore(pt, addr, r[0], n.Lineno, 0) 2187 } 2188 // load the value we just stored to avoid having to spill it 2189 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2190 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2191 } else { 2192 s.vars[&ptrVar] = r[0] 2193 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs)) 2194 s.vars[&capVar] = r[2] 2195 } 2196 2197 b = s.endBlock() 2198 b.AddEdgeTo(assign) 2199 2200 // assign new elements to slots 2201 s.startBlock(assign) 2202 2203 if inplace { 2204 l = s.variable(&lenVar, Types[TINT]) // generates phi for len 2205 nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) 2206 lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(array_nel), addr) 2207 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem()) 2208 } 2209 2210 // Evaluate args 2211 type argRec struct { 2212 // if store is true, we're appending the value v. If false, we're appending the 2213 // value at *v. If store==false, isVolatile reports whether the source 2214 // is in the outargs section of the stack frame. 2215 v *ssa.Value 2216 store bool 2217 isVolatile bool 2218 } 2219 args := make([]argRec, 0, nargs) 2220 for _, n := range n.List.Slice()[1:] { 2221 if canSSAType(n.Type) { 2222 args = append(args, argRec{v: s.expr(n), store: true}) 2223 } else { 2224 v, isVolatile := s.addr(n, false) 2225 args = append(args, argRec{v: v, isVolatile: isVolatile}) 2226 } 2227 } 2228 2229 p = s.variable(&ptrVar, pt) // generates phi for ptr 2230 if !inplace { 2231 nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl 2232 c = s.variable(&capVar, Types[TINT]) // generates phi for cap 2233 } 2234 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2235 // TODO: just one write barrier call for all of these writes? 2236 // TODO: maybe just one writeBarrier.enabled check? 2237 for i, arg := range args { 2238 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i))) 2239 if arg.store { 2240 if haspointers(et) { 2241 s.insertWBstore(et, addr, arg.v, n.Lineno, 0) 2242 } else { 2243 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem()) 2244 } 2245 } else { 2246 if haspointers(et) { 2247 s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile) 2248 } else { 2249 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(et), addr, arg.v, s.mem()) 2250 } 2251 } 2252 } 2253 2254 delete(s.vars, &ptrVar) 2255 if inplace { 2256 delete(s.vars, &lenVar) 2257 return nil 2258 } 2259 delete(s.vars, &newlenVar) 2260 delete(s.vars, &capVar) 2261 // make result 2262 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2263 } 2264 2265 // condBranch evaluates the boolean expression cond and branches to yes 2266 // if cond is true and no if cond is false. 2267 // This function is intended to handle && and || better than just calling 2268 // s.expr(cond) and branching on the result. 
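// For example (a sketch of the resulting CFG, not literal output),
//   if a && b { yes } else { no }
// lowers to
//   if a { goto mid } else { goto no }
//   mid: if b { goto yes } else { goto no }
// so the right operand is evaluated only when the left is true and
// short-circuiting falls out of the block structure.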
2269 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { 2270 if cond.Op == OANDAND { 2271 mid := s.f.NewBlock(ssa.BlockPlain) 2272 s.stmtList(cond.Ninit) 2273 s.condBranch(cond.Left, mid, no, max8(likely, 0)) 2274 s.startBlock(mid) 2275 s.condBranch(cond.Right, yes, no, likely) 2276 return 2277 // Note: if likely==1, then both recursive calls pass 1. 2278 // If likely==-1, then we don't have enough information to decide 2279 // whether the first branch is likely or not. So we pass 0 for 2280 // the likeliness of the first branch. 2281 // TODO: have the frontend give us branch prediction hints for 2282 // OANDAND and OOROR nodes (if it ever has such info). 2283 } 2284 if cond.Op == OOROR { 2285 mid := s.f.NewBlock(ssa.BlockPlain) 2286 s.stmtList(cond.Ninit) 2287 s.condBranch(cond.Left, yes, mid, min8(likely, 0)) 2288 s.startBlock(mid) 2289 s.condBranch(cond.Right, yes, no, likely) 2290 return 2291 // Note: if likely==-1, then both recursive calls pass -1. 2292 // If likely==1, then we don't have enough info to decide 2293 // the likelihood of the first branch. 2294 } 2295 if cond.Op == ONOT { 2296 s.stmtList(cond.Ninit) 2297 s.condBranch(cond.Left, no, yes, -likely) 2298 return 2299 } 2300 c := s.expr(cond) 2301 b := s.endBlock() 2302 b.Kind = ssa.BlockIf 2303 b.SetControl(c) 2304 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness 2305 b.AddEdgeTo(yes) 2306 b.AddEdgeTo(no) 2307 } 2308 2309 type skipMask uint8 2310 2311 const ( 2312 skipPtr skipMask = 1 << iota 2313 skipLen 2314 skipCap 2315 ) 2316 2317 // assign does left = right. 2318 // Right has already been evaluated to ssa, left has not. 2319 // If deref is true, then we do left = *right instead (and right has already been nil-checked). 2320 // If deref is true and right == nil, just do left = 0. 2321 // If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage. 2322 // Include a write barrier if wb is true. 2323 // skip indicates assignments (at the top level) that can be avoided. 2324 func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask, rightIsVolatile bool) { 2325 if left.Op == ONAME && isblank(left) { 2326 return 2327 } 2328 t := left.Type 2329 dowidth(t) 2330 if s.canSSA(left) { 2331 if deref { 2332 s.Fatalf("can SSA LHS %v but not RHS %s", left, right) 2333 } 2334 if left.Op == ODOT { 2335 // We're assigning to a field of an ssa-able value. 2336 // We need to build a new structure with the new value for the 2337 // field we're assigning and the old values for the other fields. 2338 // For instance: 2339 // type T struct {a, b, c int} 2340 // var T x 2341 // x.b = 5 2342 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c} 2343 2344 // Grab information about the structure type. 2345 t := left.Left.Type 2346 nf := t.NumFields() 2347 idx := fieldIdx(left) 2348 2349 // Grab old value of structure. 2350 old := s.expr(left.Left) 2351 2352 // Make new structure. 2353 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t) 2354 2355 // Add fields as args. 2356 for i := 0; i < nf; i++ { 2357 if i == idx { 2358 new.AddArg(right) 2359 } else { 2360 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old)) 2361 } 2362 } 2363 2364 // Recursively assign the new value we've made to the base of the dot op. 2365 s.assign(left.Left, new, false, false, line, 0, rightIsVolatile) 2366 // TODO: do we need to update named values here? 
2367 return 2368 } 2369 if left.Op == OINDEX && left.Left.Type.IsArray() { 2370 // We're assigning to an element of an ssa-able array. 2371 // a[i] = v 2372 t := left.Left.Type 2373 n := t.NumElem() 2374 2375 i := s.expr(left.Right) // index 2376 if n == 0 { 2377 // The bounds check must fail. Might as well 2378 // ignore the actual index and just use zeros. 2379 z := s.constInt(Types[TINT], 0) 2380 s.boundsCheck(z, z) 2381 return 2382 } 2383 if n != 1 { 2384 s.Fatalf("assigning to non-1-length array") 2385 } 2386 // Rewrite to a = [1]{v} 2387 i = s.extendIndex(i, panicindex) 2388 s.boundsCheck(i, s.constInt(Types[TINT], 1)) 2389 v := s.newValue1(ssa.OpArrayMake1, t, right) 2390 s.assign(left.Left, v, false, false, line, 0, rightIsVolatile) 2391 return 2392 } 2393 // Update variable assignment. 2394 s.vars[left] = right 2395 s.addNamedValue(left, right) 2396 return 2397 } 2398 // Left is not ssa-able. Compute its address. 2399 addr, _ := s.addr(left, false) 2400 if left.Op == ONAME && skip == 0 { 2401 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem()) 2402 } 2403 if deref { 2404 // Treat as a mem->mem move. 2405 if wb && !ssa.IsStackAddr(addr) { 2406 s.insertWBmove(t, addr, right, line, rightIsVolatile) 2407 return 2408 } 2409 if right == nil { 2410 s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(t), addr, s.mem()) 2411 return 2412 } 2413 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(t), addr, right, s.mem()) 2414 return 2415 } 2416 // Treat as a store. 2417 if wb && !ssa.IsStackAddr(addr) { 2418 if skip&skipPtr != 0 { 2419 // Special case: if we don't write back the pointers, don't bother 2420 // doing the write barrier check. 2421 s.storeTypeScalars(t, addr, right, skip) 2422 return 2423 } 2424 s.insertWBstore(t, addr, right, line, skip) 2425 return 2426 } 2427 if skip != 0 { 2428 if skip&skipPtr == 0 { 2429 s.storeTypePtrs(t, addr, right) 2430 } 2431 s.storeTypeScalars(t, addr, right, skip) 2432 return 2433 } 2434 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem()) 2435 } 2436 2437 // zeroVal returns the zero value for type t. 
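// For composite types the zero value is built recursively; for example
// (roughly), the zero value of
//   struct { a int32; b *byte }
// is constructed as StructMake2(Const32 <int32> [0], ConstNil <*byte>).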
2438 func (s *state) zeroVal(t *Type) *ssa.Value { 2439 switch { 2440 case t.IsInteger(): 2441 switch t.Size() { 2442 case 1: 2443 return s.constInt8(t, 0) 2444 case 2: 2445 return s.constInt16(t, 0) 2446 case 4: 2447 return s.constInt32(t, 0) 2448 case 8: 2449 return s.constInt64(t, 0) 2450 default: 2451 s.Fatalf("bad sized integer type %v", t) 2452 } 2453 case t.IsFloat(): 2454 switch t.Size() { 2455 case 4: 2456 return s.constFloat32(t, 0) 2457 case 8: 2458 return s.constFloat64(t, 0) 2459 default: 2460 s.Fatalf("bad sized float type %v", t) 2461 } 2462 case t.IsComplex(): 2463 switch t.Size() { 2464 case 8: 2465 z := s.constFloat32(Types[TFLOAT32], 0) 2466 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2467 case 16: 2468 z := s.constFloat64(Types[TFLOAT64], 0) 2469 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2470 default: 2471 s.Fatalf("bad sized complex type %v", t) 2472 } 2473 2474 case t.IsString(): 2475 return s.constEmptyString(t) 2476 case t.IsPtrShaped(): 2477 return s.constNil(t) 2478 case t.IsBoolean(): 2479 return s.constBool(false) 2480 case t.IsInterface(): 2481 return s.constInterface(t) 2482 case t.IsSlice(): 2483 return s.constSlice(t) 2484 case t.IsStruct(): 2485 n := t.NumFields() 2486 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2487 for i := 0; i < n; i++ { 2488 v.AddArg(s.zeroVal(t.FieldType(i).(*Type))) 2489 } 2490 return v 2491 case t.IsArray(): 2492 switch t.NumElem() { 2493 case 0: 2494 return s.entryNewValue0(ssa.OpArrayMake0, t) 2495 case 1: 2496 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) 2497 } 2498 } 2499 s.Fatalf("zero for type %v not implemented", t) 2500 return nil 2501 } 2502 2503 type callKind int8 2504 2505 const ( 2506 callNormal callKind = iota 2507 callDefer 2508 callGo 2509 ) 2510 2511 // TODO: make this a field of a configuration object instead of a global. 2512 var intrinsics *intrinsicInfo 2513 2514 type intrinsicInfo struct { 2515 std map[intrinsicKey]intrinsicBuilder 2516 intSized map[sizedIntrinsicKey]intrinsicBuilder 2517 ptrSized map[sizedIntrinsicKey]intrinsicBuilder 2518 } 2519 2520 // An intrinsicBuilder converts a call node n into an ssa value that 2521 // implements that call as an intrinsic. 2522 type intrinsicBuilder func(s *state, n *Node) *ssa.Value 2523 2524 type intrinsicKey struct { 2525 pkg string 2526 fn string 2527 } 2528 2529 type sizedIntrinsicKey struct { 2530 pkg string 2531 fn string 2532 size int 2533 } 2534 2535 // disableForInstrumenting returns nil when instrumenting, fn otherwise 2536 func disableForInstrumenting(fn intrinsicBuilder) intrinsicBuilder { 2537 if instrumenting { 2538 return nil 2539 } 2540 return fn 2541 } 2542 2543 // enableOnArch returns fn on given archs, nil otherwise 2544 func enableOnArch(fn intrinsicBuilder, archs ...sys.ArchFamily) intrinsicBuilder { 2545 if Thearch.LinkArch.InFamily(archs...) { 2546 return fn 2547 } 2548 return nil 2549 } 2550 2551 func intrinsicInit() { 2552 i := &intrinsicInfo{} 2553 intrinsics = i 2554 2555 // initial set of intrinsics. 2556 i.std = map[intrinsicKey]intrinsicBuilder{ 2557 /******** runtime ********/ 2558 intrinsicKey{"runtime", "slicebytetostringtmp"}: disableForInstrumenting(func(s *state, n *Node) *ssa.Value { 2559 // Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes 2560 // for the backend instead of slicebytetostringtmp calls 2561 // when not instrumenting. 
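// The builder below avoids a copy: it reuses the byte slice's pointer and
// length directly as the string header, which is safe only because callers
// of slicebytetostringtmp guarantee the bytes are not mutated while the
// resulting string is in use.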
2562 slice := s.intrinsicFirstArg(n) 2563 ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice) 2564 len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice) 2565 return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) 2566 }), 2567 intrinsicKey{"runtime", "KeepAlive"}: func(s *state, n *Node) *ssa.Value { 2568 data := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), s.intrinsicFirstArg(n)) 2569 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem()) 2570 return nil 2571 }, 2572 2573 /******** runtime/internal/sys ********/ 2574 intrinsicKey{"runtime/internal/sys", "Ctz32"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2575 return s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n)) 2576 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X), 2577 intrinsicKey{"runtime/internal/sys", "Ctz64"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2578 return s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n)) 2579 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X), 2580 intrinsicKey{"runtime/internal/sys", "Bswap32"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2581 return s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n)) 2582 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X), 2583 intrinsicKey{"runtime/internal/sys", "Bswap64"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2584 return s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n)) 2585 }, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X), 2586 2587 /******** runtime/internal/atomic ********/ 2588 intrinsicKey{"runtime/internal/atomic", "Load"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2589 v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem()) 2590 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2591 return s.newValue1(ssa.OpSelect0, Types[TUINT32], v) 2592 }, sys.AMD64, sys.ARM64, sys.S390X), 2593 intrinsicKey{"runtime/internal/atomic", "Load64"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2594 v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem()) 2595 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2596 return s.newValue1(ssa.OpSelect0, Types[TUINT64], v) 2597 }, sys.AMD64, sys.ARM64, sys.S390X), 2598 intrinsicKey{"runtime/internal/atomic", "Loadp"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2599 v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(ptrto(Types[TUINT8]), ssa.TypeMem), s.intrinsicArg(n, 0), s.mem()) 2600 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2601 return s.newValue1(ssa.OpSelect0, ptrto(Types[TUINT8]), v) 2602 }, sys.AMD64, sys.ARM64, sys.S390X), 2603 2604 intrinsicKey{"runtime/internal/atomic", "Store"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2605 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2606 return nil 2607 }, sys.AMD64, sys.ARM64, sys.S390X), 2608 intrinsicKey{"runtime/internal/atomic", "Store64"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2609 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2610 return nil 2611 }, sys.AMD64, sys.ARM64, sys.S390X), 2612 intrinsicKey{"runtime/internal/atomic", "StorepNoWB"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2613 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2614 return nil 2615 }, 
sys.AMD64, sys.ARM64, sys.S390X), 2616 2617 intrinsicKey{"runtime/internal/atomic", "Xchg"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2618 v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2619 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2620 return s.newValue1(ssa.OpSelect0, Types[TUINT32], v) 2621 }, sys.AMD64, sys.ARM64, sys.S390X), 2622 intrinsicKey{"runtime/internal/atomic", "Xchg64"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2623 v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2624 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2625 return s.newValue1(ssa.OpSelect0, Types[TUINT64], v) 2626 }, sys.AMD64, sys.ARM64, sys.S390X), 2627 2628 intrinsicKey{"runtime/internal/atomic", "Xadd"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2629 v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2630 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2631 return s.newValue1(ssa.OpSelect0, Types[TUINT32], v) 2632 }, sys.AMD64, sys.ARM64, sys.S390X), 2633 intrinsicKey{"runtime/internal/atomic", "Xadd64"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2634 v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2635 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2636 return s.newValue1(ssa.OpSelect0, Types[TUINT64], v) 2637 }, sys.AMD64, sys.ARM64, sys.S390X), 2638 2639 intrinsicKey{"runtime/internal/atomic", "Cas"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2640 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem()) 2641 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2642 return s.newValue1(ssa.OpSelect0, Types[TBOOL], v) 2643 }, sys.AMD64, sys.ARM64, sys.S390X), 2644 intrinsicKey{"runtime/internal/atomic", "Cas64"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2645 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem()) 2646 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2647 return s.newValue1(ssa.OpSelect0, Types[TBOOL], v) 2648 }, sys.AMD64, sys.ARM64, sys.S390X), 2649 2650 intrinsicKey{"runtime/internal/atomic", "And8"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2651 s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2652 return nil 2653 }, sys.AMD64, sys.ARM64), 2654 intrinsicKey{"runtime/internal/atomic", "Or8"}: enableOnArch(func(s *state, n *Node) *ssa.Value { 2655 s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem()) 2656 return nil 2657 }, sys.AMD64, sys.ARM64), 2658 } 2659 2660 // aliases internal to runtime/internal/atomic 2661 i.std[intrinsicKey{"runtime/internal/atomic", "Loadint64"}] = 2662 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2663 i.std[intrinsicKey{"runtime/internal/atomic", "Xaddint64"}] = 2664 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2665 2666 // intrinsics which vary depending on the size of int/ptr. 
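// For example, atomic.Loaduint lowers to the 32-bit Load builder when int is
// 4 bytes wide and to the Load64 builder when it is 8; findIntrinsic keys
// lookups in these two maps by Widthint and Widthptr respectively.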
2667 i.intSized = map[sizedIntrinsicKey]intrinsicBuilder{ 2668 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Load"}], 2669 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}], 2670 } 2671 i.ptrSized = map[sizedIntrinsicKey]intrinsicBuilder{ 2672 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Load"}], 2673 sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}], 2674 sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Store"}], 2675 sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}], 2676 sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}], 2677 sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}], 2678 sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}], 2679 sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}], 2680 sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}], 2681 sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}], 2682 sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}], 2683 sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}], 2684 } 2685 2686 /******** sync/atomic ********/ 2687 if flag_race { 2688 // The race detector needs to be able to intercept these calls. 2689 // We can't intrinsify them. 2690 return 2691 } 2692 // these are all aliases to runtime/internal/atomic implementations. 2693 i.std[intrinsicKey{"sync/atomic", "LoadInt32"}] = 2694 i.std[intrinsicKey{"runtime/internal/atomic", "Load"}] 2695 i.std[intrinsicKey{"sync/atomic", "LoadInt64"}] = 2696 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2697 i.std[intrinsicKey{"sync/atomic", "LoadPointer"}] = 2698 i.std[intrinsicKey{"runtime/internal/atomic", "Loadp"}] 2699 i.std[intrinsicKey{"sync/atomic", "LoadUint32"}] = 2700 i.std[intrinsicKey{"runtime/internal/atomic", "Load"}] 2701 i.std[intrinsicKey{"sync/atomic", "LoadUint64"}] = 2702 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2703 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 4}] = 2704 i.std[intrinsicKey{"runtime/internal/atomic", "Load"}] 2705 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 8}] = 2706 i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}] 2707 2708 i.std[intrinsicKey{"sync/atomic", "StoreInt32"}] = 2709 i.std[intrinsicKey{"runtime/internal/atomic", "Store"}] 2710 i.std[intrinsicKey{"sync/atomic", "StoreInt64"}] = 2711 i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}] 2712 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. 
2713 i.std[intrinsicKey{"sync/atomic", "StoreUint32"}] = 2714 i.std[intrinsicKey{"runtime/internal/atomic", "Store"}] 2715 i.std[intrinsicKey{"sync/atomic", "StoreUint64"}] = 2716 i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}] 2717 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 4}] = 2718 i.std[intrinsicKey{"runtime/internal/atomic", "Store"}] 2719 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 8}] = 2720 i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}] 2721 2722 i.std[intrinsicKey{"sync/atomic", "SwapInt32"}] = 2723 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}] 2724 i.std[intrinsicKey{"sync/atomic", "SwapInt64"}] = 2725 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}] 2726 i.std[intrinsicKey{"sync/atomic", "SwapUint32"}] = 2727 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}] 2728 i.std[intrinsicKey{"sync/atomic", "SwapUint64"}] = 2729 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}] 2730 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 4}] = 2731 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}] 2732 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 8}] = 2733 i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}] 2734 2735 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt32"}] = 2736 i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}] 2737 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt64"}] = 2738 i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}] 2739 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint32"}] = 2740 i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}] 2741 i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint64"}] = 2742 i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}] 2743 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 4}] = 2744 i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}] 2745 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 8}] = 2746 i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}] 2747 2748 i.std[intrinsicKey{"sync/atomic", "AddInt32"}] = 2749 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}] 2750 i.std[intrinsicKey{"sync/atomic", "AddInt64"}] = 2751 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2752 i.std[intrinsicKey{"sync/atomic", "AddUint32"}] = 2753 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}] 2754 i.std[intrinsicKey{"sync/atomic", "AddUint64"}] = 2755 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2756 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 4}] = 2757 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}] 2758 i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 8}] = 2759 i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}] 2760 2761 /******** math/big ********/ 2762 i.intSized[sizedIntrinsicKey{"math/big", "mulWW", 8}] = 2763 enableOnArch(func(s *state, n *Node) *ssa.Value { 2764 return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1)) 2765 }, sys.AMD64) 2766 i.intSized[sizedIntrinsicKey{"math/big", "divWW", 8}] = 2767 enableOnArch(func(s *state, n *Node) *ssa.Value { 2768 return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2)) 2769 }, sys.AMD64) 2770 } 2771 2772 // findIntrinsic returns a function which builds the SSA equivalent of the 2773 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. 
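// For example, on amd64 a call to runtime/internal/sys.Ctz64 resolves to the
// builder that emits a single OpCtz64 value, while the lookup returns nil when
// ssa.IntrinsicsDisable is set or the package/function pair is not registered.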
2774 func findIntrinsic(sym *Sym) intrinsicBuilder { 2775 if ssa.IntrinsicsDisable { 2776 return nil 2777 } 2778 if sym == nil || sym.Pkg == nil { 2779 return nil 2780 } 2781 if intrinsics == nil { 2782 intrinsicInit() 2783 } 2784 pkg := sym.Pkg.Path 2785 if sym.Pkg == localpkg { 2786 pkg = myimportpath 2787 } 2788 fn := sym.Name 2789 f := intrinsics.std[intrinsicKey{pkg, fn}] 2790 if f != nil { 2791 return f 2792 } 2793 f = intrinsics.intSized[sizedIntrinsicKey{pkg, fn, Widthint}] 2794 if f != nil { 2795 return f 2796 } 2797 return intrinsics.ptrSized[sizedIntrinsicKey{pkg, fn, Widthptr}] 2798 } 2799 2800 func isIntrinsicCall(n *Node) bool { 2801 if n == nil || n.Left == nil { 2802 return false 2803 } 2804 return findIntrinsic(n.Left.Sym) != nil 2805 } 2806 2807 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 2808 func (s *state) intrinsicCall(n *Node) *ssa.Value { 2809 v := findIntrinsic(n.Left.Sym)(s, n) 2810 if ssa.IntrinsicsDebug > 0 { 2811 x := v 2812 if x == nil { 2813 x = s.mem() 2814 } 2815 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { 2816 x = x.Args[0] 2817 } 2818 Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) 2819 } 2820 return v 2821 } 2822 2823 // intrinsicArg extracts the ith arg from n.List and returns its value. 2824 func (s *state) intrinsicArg(n *Node, i int) *ssa.Value { 2825 x := n.List.Slice()[i] 2826 if x.Op == OAS { 2827 x = x.Right 2828 } 2829 return s.expr(x) 2830 } 2831 func (s *state) intrinsicFirstArg(n *Node) *ssa.Value { 2832 return s.intrinsicArg(n, 0) 2833 } 2834 2835 // Calls the function n using the specified call type. 2836 // Returns the address of the return value (or nil if none). 2837 func (s *state) call(n *Node, k callKind) *ssa.Value { 2838 var sym *Sym // target symbol (if static) 2839 var closure *ssa.Value // ptr to closure to run (if dynamic) 2840 var codeptr *ssa.Value // ptr to target code (if dynamic) 2841 var rcvr *ssa.Value // receiver to set 2842 fn := n.Left 2843 switch n.Op { 2844 case OCALLFUNC: 2845 if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC { 2846 sym = fn.Sym 2847 break 2848 } 2849 closure = s.expr(fn) 2850 case OCALLMETH: 2851 if fn.Op != ODOTMETH { 2852 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 2853 } 2854 if k == callNormal { 2855 sym = fn.Sym 2856 break 2857 } 2858 // Make a name n2 for the function. 2859 // fn.Sym might be sync.(*Mutex).Unlock. 2860 // Make a PFUNC node out of that, then evaluate it. 2861 // We get back an SSA value representing &sync.(*Mutex).Unlock·f. 2862 // We can then pass that to defer or go. 2863 n2 := newname(fn.Sym) 2864 n2.Class = PFUNC 2865 n2.Lineno = fn.Lineno 2866 n2.Type = Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. 2867 closure = s.expr(n2) 2868 // Note: receiver is already assigned in n.List, so we don't 2869 // want to set it here. 
2870 case OCALLINTER: 2871 if fn.Op != ODOTINTER { 2872 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 2873 } 2874 i := s.expr(fn.Left) 2875 itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i) 2876 if k != callNormal { 2877 s.nilCheck(itab) 2878 } 2879 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab 2880 itab = s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), itabidx, itab) 2881 if k == callNormal { 2882 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem()) 2883 } else { 2884 closure = itab 2885 } 2886 rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i) 2887 } 2888 dowidth(fn.Type) 2889 stksize := fn.Type.ArgWidth() // includes receiver 2890 2891 // Run all argument assignments. The arg slots have already 2892 // been offset by the appropriate amount (+2*widthptr for go/defer, 2893 // +widthptr for interface calls). 2894 // For OCALLMETH, the receiver is set in these statements. 2895 s.stmtList(n.List) 2896 2897 // Set receiver (for interface calls) 2898 if rcvr != nil { 2899 argStart := Ctxt.FixedFrameSize() 2900 if k != callNormal { 2901 argStart += int64(2 * Widthptr) 2902 } 2903 addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), argStart, s.sp) 2904 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem()) 2905 } 2906 2907 // Defer/go args 2908 if k != callNormal { 2909 // Write argsize and closure (args to Newproc/Deferproc). 2910 argStart := Ctxt.FixedFrameSize() 2911 argsize := s.constInt32(Types[TUINT32], int32(stksize)) 2912 addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINT32]), argStart, s.sp) 2913 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, addr, argsize, s.mem()) 2914 addr = s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), argStart+int64(Widthptr), s.sp) 2915 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem()) 2916 stksize += 2 * int64(Widthptr) 2917 } 2918 2919 // call target 2920 var call *ssa.Value 2921 switch { 2922 case k == callDefer: 2923 call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem()) 2924 case k == callGo: 2925 call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem()) 2926 case closure != nil: 2927 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem()) 2928 call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem()) 2929 case codeptr != nil: 2930 call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem()) 2931 case sym != nil: 2932 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem()) 2933 default: 2934 Fatalf("bad call type %v %v", n.Op, n) 2935 } 2936 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 2937 s.vars[&memVar] = call 2938 2939 // Finish block for defers 2940 if k == callDefer { 2941 b := s.endBlock() 2942 b.Kind = ssa.BlockDefer 2943 b.SetControl(call) 2944 bNext := s.f.NewBlock(ssa.BlockPlain) 2945 b.AddEdgeTo(bNext) 2946 // Add recover edge to exit code. 2947 r := s.f.NewBlock(ssa.BlockPlain) 2948 s.startBlock(r) 2949 s.exit() 2950 b.AddEdgeTo(r) 2951 b.Likely = ssa.BranchLikely 2952 s.startBlock(bNext) 2953 } 2954 2955 res := n.Left.Type.Results() 2956 if res.NumFields() == 0 || k != callNormal { 2957 // call has no return value. Continue with the next statement. 
2958 return nil 2959 } 2960 fp := res.Field(0) 2961 return s.entryNewValue1I(ssa.OpOffPtr, ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp) 2962 } 2963 2964 // etypesign returns the signed-ness of e, for integer/pointer etypes. 2965 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 2966 func etypesign(e EType) int8 { 2967 switch e { 2968 case TINT8, TINT16, TINT32, TINT64, TINT: 2969 return -1 2970 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 2971 return +1 2972 } 2973 return 0 2974 } 2975 2976 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node. 2977 // This improves the effectiveness of cse by using the same Aux values for the 2978 // same symbols. 2979 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} { 2980 switch sym.(type) { 2981 default: 2982 s.Fatalf("sym %v is of uknown type %T", sym, sym) 2983 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol: 2984 // these are the only valid types 2985 } 2986 2987 if lsym, ok := s.varsyms[n]; ok { 2988 return lsym 2989 } else { 2990 s.varsyms[n] = sym 2991 return sym 2992 } 2993 } 2994 2995 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 2996 // Also returns a bool reporting whether the returned value is "volatile", that is it 2997 // points to the outargs section and thus the referent will be clobbered by any call. 2998 // The value that the returned Value represents is guaranteed to be non-nil. 2999 // If bounded is true then this address does not require a nil check for its operand 3000 // even if that would otherwise be implied. 3001 func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) { 3002 t := ptrto(n.Type) 3003 switch n.Op { 3004 case ONAME: 3005 switch n.Class { 3006 case PEXTERN: 3007 // global variable 3008 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Sym}) 3009 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb) 3010 // TODO: Make OpAddr use AuxInt as well as Aux. 3011 if n.Xoffset != 0 { 3012 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 3013 } 3014 return v, false 3015 case PPARAM: 3016 // parameter slot 3017 v := s.decladdrs[n] 3018 if v != nil { 3019 return v, false 3020 } 3021 if n == nodfp { 3022 // Special arg that points to the frame pointer (Used by ORECOVER). 3023 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 3024 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false 3025 } 3026 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) 3027 return nil, false 3028 case PAUTO: 3029 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n}) 3030 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 3031 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 
3032 // ensure that we reuse symbols for out parameters so 3033 // that cse works on their addresses 3034 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n}) 3035 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false 3036 default: 3037 s.Fatalf("variable address class %v not implemented", classnames[n.Class]) 3038 return nil, false 3039 } 3040 case OINDREGSP: 3041 // indirect off REGSP 3042 // used for storing/loading arguments/returns to/from callees 3043 return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true 3044 case OINDEX: 3045 if n.Left.Type.IsSlice() { 3046 a := s.expr(n.Left) 3047 i := s.expr(n.Right) 3048 i = s.extendIndex(i, panicindex) 3049 len := s.newValue1(ssa.OpSliceLen, Types[TINT], a) 3050 if !n.Bounded { 3051 s.boundsCheck(i, len) 3052 } 3053 p := s.newValue1(ssa.OpSlicePtr, t, a) 3054 return s.newValue2(ssa.OpPtrIndex, t, p, i), false 3055 } else { // array 3056 a, isVolatile := s.addr(n.Left, bounded) 3057 i := s.expr(n.Right) 3058 i = s.extendIndex(i, panicindex) 3059 len := s.constInt(Types[TINT], n.Left.Type.NumElem()) 3060 if !n.Bounded { 3061 s.boundsCheck(i, len) 3062 } 3063 return s.newValue2(ssa.OpPtrIndex, ptrto(n.Left.Type.Elem()), a, i), isVolatile 3064 } 3065 case OIND: 3066 return s.exprPtr(n.Left, bounded, n.Lineno), false 3067 case ODOT: 3068 p, isVolatile := s.addr(n.Left, bounded) 3069 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile 3070 case ODOTPTR: 3071 p := s.exprPtr(n.Left, bounded, n.Lineno) 3072 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false 3073 case OCLOSUREVAR: 3074 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 3075 s.entryNewValue0(ssa.OpGetClosurePtr, ptrto(Types[TUINT8]))), false 3076 case OCONVNOP: 3077 addr, isVolatile := s.addr(n.Left, bounded) 3078 return s.newValue1(ssa.OpCopy, t, addr), isVolatile // ensure that addr has the right type 3079 case OCALLFUNC, OCALLINTER, OCALLMETH: 3080 return s.call(n, callNormal), true 3081 3082 default: 3083 s.Fatalf("unhandled addr %v", n.Op) 3084 return nil, false 3085 } 3086 } 3087 3088 // canSSA reports whether n is SSA-able. 3089 // n must be an ONAME (or an ODOT sequence with an ONAME base). 3090 func (s *state) canSSA(n *Node) bool { 3091 if Debug['N'] != 0 { 3092 return false 3093 } 3094 for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) { 3095 n = n.Left 3096 } 3097 if n.Op != ONAME { 3098 return false 3099 } 3100 if n.Addrtaken { 3101 return false 3102 } 3103 if n.isParamHeapCopy() { 3104 return false 3105 } 3106 if n.Class == PAUTOHEAP { 3107 Fatalf("canSSA of PAUTOHEAP %v", n) 3108 } 3109 switch n.Class { 3110 case PEXTERN: 3111 return false 3112 case PPARAMOUT: 3113 if hasdefer { 3114 // TODO: handle this case? Named return values must be 3115 // in memory so that the deferred function can see them. 3116 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 3117 return false 3118 } 3119 if s.cgoUnsafeArgs { 3120 // Cgo effectively takes the address of all result args, 3121 // but the compiler can't see that. 3122 return false 3123 } 3124 } 3125 if n.Class == PPARAM && n.String() == ".this" { 3126 // wrappers generated by genwrapper need to update 3127 // the .this pointer in place. 3128 // TODO: treat as a PPARMOUT? 3129 return false 3130 } 3131 return canSSAType(n.Type) 3132 // TODO: try to make more variables SSAable? 3133 } 3134 3135 // canSSA reports whether variables of type t are SSA-able. 
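// For example (assuming 8-byte pointers), a string, a slice, a [1]int array,
// or a struct{ x, y int } all stay within the 4*Widthptr limit below and can
// be kept in SSA form, while a [4]int array cannot (only 0- or 1-element
// arrays are handled) and neither can a struct with more than ssa.MaxStruct
// fields.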
3136 func canSSAType(t *Type) bool { 3137 dowidth(t) 3138 if t.Width > int64(4*Widthptr) { 3139 // 4*Widthptr is an arbitrary constant. We want it 3140 // to be at least 3*Widthptr so slices can be registerized. 3141 // Too big and we'll introduce too much register pressure. 3142 return false 3143 } 3144 switch t.Etype { 3145 case TARRAY: 3146 // We can't do larger arrays because dynamic indexing is 3147 // not supported on SSA variables. 3148 // TODO: allow if all indexes are constant. 3149 if t.NumElem() == 0 { 3150 return true 3151 } 3152 if t.NumElem() == 1 { 3153 return canSSAType(t.Elem()) 3154 } 3155 return false 3156 case TSTRUCT: 3157 if t.NumFields() > ssa.MaxStruct { 3158 return false 3159 } 3160 for _, t1 := range t.Fields().Slice() { 3161 if !canSSAType(t1.Type) { 3162 return false 3163 } 3164 } 3165 return true 3166 default: 3167 return true 3168 } 3169 } 3170 3171 // exprPtr evaluates n to a pointer and nil-checks it. 3172 func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value { 3173 p := s.expr(n) 3174 if bounded || n.NonNil { 3175 if s.f.Config.Debug_checknil() && lineno > 1 { 3176 s.f.Config.Warnl(lineno, "removed nil check") 3177 } 3178 return p 3179 } 3180 s.nilCheck(p) 3181 return p 3182 } 3183 3184 // nilCheck generates nil pointer checking code. 3185 // Used only for automatically inserted nil checks, 3186 // not for user code like 'x != nil'. 3187 func (s *state) nilCheck(ptr *ssa.Value) { 3188 if disable_checknil != 0 { 3189 return 3190 } 3191 s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem()) 3192 } 3193 3194 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. 3195 // Starts a new block on return. 3196 // idx is already converted to full int width. 3197 func (s *state) boundsCheck(idx, len *ssa.Value) { 3198 if Debug['B'] != 0 { 3199 return 3200 } 3201 3202 // bounds check 3203 cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len) 3204 s.check(cmp, panicindex) 3205 } 3206 3207 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not. 3208 // Starts a new block on return. 3209 // idx and len are already converted to full int width. 3210 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) { 3211 if Debug['B'] != 0 { 3212 return 3213 } 3214 3215 // bounds check 3216 cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len) 3217 s.check(cmp, panicslice) 3218 } 3219 3220 // If cmp (a bool) is false, panic using the given function. 3221 func (s *state) check(cmp *ssa.Value, fn *Node) { 3222 b := s.endBlock() 3223 b.Kind = ssa.BlockIf 3224 b.SetControl(cmp) 3225 b.Likely = ssa.BranchLikely 3226 bNext := s.f.NewBlock(ssa.BlockPlain) 3227 line := s.peekLine() 3228 bPanic := s.panics[funcLine{fn, line}] 3229 if bPanic == nil { 3230 bPanic = s.f.NewBlock(ssa.BlockPlain) 3231 s.panics[funcLine{fn, line}] = bPanic 3232 s.startBlock(bPanic) 3233 // The panic call takes/returns memory to ensure that the right 3234 // memory state is observed if the panic happens. 
3235 s.rtcall(fn, false, nil) 3236 } 3237 b.AddEdgeTo(bNext) 3238 b.AddEdgeTo(bPanic) 3239 s.startBlock(bNext) 3240 } 3241 3242 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { 3243 needcheck := true 3244 switch b.Op { 3245 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: 3246 if b.AuxInt != 0 { 3247 needcheck = false 3248 } 3249 } 3250 if needcheck { 3251 // do a size-appropriate check for zero 3252 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type)) 3253 s.check(cmp, panicdivide) 3254 } 3255 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 3256 } 3257 3258 // rtcall issues a call to the given runtime function fn with the listed args. 3259 // Returns a slice of results of the given result types. 3260 // The call is added to the end of the current block. 3261 // If returns is false, the block is marked as an exit block. 3262 // If returns is true, the block is marked as a call block. A new block 3263 // is started to load the return values. 3264 func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value { 3265 // Write args to the stack 3266 off := Ctxt.FixedFrameSize() 3267 for _, arg := range args { 3268 t := arg.Type 3269 off = Rnd(off, t.Alignment()) 3270 ptr := s.sp 3271 if off != 0 { 3272 ptr = s.newValue1I(ssa.OpOffPtr, t.PtrTo(), off, s.sp) 3273 } 3274 size := t.Size() 3275 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem()) 3276 off += size 3277 } 3278 off = Rnd(off, int64(Widthptr)) 3279 if Thearch.LinkArch.Name == "amd64p32" { 3280 // amd64p32 wants 8-byte alignment of the start of the return values. 3281 off = Rnd(off, 8) 3282 } 3283 3284 // Issue call 3285 call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem()) 3286 s.vars[&memVar] = call 3287 3288 if !returns { 3289 // Finish block 3290 b := s.endBlock() 3291 b.Kind = ssa.BlockExit 3292 b.SetControl(call) 3293 call.AuxInt = off - Ctxt.FixedFrameSize() 3294 if len(results) > 0 { 3295 Fatalf("panic call can't have results") 3296 } 3297 return nil 3298 } 3299 3300 // Load results 3301 res := make([]*ssa.Value, len(results)) 3302 for i, t := range results { 3303 off = Rnd(off, t.Alignment()) 3304 ptr := s.sp 3305 if off != 0 { 3306 ptr = s.newValue1I(ssa.OpOffPtr, ptrto(t), off, s.sp) 3307 } 3308 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) 3309 off += t.Size() 3310 } 3311 off = Rnd(off, int64(Widthptr)) 3312 3313 // Remember how much callee stack space we needed. 3314 call.AuxInt = off 3315 3316 return res 3317 } 3318 3319 // insertWBmove inserts the assignment *left = *right including a write barrier. 3320 // t is the type being assigned. 3321 // If right == nil, then we're zeroing *left. 
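// Editorial example (an assumption, not from the original source): a
// memory-to-memory copy of a non-SSA-able pointer-carrying value, e.g.
//	type T struct{ p *int; a [8]int }
//	dst = src // left = &dst, right = &src, t = T
// is the kind of assignment this helper handles; dst = T{} corresponds to
// the right == nil zeroing form.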
3322 func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightIsVolatile bool) { 3323 // if writeBarrier.enabled { 3324 // typedmemmove(&t, left, right) 3325 // } else { 3326 // *left = *right 3327 // } 3328 // 3329 // or 3330 // 3331 // if writeBarrier.enabled { 3332 // typedmemclr(&t, left) 3333 // } else { 3334 // *left = zeroValue 3335 // } 3336 3337 if s.noWB { 3338 s.Error("write barrier prohibited") 3339 } 3340 if s.WBLineno == 0 { 3341 s.WBLineno = left.Line 3342 } 3343 3344 var val *ssa.Value 3345 if right == nil { 3346 val = s.newValue2I(ssa.OpZeroWB, ssa.TypeMem, sizeAlignAuxInt(t), left, s.mem()) 3347 } else { 3348 var op ssa.Op 3349 if rightIsVolatile { 3350 op = ssa.OpMoveWBVolatile 3351 } else { 3352 op = ssa.OpMoveWB 3353 } 3354 val = s.newValue3I(op, ssa.TypeMem, sizeAlignAuxInt(t), left, right, s.mem()) 3355 } 3356 val.Aux = &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)} 3357 s.vars[&memVar] = val 3358 3359 // WB ops will be expanded to branches at writebarrier phase. 3360 // To make it easy, we put WB ops at the end of a block, so 3361 // that it does not need to split a block into two parts when 3362 // expanding WB ops. 3363 b := s.f.NewBlock(ssa.BlockPlain) 3364 s.endBlock().AddEdgeTo(b) 3365 s.startBlock(b) 3366 } 3367 3368 // insertWBstore inserts the assignment *left = right including a write barrier. 3369 // t is the type being assigned. 3370 func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) { 3371 // store scalar fields 3372 // if writeBarrier.enabled { 3373 // writebarrierptr for pointer fields 3374 // } else { 3375 // store pointer fields 3376 // } 3377 3378 if s.noWB { 3379 s.Error("write barrier prohibited") 3380 } 3381 if s.WBLineno == 0 { 3382 s.WBLineno = left.Line 3383 } 3384 s.storeTypeScalars(t, left, right, skip) 3385 s.storeTypePtrsWB(t, left, right) 3386 3387 // WB ops will be expanded to branches at writebarrier phase. 3388 // To make it easy, we put WB ops at the end of a block, so 3389 // that it does not need to split a block into two parts when 3390 // expanding WB ops. 3391 b := s.f.NewBlock(ssa.BlockPlain) 3392 s.endBlock().AddEdgeTo(b) 3393 s.startBlock(b) 3394 } 3395 3396 // do *left = right for all scalar (non-pointer) parts of t. 3397 func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) { 3398 switch { 3399 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): 3400 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem()) 3401 case t.IsPtrShaped(): 3402 // no scalar fields. 
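// Editorial note: the cases below rely on the usual runtime layouts
//	string    = {ptr, len}
//	slice     = {ptr, len, cap}
//	interface = {itab or type, data}
// with one word per field. Only the words that don't need a write barrier
// are stored here (including the interface's itab word); the pointer words
// are left to storeTypePtrs / storeTypePtrsWB.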
3403 case t.IsString(): 3404 if skip&skipLen != 0 { 3405 return 3406 } 3407 len := s.newValue1(ssa.OpStringLen, Types[TINT], right) 3408 lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left) 3409 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem()) 3410 case t.IsSlice(): 3411 if skip&skipLen == 0 { 3412 len := s.newValue1(ssa.OpSliceLen, Types[TINT], right) 3413 lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left) 3414 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem()) 3415 } 3416 if skip&skipCap == 0 { 3417 cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right) 3418 capAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), 2*s.config.IntSize, left) 3419 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem()) 3420 } 3421 case t.IsInterface(): 3422 // itab field doesn't need a write barrier (even though it is a pointer). 3423 itab := s.newValue1(ssa.OpITab, ptrto(Types[TUINT8]), right) 3424 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem()) 3425 case t.IsStruct(): 3426 n := t.NumFields() 3427 for i := 0; i < n; i++ { 3428 ft := t.FieldType(i) 3429 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3430 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3431 s.storeTypeScalars(ft.(*Type), addr, val, 0) 3432 } 3433 case t.IsArray() && t.NumElem() == 0: 3434 // nothing 3435 case t.IsArray() && t.NumElem() == 1: 3436 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0) 3437 default: 3438 s.Fatalf("bad write barrier type %v", t) 3439 } 3440 } 3441 3442 // do *left = right for all pointer parts of t. 3443 func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) { 3444 switch { 3445 case t.IsPtrShaped(): 3446 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem()) 3447 case t.IsString(): 3448 ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right) 3449 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3450 case t.IsSlice(): 3451 ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right) 3452 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3453 case t.IsInterface(): 3454 // itab field is treated as a scalar. 3455 idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right) 3456 idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINT8]), s.config.PtrSize, left) 3457 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem()) 3458 case t.IsStruct(): 3459 n := t.NumFields() 3460 for i := 0; i < n; i++ { 3461 ft := t.FieldType(i) 3462 if !haspointers(ft.(*Type)) { 3463 continue 3464 } 3465 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3466 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3467 s.storeTypePtrs(ft.(*Type), addr, val) 3468 } 3469 case t.IsArray() && t.NumElem() == 0: 3470 // nothing 3471 case t.IsArray() && t.NumElem() == 1: 3472 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3473 default: 3474 s.Fatalf("bad write barrier type %v", t) 3475 } 3476 } 3477 3478 // do *left = right for all pointer parts of t, with write barriers if necessary. 
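// Editorial note: this mirrors storeTypePtrs above but emits the *WB op
// variants (OpStoreWB and friends), which the writebarrier pass later
// expands into the writeBarrier.enabled branch.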
3479 func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) { 3480 switch { 3481 case t.IsPtrShaped(): 3482 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, right, s.mem()) 3483 case t.IsString(): 3484 ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right) 3485 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3486 case t.IsSlice(): 3487 ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right) 3488 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem()) 3489 case t.IsInterface(): 3490 // itab field is treated as a scalar. 3491 idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right) 3492 idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINT8]), s.config.PtrSize, left) 3493 s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem()) 3494 case t.IsStruct(): 3495 n := t.NumFields() 3496 for i := 0; i < n; i++ { 3497 ft := t.FieldType(i) 3498 if !haspointers(ft.(*Type)) { 3499 continue 3500 } 3501 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3502 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3503 s.storeTypePtrsWB(ft.(*Type), addr, val) 3504 } 3505 case t.IsArray() && t.NumElem() == 0: 3506 // nothing 3507 case t.IsArray() && t.NumElem() == 1: 3508 s.storeTypePtrsWB(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3509 default: 3510 s.Fatalf("bad write barrier type %v", t) 3511 } 3512 } 3513 3514 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result. 3515 // i,j,k may be nil, in which case they are set to their default value. 3516 // t is a slice, ptr to array, or string type. 3517 func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) { 3518 var elemtype *Type 3519 var ptrtype *Type 3520 var ptr *ssa.Value 3521 var len *ssa.Value 3522 var cap *ssa.Value 3523 zero := s.constInt(Types[TINT], 0) 3524 switch { 3525 case t.IsSlice(): 3526 elemtype = t.Elem() 3527 ptrtype = ptrto(elemtype) 3528 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v) 3529 len = s.newValue1(ssa.OpSliceLen, Types[TINT], v) 3530 cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v) 3531 case t.IsString(): 3532 elemtype = Types[TUINT8] 3533 ptrtype = ptrto(elemtype) 3534 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v) 3535 len = s.newValue1(ssa.OpStringLen, Types[TINT], v) 3536 cap = len 3537 case t.IsPtr(): 3538 if !t.Elem().IsArray() { 3539 s.Fatalf("bad ptr to array in slice %v\n", t) 3540 } 3541 elemtype = t.Elem().Elem() 3542 ptrtype = ptrto(elemtype) 3543 s.nilCheck(v) 3544 ptr = v 3545 len = s.constInt(Types[TINT], t.Elem().NumElem()) 3546 cap = len 3547 default: 3548 s.Fatalf("bad type in slice %v\n", t) 3549 } 3550 3551 // Set default values 3552 if i == nil { 3553 i = zero 3554 } 3555 if j == nil { 3556 j = len 3557 } 3558 if k == nil { 3559 k = cap 3560 } 3561 3562 // Panic if slice indices are not in bounds. 3563 s.sliceBoundsCheck(i, j) 3564 if j != k { 3565 s.sliceBoundsCheck(j, k) 3566 } 3567 if k != cap { 3568 s.sliceBoundsCheck(k, cap) 3569 } 3570 3571 // Generate the following code assuming that indexes are in bounds. 3572 // The masking is to make sure that we don't generate a slice 3573 // that points to the next object in memory. 
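// For example (editorial addition): v[4:4] on a *[4]int64 gives rcap == 0,
// so the mask zeroes delta and rptr stays at the array base rather than one
// past its end. In general: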
3574 // rlen = j - i 3575 // rcap = k - i 3576 // delta = i * elemsize 3577 // rptr = p + delta&mask(rcap) 3578 // result = (SliceMake rptr rlen rcap) 3579 // where mask(x) is 0 if x==0 and -1 if x>0. 3580 subOp := s.ssaOp(OSUB, Types[TINT]) 3581 mulOp := s.ssaOp(OMUL, Types[TINT]) 3582 andOp := s.ssaOp(OAND, Types[TINT]) 3583 rlen := s.newValue2(subOp, Types[TINT], j, i) 3584 var rcap *ssa.Value 3585 switch { 3586 case t.IsString(): 3587 // Capacity of the result is unimportant. However, we use 3588 // rcap to test if we've generated a zero-length slice. 3589 // Use length of strings for that. 3590 rcap = rlen 3591 case j == k: 3592 rcap = rlen 3593 default: 3594 rcap = s.newValue2(subOp, Types[TINT], k, i) 3595 } 3596 3597 var rptr *ssa.Value 3598 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 { 3599 // No pointer arithmetic necessary. 3600 rptr = ptr 3601 } else { 3602 // delta = # of bytes to offset pointer by. 3603 delta := s.newValue2(mulOp, Types[TINT], i, s.constInt(Types[TINT], elemtype.Width)) 3604 // If we're slicing to the point where the capacity is zero, 3605 // zero out the delta. 3606 mask := s.newValue1(ssa.OpSlicemask, Types[TINT], rcap) 3607 delta = s.newValue2(andOp, Types[TINT], delta, mask) 3608 // Compute rptr = ptr + delta 3609 rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta) 3610 } 3611 3612 return rptr, rlen, rcap 3613 } 3614 3615 type u2fcvtTab struct { 3616 geq, cvt2F, and, rsh, or, add ssa.Op 3617 one func(*state, ssa.Type, int64) *ssa.Value 3618 } 3619 3620 var u64_f64 u2fcvtTab = u2fcvtTab{ 3621 geq: ssa.OpGeq64, 3622 cvt2F: ssa.OpCvt64to64F, 3623 and: ssa.OpAnd64, 3624 rsh: ssa.OpRsh64Ux64, 3625 or: ssa.OpOr64, 3626 add: ssa.OpAdd64F, 3627 one: (*state).constInt64, 3628 } 3629 3630 var u64_f32 u2fcvtTab = u2fcvtTab{ 3631 geq: ssa.OpGeq64, 3632 cvt2F: ssa.OpCvt64to32F, 3633 and: ssa.OpAnd64, 3634 rsh: ssa.OpRsh64Ux64, 3635 or: ssa.OpOr64, 3636 add: ssa.OpAdd32F, 3637 one: (*state).constInt64, 3638 } 3639 3640 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3641 return s.uintTofloat(&u64_f64, n, x, ft, tt) 3642 } 3643 3644 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3645 return s.uintTofloat(&u64_f32, n, x, ft, tt) 3646 } 3647 3648 func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3649 // if x >= 0 { 3650 // result = (floatY) x 3651 // } else { 3652 // y = uintX(x) ; y = x & 1 3653 // z = uintX(x) ; z = z >> 1 3654 // z = z >> 1 3655 // z = z | y 3656 // result = floatY(z) 3657 // result = result + result 3658 // } 3659 // 3660 // Code borrowed from old code generator. 3661 // What's going on: large 64-bit "unsigned" looks like 3662 // negative number to hardware's integer-to-float 3663 // conversion. However, because the mantissa is only 3664 // 63 bits, we don't need the LSB, so instead we do an 3665 // unsigned right shift (divide by two), convert, and 3666 // double. However, before we do that, we need to be 3667 // sure that we do not lose a "1" if that made the 3668 // difference in the resulting rounding. Therefore, we 3669 // preserve it, and OR (not ADD) it back in. The case 3670 // that matters is when the eleven discarded bits are 3671 // equal to 10000000001; that rounds up, and the 1 cannot 3672 // be lost else it would round down if the LSB of the 3673 // candidate mantissa is 0. 
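// Editorial sketch of the else branch in plain Go (uint64-to-float64 case;
// the float32 table works the same way):
//	y := x & 1             // preserve the low bit
//	z := x>>1 | y          // halve, folding the low bit back in
//	r := float64(int64(z)) // z fits in 63 bits, so the signed conversion is safe
//	r += r                 // undo the halving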
3674 cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft)) 3675 b := s.endBlock() 3676 b.Kind = ssa.BlockIf 3677 b.SetControl(cmp) 3678 b.Likely = ssa.BranchLikely 3679 3680 bThen := s.f.NewBlock(ssa.BlockPlain) 3681 bElse := s.f.NewBlock(ssa.BlockPlain) 3682 bAfter := s.f.NewBlock(ssa.BlockPlain) 3683 3684 b.AddEdgeTo(bThen) 3685 s.startBlock(bThen) 3686 a0 := s.newValue1(cvttab.cvt2F, tt, x) 3687 s.vars[n] = a0 3688 s.endBlock() 3689 bThen.AddEdgeTo(bAfter) 3690 3691 b.AddEdgeTo(bElse) 3692 s.startBlock(bElse) 3693 one := cvttab.one(s, ft, 1) 3694 y := s.newValue2(cvttab.and, ft, x, one) 3695 z := s.newValue2(cvttab.rsh, ft, x, one) 3696 z = s.newValue2(cvttab.or, ft, z, y) 3697 a := s.newValue1(cvttab.cvt2F, tt, z) 3698 a1 := s.newValue2(cvttab.add, tt, a, a) 3699 s.vars[n] = a1 3700 s.endBlock() 3701 bElse.AddEdgeTo(bAfter) 3702 3703 s.startBlock(bAfter) 3704 return s.variable(n, n.Type) 3705 } 3706 3707 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 3708 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { 3709 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { 3710 s.Fatalf("node must be a map or a channel") 3711 } 3712 // if n == nil { 3713 // return 0 3714 // } else { 3715 // // len 3716 // return *((*int)n) 3717 // // cap 3718 // return *(((*int)n)+1) 3719 // } 3720 lenType := n.Type 3721 nilValue := s.constNil(Types[TUINTPTR]) 3722 cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue) 3723 b := s.endBlock() 3724 b.Kind = ssa.BlockIf 3725 b.SetControl(cmp) 3726 b.Likely = ssa.BranchUnlikely 3727 3728 bThen := s.f.NewBlock(ssa.BlockPlain) 3729 bElse := s.f.NewBlock(ssa.BlockPlain) 3730 bAfter := s.f.NewBlock(ssa.BlockPlain) 3731 3732 // length/capacity of a nil map/chan is zero 3733 b.AddEdgeTo(bThen) 3734 s.startBlock(bThen) 3735 s.vars[n] = s.zeroVal(lenType) 3736 s.endBlock() 3737 bThen.AddEdgeTo(bAfter) 3738 3739 b.AddEdgeTo(bElse) 3740 s.startBlock(bElse) 3741 if n.Op == OLEN { 3742 // length is stored in the first word for map/chan 3743 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem()) 3744 } else if n.Op == OCAP { 3745 // capacity is stored in the second word for chan 3746 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) 3747 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem()) 3748 } else { 3749 s.Fatalf("op must be OLEN or OCAP") 3750 } 3751 s.endBlock() 3752 bElse.AddEdgeTo(bAfter) 3753 3754 s.startBlock(bAfter) 3755 return s.variable(n, lenType) 3756 } 3757 3758 type f2uCvtTab struct { 3759 ltf, cvt2U, subf ssa.Op 3760 value func(*state, ssa.Type, float64) *ssa.Value 3761 } 3762 3763 var f32_u64 f2uCvtTab = f2uCvtTab{ 3764 ltf: ssa.OpLess32F, 3765 cvt2U: ssa.OpCvt32Fto64, 3766 subf: ssa.OpSub32F, 3767 value: (*state).constFloat32, 3768 } 3769 3770 var f64_u64 f2uCvtTab = f2uCvtTab{ 3771 ltf: ssa.OpLess64F, 3772 cvt2U: ssa.OpCvt64Fto64, 3773 subf: ssa.OpSub64F, 3774 value: (*state).constFloat64, 3775 } 3776 3777 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3778 return s.floatToUint(&f32_u64, n, x, ft, tt) 3779 } 3780 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3781 return s.floatToUint(&f64_u64, n, x, ft, tt) 3782 } 3783 3784 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value { 3785 // if x < 9223372036854775808.0 { 3786 // result = uintY(x) 3787 // } else { 3788 // y = x - 9223372036854775808.0 3789 // z = uintY(y) 3790 // result = z | 
-9223372036854775808 3791 // } 3792 twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0) 3793 cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63) 3794 b := s.endBlock() 3795 b.Kind = ssa.BlockIf 3796 b.SetControl(cmp) 3797 b.Likely = ssa.BranchLikely 3798 3799 bThen := s.f.NewBlock(ssa.BlockPlain) 3800 bElse := s.f.NewBlock(ssa.BlockPlain) 3801 bAfter := s.f.NewBlock(ssa.BlockPlain) 3802 3803 b.AddEdgeTo(bThen) 3804 s.startBlock(bThen) 3805 a0 := s.newValue1(cvttab.cvt2U, tt, x) 3806 s.vars[n] = a0 3807 s.endBlock() 3808 bThen.AddEdgeTo(bAfter) 3809 3810 b.AddEdgeTo(bElse) 3811 s.startBlock(bElse) 3812 y := s.newValue2(cvttab.subf, ft, x, twoToThe63) 3813 y = s.newValue1(cvttab.cvt2U, tt, y) 3814 z := s.constInt64(tt, -9223372036854775808) 3815 a1 := s.newValue2(ssa.OpOr64, tt, y, z) 3816 s.vars[n] = a1 3817 s.endBlock() 3818 bElse.AddEdgeTo(bAfter) 3819 3820 s.startBlock(bAfter) 3821 return s.variable(n, n.Type) 3822 } 3823 3824 // ifaceType returns the value for the word containing the type. 3825 // n is the node for the interface expression. 3826 // v is the corresponding value. 3827 func (s *state) ifaceType(n *Node, v *ssa.Value) *ssa.Value { 3828 byteptr := ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte) 3829 3830 if n.Type.IsEmptyInterface() { 3831 // Have *eface. The type is the first word in the struct. 3832 return s.newValue1(ssa.OpITab, byteptr, v) 3833 } 3834 3835 // Have *iface. 3836 // The first word in the struct is the *itab. 3837 // If the *itab is nil, return 0. 3838 // Otherwise, the second word in the *itab is the type. 3839 3840 tab := s.newValue1(ssa.OpITab, byteptr, v) 3841 s.vars[&typVar] = tab 3842 isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr)) 3843 b := s.endBlock() 3844 b.Kind = ssa.BlockIf 3845 b.SetControl(isnonnil) 3846 b.Likely = ssa.BranchLikely 3847 3848 bLoad := s.f.NewBlock(ssa.BlockPlain) 3849 bEnd := s.f.NewBlock(ssa.BlockPlain) 3850 3851 b.AddEdgeTo(bLoad) 3852 b.AddEdgeTo(bEnd) 3853 bLoad.AddEdgeTo(bEnd) 3854 3855 s.startBlock(bLoad) 3856 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab) 3857 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 3858 s.endBlock() 3859 3860 s.startBlock(bEnd) 3861 typ := s.variable(&typVar, byteptr) 3862 delete(s.vars, &typVar) 3863 return typ 3864 } 3865 3866 // dottype generates SSA for a type assertion node. 3867 // commaok indicates whether to panic or return a bool. 3868 // If commaok is false, resok will be nil. 3869 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { 3870 iface := s.expr(n.Left) 3871 typ := s.ifaceType(n.Left, iface) // actual concrete type 3872 target := s.expr(typename(n.Type)) // target type 3873 if !isdirectiface(n.Type) { 3874 // walk rewrites ODOTTYPE/OAS2DOTTYPE into runtime calls except for this case. 3875 Fatalf("dottype needs a direct iface type %v", n.Type) 3876 } 3877 3878 if Debug_typeassert > 0 { 3879 Warnl(n.Lineno, "type assertion inlined") 3880 } 3881 3882 // TODO: If we have a nonempty interface and its itab field is nil, 3883 // then this test is redundant and ifaceType should just branch directly to bFail. 
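// Editorial sketch (not in the original source): for a direct-iface type T,
// e.(T) compiles to roughly
//	if typeof(e) == T { res = e.data } else { panicdottype(...) }
// and r, ok := e.(T) replaces the panic branch with res, ok = nil, false.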
3884 cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target) 3885 b := s.endBlock() 3886 b.Kind = ssa.BlockIf 3887 b.SetControl(cond) 3888 b.Likely = ssa.BranchLikely 3889 3890 byteptr := ptrto(Types[TUINT8]) 3891 3892 bOk := s.f.NewBlock(ssa.BlockPlain) 3893 bFail := s.f.NewBlock(ssa.BlockPlain) 3894 b.AddEdgeTo(bOk) 3895 b.AddEdgeTo(bFail) 3896 3897 if !commaok { 3898 // on failure, panic by calling panicdottype 3899 s.startBlock(bFail) 3900 taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: typenamesym(n.Left.Type)}, s.sb) 3901 s.rtcall(panicdottype, false, nil, typ, target, taddr) 3902 3903 // on success, return idata field 3904 s.startBlock(bOk) 3905 return s.newValue1(ssa.OpIData, n.Type, iface), nil 3906 } 3907 3908 // commaok is the more complicated case because we have 3909 // a control flow merge point. 3910 bEnd := s.f.NewBlock(ssa.BlockPlain) 3911 3912 // type assertion succeeded 3913 s.startBlock(bOk) 3914 s.vars[&idataVar] = s.newValue1(ssa.OpIData, n.Type, iface) 3915 s.vars[&okVar] = s.constBool(true) 3916 s.endBlock() 3917 bOk.AddEdgeTo(bEnd) 3918 3919 // type assertion failed 3920 s.startBlock(bFail) 3921 s.vars[&idataVar] = s.constNil(byteptr) 3922 s.vars[&okVar] = s.constBool(false) 3923 s.endBlock() 3924 bFail.AddEdgeTo(bEnd) 3925 3926 // merge point 3927 s.startBlock(bEnd) 3928 res = s.variable(&idataVar, byteptr) 3929 resok = s.variable(&okVar, Types[TBOOL]) 3930 delete(s.vars, &idataVar) 3931 delete(s.vars, &okVar) 3932 return res, resok 3933 } 3934 3935 // checkgoto checks that a goto from from to to does not 3936 // jump into a block or jump over variable declarations. 3937 // It is a copy of checkgoto in the pre-SSA backend, 3938 // modified only for line number handling. 3939 // TODO: document how this works and why it is designed the way it is. 3940 func (s *state) checkgoto(from *Node, to *Node) { 3941 if from.Sym == to.Sym { 3942 return 3943 } 3944 3945 nf := 0 3946 for fs := from.Sym; fs != nil; fs = fs.Link { 3947 nf++ 3948 } 3949 nt := 0 3950 for fs := to.Sym; fs != nil; fs = fs.Link { 3951 nt++ 3952 } 3953 fs := from.Sym 3954 for ; nf > nt; nf-- { 3955 fs = fs.Link 3956 } 3957 if fs != to.Sym { 3958 // decide what to complain about. 3959 // prefer to complain about 'into block' over declarations, 3960 // so scan backward to find most recent block or else dcl. 3961 var block *Sym 3962 3963 var dcl *Sym 3964 ts := to.Sym 3965 for ; nt > nf; nt-- { 3966 if ts.Pkg == nil { 3967 block = ts 3968 } else { 3969 dcl = ts 3970 } 3971 ts = ts.Link 3972 } 3973 3974 for ts != fs { 3975 if ts.Pkg == nil { 3976 block = ts 3977 } else { 3978 dcl = ts 3979 } 3980 ts = ts.Link 3981 fs = fs.Link 3982 } 3983 3984 lno := from.Left.Lineno 3985 if block != nil { 3986 yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno)) 3987 } else { 3988 yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno)) 3989 } 3990 } 3991 } 3992 3993 // variable returns the value of a variable at the current location. 3994 func (s *state) variable(name *Node, t ssa.Type) *ssa.Value { 3995 v := s.vars[name] 3996 if v != nil { 3997 return v 3998 } 3999 v = s.fwdVars[name] 4000 if v != nil { 4001 return v 4002 } 4003 4004 if s.curBlock == s.f.Entry { 4005 // No variable should be live at entry. 4006 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v) 4007 } 4008 // Make a FwdRef, which records a value that's live on block input. 
4009 // We'll find the matching definition as part of insertPhis. 4010 v = s.newValue0A(ssa.OpFwdRef, t, name) 4011 s.fwdVars[name] = v 4012 s.addNamedValue(name, v) 4013 return v 4014 } 4015 4016 func (s *state) mem() *ssa.Value { 4017 return s.variable(&memVar, ssa.TypeMem) 4018 } 4019 4020 func (s *state) addNamedValue(n *Node, v *ssa.Value) { 4021 if n.Class == Pxxx { 4022 // Don't track our dummy nodes (&memVar etc.). 4023 return 4024 } 4025 if n.IsAutoTmp() { 4026 // Don't track temporary variables. 4027 return 4028 } 4029 if n.Class == PPARAMOUT { 4030 // Don't track named output values. This prevents return values 4031 // from being assigned too early. See #14591 and #14762. TODO: allow this. 4032 return 4033 } 4034 if n.Class == PAUTO && n.Xoffset != 0 { 4035 s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) 4036 } 4037 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} 4038 values, ok := s.f.NamedValues[loc] 4039 if !ok { 4040 s.f.Names = append(s.f.Names, loc) 4041 } 4042 s.f.NamedValues[loc] = append(values, v) 4043 } 4044 4045 // Branch is an unresolved branch. 4046 type Branch struct { 4047 P *obj.Prog // branch instruction 4048 B *ssa.Block // target 4049 } 4050 4051 // SSAGenState contains state needed during Prog generation. 4052 type SSAGenState struct { 4053 // Branches remembers all the branch instructions we've seen 4054 // and where they would like to go. 4055 Branches []Branch 4056 4057 // bstart remembers where each block starts (indexed by block ID) 4058 bstart []*obj.Prog 4059 4060 // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 4061 SSEto387 map[int16]int16 4062 // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. 4063 ScratchFpMem *Node 4064 } 4065 4066 // Pc returns the current Prog. 4067 func (s *SSAGenState) Pc() *obj.Prog { 4068 return pc 4069 } 4070 4071 // SetLineno sets the current source line number. 4072 func (s *SSAGenState) SetLineno(l int32) { 4073 lineno = l 4074 } 4075 4076 // genssa appends entries to ptxt for each instruction in f. 4077 // gcargs and gclocals are filled in with pointer maps for the frame. 4078 func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) { 4079 var s SSAGenState 4080 4081 e := f.Config.Frontend().(*ssaExport) 4082 4083 // Remember where each block starts. 4084 s.bstart = make([]*obj.Prog, f.NumBlocks()) 4085 4086 var valueProgs map[*obj.Prog]*ssa.Value 4087 var blockProgs map[*obj.Prog]*ssa.Block 4088 var logProgs = e.log 4089 if logProgs { 4090 valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues()) 4091 blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) 4092 f.Logf("genssa %s\n", f.Name) 4093 blockProgs[pc] = f.Blocks[0] 4094 } 4095 4096 if Thearch.Use387 { 4097 s.SSEto387 = map[int16]int16{} 4098 } 4099 4100 s.ScratchFpMem = scratchFpMem 4101 scratchFpMem = nil 4102 4103 // Emit basic blocks 4104 for i, b := range f.Blocks { 4105 s.bstart[b.ID] = pc 4106 // Emit values in block 4107 Thearch.SSAMarkMoves(&s, b) 4108 for _, v := range b.Values { 4109 x := pc 4110 Thearch.SSAGenValue(&s, v) 4111 if logProgs { 4112 for ; x != pc; x = x.Link { 4113 valueProgs[x] = v 4114 } 4115 } 4116 } 4117 // Emit control flow instructions for block 4118 var next *ssa.Block 4119 if i < len(f.Blocks)-1 && Debug['N'] == 0 { 4120 // If -N, leave next==nil so every block with successors 4121 // ends in a JMP (except call blocks - plive doesn't like 4122 // select{send,recv} followed by a JMP call). 
Helps keep 4123 // line numbers for otherwise empty blocks. 4124 next = f.Blocks[i+1] 4125 } 4126 x := pc 4127 Thearch.SSAGenBlock(&s, b, next) 4128 if logProgs { 4129 for ; x != pc; x = x.Link { 4130 blockProgs[x] = b 4131 } 4132 } 4133 } 4134 4135 // Resolve branches 4136 for _, br := range s.Branches { 4137 br.P.To.Val = s.bstart[br.B.ID] 4138 } 4139 4140 if logProgs { 4141 for p := ptxt; p != nil; p = p.Link { 4142 var s string 4143 if v, ok := valueProgs[p]; ok { 4144 s = v.String() 4145 } else if b, ok := blockProgs[p]; ok { 4146 s = b.String() 4147 } else { 4148 s = " " // most value and branch strings are 2-3 characters long 4149 } 4150 f.Logf("%s\t%s\n", s, p) 4151 } 4152 if f.Config.HTML != nil { 4153 saved := ptxt.Ctxt.LineHist.PrintFilenameOnly 4154 ptxt.Ctxt.LineHist.PrintFilenameOnly = true 4155 var buf bytes.Buffer 4156 buf.WriteString("<code>") 4157 buf.WriteString("<dl class=\"ssa-gen\">") 4158 for p := ptxt; p != nil; p = p.Link { 4159 buf.WriteString("<dt class=\"ssa-prog-src\">") 4160 if v, ok := valueProgs[p]; ok { 4161 buf.WriteString(v.HTML()) 4162 } else if b, ok := blockProgs[p]; ok { 4163 buf.WriteString(b.HTML()) 4164 } 4165 buf.WriteString("</dt>") 4166 buf.WriteString("<dd class=\"ssa-prog\">") 4167 buf.WriteString(html.EscapeString(p.String())) 4168 buf.WriteString("</dd>") 4169 buf.WriteString("</li>") 4170 } 4171 buf.WriteString("</dl>") 4172 buf.WriteString("</code>") 4173 f.Config.HTML.WriteColumn("genssa", buf.String()) 4174 ptxt.Ctxt.LineHist.PrintFilenameOnly = saved 4175 } 4176 } 4177 4178 // Emit static data 4179 if f.StaticData != nil { 4180 for _, n := range f.StaticData.([]*Node) { 4181 if !gen_as_init(n, false) { 4182 Fatalf("non-static data marked as static: %v\n\n", n) 4183 } 4184 } 4185 } 4186 4187 // Generate gc bitmaps. 4188 liveness(Curfn, ptxt, gcargs, gclocals) 4189 4190 // Add frame prologue. Zero ambiguously live variables. 4191 Thearch.Defframe(ptxt) 4192 if Debug['f'] != 0 { 4193 frame(0) 4194 } 4195 4196 // Remove leftover instrumentation from the instruction stream. 4197 removevardef(ptxt) 4198 4199 f.Config.HTML.Close() 4200 f.Config.HTML = nil 4201 } 4202 4203 type FloatingEQNEJump struct { 4204 Jump obj.As 4205 Index int 4206 } 4207 4208 func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch { 4209 p := Prog(jumps.Jump) 4210 p.To.Type = obj.TYPE_BRANCH 4211 to := jumps.Index 4212 branches = append(branches, Branch{p, b.Succs[to].Block()}) 4213 if to == 1 { 4214 likely = -likely 4215 } 4216 // liblink reorders the instruction stream as it sees fit. 4217 // Pass along what we know so liblink can make use of it. 4218 // TODO: Once we've fully switched to SSA, 4219 // make liblink leave our output alone. 
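// Editorial note: the hint is encoded as a constant source operand on the
// branch: 0 for unlikely taken, 1 for likely taken (see the switch below),
// which liblink can use when laying out the code.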
4220 switch likely { 4221 case ssa.BranchUnlikely: 4222 p.From.Type = obj.TYPE_CONST 4223 p.From.Offset = 0 4224 case ssa.BranchLikely: 4225 p.From.Type = obj.TYPE_CONST 4226 p.From.Offset = 1 4227 } 4228 return branches 4229 } 4230 4231 func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) { 4232 likely := b.Likely 4233 switch next { 4234 case b.Succs[0].Block(): 4235 s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches) 4236 s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches) 4237 case b.Succs[1].Block(): 4238 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4239 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4240 default: 4241 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches) 4242 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches) 4243 q := Prog(obj.AJMP) 4244 q.To.Type = obj.TYPE_BRANCH 4245 s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) 4246 } 4247 } 4248 4249 func AuxOffset(v *ssa.Value) (offset int64) { 4250 if v.Aux == nil { 4251 return 0 4252 } 4253 switch sym := v.Aux.(type) { 4254 4255 case *ssa.AutoSymbol: 4256 n := sym.Node.(*Node) 4257 return n.Xoffset 4258 } 4259 return 0 4260 } 4261 4262 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a. 4263 func AddAux(a *obj.Addr, v *ssa.Value) { 4264 AddAux2(a, v, v.AuxInt) 4265 } 4266 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { 4267 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR { 4268 v.Fatalf("bad AddAux addr %v", a) 4269 } 4270 // add integer offset 4271 a.Offset += offset 4272 4273 // If no additional symbol offset, we're done. 4274 if v.Aux == nil { 4275 return 4276 } 4277 // Add symbol's offset from its base register. 4278 switch sym := v.Aux.(type) { 4279 case *ssa.ExternSymbol: 4280 a.Name = obj.NAME_EXTERN 4281 switch s := sym.Sym.(type) { 4282 case *Sym: 4283 a.Sym = Linksym(s) 4284 case *obj.LSym: 4285 a.Sym = s 4286 default: 4287 v.Fatalf("ExternSymbol.Sym is %T", s) 4288 } 4289 case *ssa.ArgSymbol: 4290 n := sym.Node.(*Node) 4291 a.Name = obj.NAME_PARAM 4292 a.Node = n 4293 a.Sym = Linksym(n.Orig.Sym) 4294 a.Offset += n.Xoffset 4295 case *ssa.AutoSymbol: 4296 n := sym.Node.(*Node) 4297 a.Name = obj.NAME_AUTO 4298 a.Node = n 4299 a.Sym = Linksym(n.Sym) 4300 a.Offset += n.Xoffset 4301 default: 4302 v.Fatalf("aux in %s not implemented %#v", v, v.Aux) 4303 } 4304 } 4305 4306 // sizeAlignAuxInt returns an AuxInt encoding the size and alignment of type t. 4307 func sizeAlignAuxInt(t *Type) int64 { 4308 return ssa.MakeSizeAndAlign(t.Size(), t.Alignment()).Int64() 4309 } 4310 4311 // extendIndex extends v to a full int width. 4312 // panic using the given function if v does not fit in an int (only on 32-bit archs). 4313 func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value { 4314 size := v.Type.Size() 4315 if size == s.config.IntSize { 4316 return v 4317 } 4318 if size > s.config.IntSize { 4319 // truncate 64-bit indexes on 32-bit pointer archs. Test the 4320 // high word and branch to out-of-bounds failure if it is not 0. 
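// Editorial example: on a 32-bit target, indexing with an int64 i emits a
// check that Int64Hi(i) == 0 (panicking via panicfn otherwise) and then
// truncates i to the 32-bit int width; the check is omitted when bounds
// checking is disabled with -B.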
4321 if Debug['B'] == 0 { 4322 hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v) 4323 cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0)) 4324 s.check(cmp, panicfn) 4325 } 4326 return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v) 4327 } 4328 4329 // Extend value to the required size 4330 var op ssa.Op 4331 if v.Type.IsSigned() { 4332 switch 10*size + s.config.IntSize { 4333 case 14: 4334 op = ssa.OpSignExt8to32 4335 case 18: 4336 op = ssa.OpSignExt8to64 4337 case 24: 4338 op = ssa.OpSignExt16to32 4339 case 28: 4340 op = ssa.OpSignExt16to64 4341 case 48: 4342 op = ssa.OpSignExt32to64 4343 default: 4344 s.Fatalf("bad signed index extension %s", v.Type) 4345 } 4346 } else { 4347 switch 10*size + s.config.IntSize { 4348 case 14: 4349 op = ssa.OpZeroExt8to32 4350 case 18: 4351 op = ssa.OpZeroExt8to64 4352 case 24: 4353 op = ssa.OpZeroExt16to32 4354 case 28: 4355 op = ssa.OpZeroExt16to64 4356 case 48: 4357 op = ssa.OpZeroExt32to64 4358 default: 4359 s.Fatalf("bad unsigned index extension %s", v.Type) 4360 } 4361 } 4362 return s.newValue1(op, Types[TINT], v) 4363 } 4364 4365 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values. 4366 // Called during ssaGenValue. 4367 func CheckLoweredPhi(v *ssa.Value) { 4368 if v.Op != ssa.OpPhi { 4369 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString()) 4370 } 4371 if v.Type.IsMemory() { 4372 return 4373 } 4374 f := v.Block.Func 4375 loc := f.RegAlloc[v.ID] 4376 for _, a := range v.Args { 4377 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? 4378 v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func) 4379 } 4380 } 4381 } 4382 4383 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block. 4384 // The output of LoweredGetClosurePtr is generally hardwired to the correct register. 4385 // That register contains the closure pointer on closure entry. 4386 func CheckLoweredGetClosurePtr(v *ssa.Value) { 4387 entry := v.Block.Func.Entry 4388 if entry != v.Block || entry.Values[0] != v { 4389 Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) 4390 } 4391 } 4392 4393 // KeepAlive marks the variable referenced by OpKeepAlive as live. 4394 // Called during ssaGenValue. 4395 func KeepAlive(v *ssa.Value) { 4396 if v.Op != ssa.OpKeepAlive { 4397 v.Fatalf("KeepAlive called with non-KeepAlive value: %v", v.LongString()) 4398 } 4399 if !v.Args[0].Type.IsPtrShaped() { 4400 v.Fatalf("keeping non-pointer alive %v", v.Args[0]) 4401 } 4402 n, off := AutoVar(v.Args[0]) 4403 if n == nil { 4404 v.Fatalf("KeepAlive with non-spilled value %s %s", v, v.Args[0]) 4405 } 4406 if off != 0 { 4407 v.Fatalf("KeepAlive with non-zero offset spill location %v:%d", n, off) 4408 } 4409 Gvarlive(n) 4410 } 4411 4412 // AutoVar returns a *Node and int64 representing the auto variable and offset within it 4413 // where v should be spilled. 
4414 func AutoVar(v *ssa.Value) (*Node, int64) { 4415 loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) 4416 if v.Type.Size() > loc.Type.Size() { 4417 v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) 4418 } 4419 return loc.N.(*Node), loc.Off 4420 } 4421 4422 func AddrAuto(a *obj.Addr, v *ssa.Value) { 4423 n, off := AutoVar(v) 4424 a.Type = obj.TYPE_MEM 4425 a.Node = n 4426 a.Sym = Linksym(n.Sym) 4427 a.Offset = n.Xoffset + off 4428 if n.Class == PPARAM || n.Class == PPARAMOUT { 4429 a.Name = obj.NAME_PARAM 4430 } else { 4431 a.Name = obj.NAME_AUTO 4432 } 4433 } 4434 4435 func (s *SSAGenState) AddrScratch(a *obj.Addr) { 4436 if s.ScratchFpMem == nil { 4437 panic("no scratch memory available; forgot to declare usesScratch for Op?") 4438 } 4439 a.Type = obj.TYPE_MEM 4440 a.Name = obj.NAME_AUTO 4441 a.Node = s.ScratchFpMem 4442 a.Sym = Linksym(s.ScratchFpMem.Sym) 4443 a.Reg = int16(Thearch.REGSP) 4444 a.Offset = s.ScratchFpMem.Xoffset 4445 } 4446 4447 // fieldIdx finds the index of the field referred to by the ODOT node n. 4448 func fieldIdx(n *Node) int { 4449 t := n.Left.Type 4450 f := n.Sym 4451 if !t.IsStruct() { 4452 panic("ODOT's LHS is not a struct") 4453 } 4454 4455 var i int 4456 for _, t1 := range t.Fields().Slice() { 4457 if t1.Sym != f { 4458 i++ 4459 continue 4460 } 4461 if t1.Offset != n.Xoffset { 4462 panic("field offset doesn't match") 4463 } 4464 return i 4465 } 4466 panic(fmt.Sprintf("can't find field in expr %v\n", n)) 4467 4468 // TODO: keep the result of this function somewhere in the ODOT Node 4469 // so we don't have to recompute it each time we need it. 4470 } 4471 4472 // ssaExport exports a bunch of compiler services for the ssa backend. 4473 type ssaExport struct { 4474 log bool 4475 } 4476 4477 func (s *ssaExport) TypeBool() ssa.Type { return Types[TBOOL] } 4478 func (s *ssaExport) TypeInt8() ssa.Type { return Types[TINT8] } 4479 func (s *ssaExport) TypeInt16() ssa.Type { return Types[TINT16] } 4480 func (s *ssaExport) TypeInt32() ssa.Type { return Types[TINT32] } 4481 func (s *ssaExport) TypeInt64() ssa.Type { return Types[TINT64] } 4482 func (s *ssaExport) TypeUInt8() ssa.Type { return Types[TUINT8] } 4483 func (s *ssaExport) TypeUInt16() ssa.Type { return Types[TUINT16] } 4484 func (s *ssaExport) TypeUInt32() ssa.Type { return Types[TUINT32] } 4485 func (s *ssaExport) TypeUInt64() ssa.Type { return Types[TUINT64] } 4486 func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] } 4487 func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] } 4488 func (s *ssaExport) TypeInt() ssa.Type { return Types[TINT] } 4489 func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] } 4490 func (s *ssaExport) TypeString() ssa.Type { return Types[TSTRING] } 4491 func (s *ssaExport) TypeBytePtr() ssa.Type { return ptrto(Types[TUINT8]) } 4492 4493 // StringData returns a symbol (a *Sym wrapped in an interface) which 4494 // is the data component of a global string constant containing s. 4495 func (*ssaExport) StringData(s string) interface{} { 4496 // TODO: is idealstring correct? It might not matter... 
4497 data := stringsym(s) 4498 return &ssa.ExternSymbol{Typ: idealstring, Sym: data} 4499 } 4500 4501 func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode { 4502 n := temp(t.(*Type)) // Note: adds new auto to Curfn.Func.Dcl list 4503 return n 4504 } 4505 4506 func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4507 n := name.N.(*Node) 4508 ptrType := ptrto(Types[TUINT8]) 4509 lenType := Types[TINT] 4510 if n.Class == PAUTO && !n.Addrtaken { 4511 // Split this string up into two separate variables. 4512 p := e.namedAuto(n.Sym.Name+".ptr", ptrType) 4513 l := e.namedAuto(n.Sym.Name+".len", lenType) 4514 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0} 4515 } 4516 // Return the two parts of the larger variable. 4517 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)} 4518 } 4519 4520 func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4521 n := name.N.(*Node) 4522 t := ptrto(Types[TUINT8]) 4523 if n.Class == PAUTO && !n.Addrtaken { 4524 // Split this interface up into two separate variables. 4525 f := ".itab" 4526 if n.Type.IsEmptyInterface() { 4527 f = ".type" 4528 } 4529 c := e.namedAuto(n.Sym.Name+f, t) 4530 d := e.namedAuto(n.Sym.Name+".data", t) 4531 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4532 } 4533 // Return the two parts of the larger variable. 4534 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)} 4535 } 4536 4537 func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { 4538 n := name.N.(*Node) 4539 ptrType := ptrto(name.Type.ElemType().(*Type)) 4540 lenType := Types[TINT] 4541 if n.Class == PAUTO && !n.Addrtaken { 4542 // Split this slice up into three separate variables. 4543 p := e.namedAuto(n.Sym.Name+".ptr", ptrType) 4544 l := e.namedAuto(n.Sym.Name+".len", lenType) 4545 c := e.namedAuto(n.Sym.Name+".cap", lenType) 4546 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0} 4547 } 4548 // Return the three parts of the larger variable. 4549 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, 4550 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}, 4551 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)} 4552 } 4553 4554 func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4555 n := name.N.(*Node) 4556 s := name.Type.Size() / 2 4557 var t *Type 4558 if s == 8 { 4559 t = Types[TFLOAT64] 4560 } else { 4561 t = Types[TFLOAT32] 4562 } 4563 if n.Class == PAUTO && !n.Addrtaken { 4564 // Split this complex up into two separate variables. 4565 c := e.namedAuto(n.Sym.Name+".real", t) 4566 d := e.namedAuto(n.Sym.Name+".imag", t) 4567 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4568 } 4569 // Return the two parts of the larger variable. 4570 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s} 4571 } 4572 4573 func (e *ssaExport) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4574 n := name.N.(*Node) 4575 var t *Type 4576 if name.Type.IsSigned() { 4577 t = Types[TINT32] 4578 } else { 4579 t = Types[TUINT32] 4580 } 4581 if n.Class == PAUTO && !n.Addrtaken { 4582 // Split this int64 up into two separate variables. 
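// Editorial note: the ".hi" half keeps the 32-bit signedness of the original
// type while ".lo" is always uint32; the non-auto path below puts lo at
// offset 0 and hi at offset 4, matching the little-endian 32-bit targets
// supported here.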
4583 h := e.namedAuto(n.Sym.Name+".hi", t) 4584 l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32]) 4585 return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0} 4586 } 4587 // Return the two parts of the larger variable. 4588 // Assuming little endian (we don't support big endian 32-bit architecture yet) 4589 return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off} 4590 } 4591 4592 func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { 4593 n := name.N.(*Node) 4594 st := name.Type 4595 ft := st.FieldType(i) 4596 if n.Class == PAUTO && !n.Addrtaken { 4597 // Note: the _ field may appear several times. But 4598 // have no fear, identically-named but distinct Autos are 4599 // ok, albeit maybe confusing for a debugger. 4600 x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft) 4601 return ssa.LocalSlot{N: x, Type: ft, Off: 0} 4602 } 4603 return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)} 4604 } 4605 4606 func (e *ssaExport) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { 4607 n := name.N.(*Node) 4608 at := name.Type 4609 if at.NumElem() != 1 { 4610 Fatalf("bad array size") 4611 } 4612 et := at.ElemType() 4613 if n.Class == PAUTO && !n.Addrtaken { 4614 x := e.namedAuto(n.Sym.Name+"[0]", et) 4615 return ssa.LocalSlot{N: x, Type: et, Off: 0} 4616 } 4617 return ssa.LocalSlot{N: n, Type: et, Off: name.Off} 4618 } 4619 4620 // namedAuto returns a new AUTO variable with the given name and type. 4621 // These are exposed to the debugger. 4622 func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode { 4623 t := typ.(*Type) 4624 s := &Sym{Name: name, Pkg: localpkg} 4625 n := nod(ONAME, nil, nil) 4626 s.Def = n 4627 s.Def.Used = true 4628 n.Sym = s 4629 n.Type = t 4630 n.Class = PAUTO 4631 n.Addable = true 4632 n.Ullman = 1 4633 n.Esc = EscNever 4634 n.Xoffset = 0 4635 n.Name.Curfn = Curfn 4636 Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) 4637 4638 dowidth(t) 4639 return n 4640 } 4641 4642 func (e *ssaExport) CanSSA(t ssa.Type) bool { 4643 return canSSAType(t.(*Type)) 4644 } 4645 4646 func (e *ssaExport) Line(line int32) string { 4647 return linestr(line) 4648 } 4649 4650 // Log logs a message from the compiler. 4651 func (e *ssaExport) Logf(msg string, args ...interface{}) { 4652 if e.log { 4653 fmt.Printf(msg, args...) 4654 } 4655 } 4656 4657 func (e *ssaExport) Log() bool { 4658 return e.log 4659 } 4660 4661 // Fatal reports a compiler error and exits. 4662 func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) { 4663 lineno = line 4664 Fatalf(msg, args...) 4665 } 4666 4667 // Warnl reports a "warning", which is usually flag-triggered 4668 // logging output for the benefit of tests. 4669 func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) { 4670 Warnl(line, fmt_, args...) 4671 } 4672 4673 func (e *ssaExport) Debug_checknil() bool { 4674 return Debug_checknil != 0 4675 } 4676 4677 func (e *ssaExport) Debug_wb() bool { 4678 return Debug_wb != 0 4679 } 4680 4681 func (e *ssaExport) Syslook(name string) interface{} { 4682 return syslook(name).Sym 4683 } 4684 4685 func (n *Node) Typ() ssa.Type { 4686 return n.Type 4687 }
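// Editorial summary (added, not part of the original source): the Split*
// methods above decompose multi-word values for the SSA backend, either into
// fresh named autos (when the variable is an address-not-taken PAUTO) or into
// offsets within the original slot:
//	string                    -> ptr @ 0, len @ Widthptr
//	interface                 -> itab/type @ 0, data @ Widthptr
//	slice                     -> ptr @ 0, len @ Widthptr, cap @ 2*Widthptr
//	complex                   -> real @ 0, imag @ size/2
//	64-bit int on 32-bit arch -> lo @ 0, hi @ 4 (little endian)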