github.com/tidwall/go@v0.0.0-20170415222209-6694a6888b7d/src/cmd/compile/internal/gc/ssa.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaCache *ssa.Cache

func initssaconfig() {
	types_ := ssa.Types{
		Bool:       types.Types[TBOOL],
		Int8:       types.Types[TINT8],
		Int16:      types.Types[TINT16],
		Int32:      types.Types[TINT32],
		Int64:      types.Types[TINT64],
		UInt8:      types.Types[TUINT8],
		UInt16:     types.Types[TUINT16],
		UInt32:     types.Types[TUINT32],
		UInt64:     types.Types[TUINT64],
		Float32:    types.Types[TFLOAT32],
		Float64:    types.Types[TFLOAT64],
		Int:        types.Types[TINT],
		Uintptr:    types.Types[TUINTPTR],
		String:     types.Types[TSTRING],
		BytePtr:    types.NewPtr(types.Types[TUINT8]),
		Int32Ptr:   types.NewPtr(types.Types[TINT32]),
		UInt32Ptr:  types.NewPtr(types.Types[TUINT32]),
		IntPtr:     types.NewPtr(types.Types[TINT]),
		UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
		Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
		Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
		BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
	}
	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
	_ = types.NewPtr(types.NewPtr(types.Idealstring))                 // **string
	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewSlice(types.Idealstring))               // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[TINT16])                             // *int16
	_ = types.NewPtr(types.Types[TINT64])                             // *int64
	_ = types.NewPtr(types.Errortype)                                 // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
	if thearch.LinkArch.Name == "386" {
		ssaConfig.Set387(thearch.Use387)
	}
	ssaCache = new(ssa.Cache)

	// Set up some runtime functions we'll need to call.
	Newproc = Sysfunc("newproc")
	Deferproc = Sysfunc("deferproc")
	Deferreturn = Sysfunc("deferreturn")
	Duffcopy = Sysfunc("duffcopy")
	Duffzero = Sysfunc("duffzero")
	panicindex = Sysfunc("panicindex")
	panicslice = Sysfunc("panicslice")
	panicdivide = Sysfunc("panicdivide")
	growslice = Sysfunc("growslice")
	panicdottypeE = Sysfunc("panicdottypeE")
	panicdottypeI = Sysfunc("panicdottypeI")
	panicnildottype = Sysfunc("panicnildottype")
	assertE2I = Sysfunc("assertE2I")
	assertE2I2 = Sysfunc("assertE2I2")
	assertI2I = Sysfunc("assertI2I")
	assertI2I2 = Sysfunc("assertI2I2")
}

// buildssa builds an SSA function.
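// If the GOSSAFUNC environment variable names this function, buildssa also
// dumps the enter/body/exit node lists and writes an ssa.html trace for it
// (see the HTMLWriter setup below).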
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	s.hasdefer = fn.Func.HasDefer()
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Config = ssaConfig
	s.f.Cache = ssaCache
	s.f.Cache.Reset()
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
	s.f.Name = name
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	defer func() {
		if s.f.WBPos.IsKnown() {
			fn.Func.WBPos = s.f.WBPos
		}
	}()
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}

	if name == os.Getenv("GOSSAFUNC") {
		s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %s unimplemented", classnames[n.Class])
		}
	}

	// Populate SSAable arguments.
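	// Illustrative note: for a parameter x of func f(x int) that satisfies
	// s.canSSA, the loop below seeds s.vars[x] with an OpArg value, so later
	// reads of x use the SSA value directly instead of a stack load.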
	for _, n := range fn.Func.Dcl {
		if n.Class == PPARAM && s.canSSA(n) {
			s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	s.insertPhis()

	// Don't carry a reference to this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)
	return s.f
}

type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *Node

	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	cgoUnsafeArgs bool
	hasdefer      bool // whether the function contains a defer statement
}

type funcLine struct {
	f    *obj.LSym
	line src.XPos
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "typ"}}
	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "ok"}}
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	b.Pos = s.peekPos()
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit a node with a missing line number;
		// use the parent line number in this case.
		line = s.peekPos()
		if Debug['K'] != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	}
	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t ssa.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t ssa.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekPos(), op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekPos(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekPos(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekPos(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekPos(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}

// stmtList converts the statement list l to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Pos)
	defer s.popLine()

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op != OLABEL {
		return
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
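				// (Effect of the code above: calls that are known never
				// to return, such as runtime.throw and gopanic, end the
				// current block with BlockExit, so anything following
				// them is treated as unreachable.)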
			}
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		deref := false
		if !canSSAType(n.Rlist.First().Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, deref, 0)
		s.assign(n.List.Second(), resok, false, 0)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		s.assign(n.List.First(), v1, false, 0)
		s.assign(n.List.Second(), v2, false, 0)
		return

	case ODCL:
		if n.Left.Class == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym
		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.labeledControl(); ctl != nil {
			s.labeledNodes[ctl] = lab
		}

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

	case OAS:
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//   VARDEF x
			//   COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		var t *types.Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
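				// Illustrative: the check below matches self-append
				// statements such as s = append(s, v), where the
				// destination is the same slice being appended to.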
				if samesafeexpr(n.Left, rhs.List.First()) {
					if !s.canSSA(n.Left) {
						if Debug_append > 0 {
							Warnl(n.Pos, "append: len-only update")
						}
						s.append(rhs, true)
						return
					} else {
						if Debug_append > 0 { // replicating old diagnostic message
							Warnl(n.Pos, "append: len-only update (in local slice)")
						}
					}
				}
			}
		}
		var r *ssa.Value
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//   tmp = len(*p)
			//   (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, deref, skip)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = Linksym(n.Left.Sym)

	case OCONTINUE, OBREAK:
		var to *ssa.Block
		if n.Left == nil {
			// plain break/continue
			switch n.Op {
			case OCONTINUE:
				to = s.continueTo
			case OBREAK:
				to = s.breakTo
			}
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)

	case OFOR, OFORUNTIL:
		// OFOR: for Ninit; Left; Right { Nbody }
		// For      = cond; body; incr
		// Foruntil = body; incr; cond
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test (OFOR) or body (OFORUNTIL)
		b := s.endBlock()
		if n.Op == OFOR {
			b.AddEdgeTo(bCond)
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}
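			// (OFOR tests the condition before the first iteration;
			// OFORUNTIL, handled in the else branch below, enters the
			// body directly and tests the condition only after incr.)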

		} else {
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}

		if n.Op == OFORUNTIL {
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}
		}

		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
		// If we still have a current block here, then mark it unreachable.
		if s.curBlock != nil {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken() {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if s.hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
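	// Illustrative: for func f() (r int) with an SSA-able result r, the
	// current SSA value of r is stored back to r's stack slot here so the
	// caller can read it after the return.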
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, n.Type, addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}

type opAndType struct {
	op    Op
	etype types.EType
}

var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}:    ssa.OpMul8,
	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
	opAndType{OMUL, TINT16}:   ssa.OpMul16,
	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
	opAndType{OMUL, TINT32}:   ssa.OpMul32,
	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
	opAndType{OMUL, TINT64}:   ssa.OpMul64,
	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

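	// Signed and unsigned integer division differ, so ODIV and OMOD
	// map to distinct ops (the unsigned variants carry a "u" suffix).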
	opAndType{OMOD, TINT8}:   ssa.OpMod8,
	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
	opAndType{OMOD, TINT16}:  ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}:  ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}:  ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}:   ssa.OpAnd8,
	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
	opAndType{OAND, TINT16}:  ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}:  ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}:  ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}:   ssa.OpOr8,
	opAndType{OOR, TUINT8}:  ssa.OpOr8,
	opAndType{OOR, TINT16}:  ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}:  ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}:  ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}:   ssa.OpXor8,
	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
	opAndType{OXOR, TINT16}:  ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}:  ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}:  ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
	opAndType{OEQ, TINT8}:      ssa.OpEq8,
	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
	opAndType{OEQ, TINT16}:     ssa.OpEq16,
	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
	opAndType{OEQ, TINT32}:     ssa.OpEq32,
	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
	opAndType{OEQ, TINT64}:     ssa.OpEq64,
	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
	opAndType{OEQ, TPTR32}:     ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}:     ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,

	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
	opAndType{ONE, TINT8}:      ssa.OpNeq8,
	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
	opAndType{ONE, TINT16}:     ssa.OpNeq16,
	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
	opAndType{ONE, TINT32}:     ssa.OpNeq32,
	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
	opAndType{ONE, TINT64}:     ssa.OpNeq64,
	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
	opAndType{ONE, TPTR32}:     ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}:     ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,

	opAndType{OLT, TINT8}:    ssa.OpLess8,
	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
	opAndType{OLT, TINT16}:   ssa.OpLess16,
	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
	opAndType{OLT, TINT32}:   ssa.OpLess32,
	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
	opAndType{OLT, TINT64}:   ssa.OpLess64,
	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}:    ssa.OpGreater8,
	opAndType{OGT, TUINT8}:   ssa.OpGreater8U,
	opAndType{OGT, TINT16}:   ssa.OpGreater16,
	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
	opAndType{OGT, TINT32}:   ssa.OpGreater32,
	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
	opAndType{OGT, TINT64}:   ssa.OpGreater64,
	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}:    ssa.OpLeq8,
	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
	opAndType{OLE, TINT16}:   ssa.OpLeq16,
	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
	opAndType{OLE, TINT32}:   ssa.OpLeq32,
	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
	opAndType{OLE, TINT64}:   ssa.OpLeq64,
	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}:    ssa.OpGeq8,
	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
	opAndType{OGE, TINT16}:   ssa.OpGeq16,
	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
	opAndType{OGE, TINT32}:   ssa.OpGeq32,
	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
	opAndType{OGE, TINT64}:   ssa.OpGeq64,
	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}

func (s *state) concreteEtype(t *types.Type) types.EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.IntSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.IntSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}

func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}

func floatForComplex(t *types.Type) *types.Type {
	if t.Size() == 8 {
		return types.Types[TFLOAT32]
	} else {
		return types.Types[TFLOAT64]
	}
}

type opAndTwoTypes struct {
	op     Op
	etype1 types.EType
	etype2 types.EType
}

type twoTypes struct {
	etype1 types.EType
	etype2 types.EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType types.EType
}

var fpConvOpToSSA = map[twoTypes]twoOpsAndType{

	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead

	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}

// this map is used only on 32-bit archs, and only includes the differences
// from the map above: on 32-bit archs, don't use int64<->float conversions
// for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}

var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}

func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}

// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OARRAYBYTESTRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
		len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTRARRAYBYTETMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: Linksym(n.Left.Sym)})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := Linksym(funcsym(n.Sym))
			aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: sym})
			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := types.Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := types.Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}

		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsBoolean() && tt.IsKind(TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
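			// A plain copy suffices here since bool and uint8 share the
			// same one-byte representation.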
			return s.newValue1(ssa.OpCopy, n.Type, x)
		}
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}

		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.IntSize == 4 && thearch.LinkArch.Name != "amd64p32" && thearch.LinkArch.Family != sys.MIPS {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if thearch.LinkArch.Name == "arm64" {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}

			if thearch.LinkArch.Family == sys.MIPS {
				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
					// tt is float32 or float64, and ft is also unsigned
					if tt.Size() == 4 {
						return s.uint32Tofloat32(n, x, ft, tt)
					}
					if tt.Size() == 8 {
						return s.uint32Tofloat64(n, x, ft, tt)
					}
				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
					// ft is float32 or float64, and tt is unsigned integer
					if ft.Size() == 4 {
						return s.float32ToUint32(n, x, ft, tt)
					}
					if ft.Size() == 8 {
						return s.float64ToUint32(n, x, ft, tt)
					}
				}
			}

			if !ok {
				s.Fatalf("weird float conversion %v -> %v", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValue1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValue1(op1, n.Type, x)
				}
				return s.newValue1(op2, n.Type, s.newValue1(op1, types.Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
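			// (Most targets lack direct uint64<->float instructions, so
			// the helpers below expand to branchy code sequences; compare
			// the "branchy code expansion" notes on the fpConvOpToSSA
			// entries above.)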
1648 if ft.IsInteger() { 1649 // tt is float32 or float64, and ft is also unsigned 1650 if tt.Size() == 4 { 1651 return s.uint64Tofloat32(n, x, ft, tt) 1652 } 1653 if tt.Size() == 8 { 1654 return s.uint64Tofloat64(n, x, ft, tt) 1655 } 1656 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt) 1657 } 1658 // ft is float32 or float64, and tt is unsigned integer 1659 if ft.Size() == 4 { 1660 return s.float32ToUint64(n, x, ft, tt) 1661 } 1662 if ft.Size() == 8 { 1663 return s.float64ToUint64(n, x, ft, tt) 1664 } 1665 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) 1666 return nil 1667 } 1668 1669 if ft.IsComplex() && tt.IsComplex() { 1670 var op ssa.Op 1671 if ft.Size() == tt.Size() { 1672 switch ft.Size() { 1673 case 8: 1674 op = ssa.OpRound32F 1675 case 16: 1676 op = ssa.OpRound64F 1677 default: 1678 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1679 } 1680 } else if ft.Size() == 8 && tt.Size() == 16 { 1681 op = ssa.OpCvt32Fto64F 1682 } else if ft.Size() == 16 && tt.Size() == 8 { 1683 op = ssa.OpCvt64Fto32F 1684 } else { 1685 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1686 } 1687 ftp := floatForComplex(ft) 1688 ttp := floatForComplex(tt) 1689 return s.newValue2(ssa.OpComplexMake, tt, 1690 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), 1691 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) 1692 } 1693 1694 s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) 1695 return nil 1696 1697 case ODOTTYPE: 1698 res, _ := s.dottype(n, false) 1699 return res 1700 1701 // binary ops 1702 case OLT, OEQ, ONE, OLE, OGE, OGT: 1703 a := s.expr(n.Left) 1704 b := s.expr(n.Right) 1705 if n.Left.Type.IsComplex() { 1706 pt := floatForComplex(n.Left.Type) 1707 op := s.ssaOp(OEQ, pt) 1708 r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) 1709 i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) 1710 c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i) 1711 switch n.Op { 1712 case OEQ: 1713 return c 1714 case ONE: 1715 return s.newValue1(ssa.OpNot, types.Types[TBOOL], c) 1716 default: 1717 s.Fatalf("ordered complex compare %v", n.Op) 1718 } 1719 } 1720 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b) 1721 case OMUL: 1722 a := s.expr(n.Left) 1723 b := s.expr(n.Right) 1724 if n.Type.IsComplex() { 1725 mulop := ssa.OpMul64F 1726 addop := ssa.OpAdd64F 1727 subop := ssa.OpSub64F 1728 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1729 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1730 1731 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1732 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1733 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1734 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1735 1736 if pt != wt { // Widen for calculation 1737 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1738 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1739 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1740 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1741 } 1742 1743 xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1744 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal)) 1745 1746 if pt != wt { // Narrow to store back 1747 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1748 ximag = 
s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1749 } 1750 1751 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1752 } 1753 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1754 1755 case ODIV: 1756 a := s.expr(n.Left) 1757 b := s.expr(n.Right) 1758 if n.Type.IsComplex() { 1759 // TODO this is not executed because the front-end substitutes a runtime call. 1760 // That probably ought to change; with modest optimization the widen/narrow 1761 // conversions could all be elided in larger expression trees. 1762 mulop := ssa.OpMul64F 1763 addop := ssa.OpAdd64F 1764 subop := ssa.OpSub64F 1765 divop := ssa.OpDiv64F 1766 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1767 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1768 1769 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1770 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1771 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1772 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1773 1774 if pt != wt { // Widen for calculation 1775 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1776 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1777 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1778 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1779 } 1780 1781 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag)) 1782 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1783 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag)) 1784 1785 // TODO not sure if this is best done in wide precision or narrow 1786 // Double-rounding might be an issue. 1787 // Note that the pre-SSA implementation does the entire calculation 1788 // in wide format, so wide is compatible. 1789 xreal = s.newValue2(divop, wt, xreal, denom) 1790 ximag = s.newValue2(divop, wt, ximag, denom) 1791 1792 if pt != wt { // Narrow to store back 1793 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1794 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1795 } 1796 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1797 } 1798 if n.Type.IsFloat() { 1799 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1800 } 1801 return s.intDivide(n, a, b) 1802 case OMOD: 1803 a := s.expr(n.Left) 1804 b := s.expr(n.Right) 1805 return s.intDivide(n, a, b) 1806 case OADD, OSUB: 1807 a := s.expr(n.Left) 1808 b := s.expr(n.Right) 1809 if n.Type.IsComplex() { 1810 pt := floatForComplex(n.Type) 1811 op := s.ssaOp(n.Op, pt) 1812 return s.newValue2(ssa.OpComplexMake, n.Type, 1813 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1814 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1815 } 1816 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1817 case OAND, OOR, OXOR: 1818 a := s.expr(n.Left) 1819 b := s.expr(n.Right) 1820 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1821 case OLSH, ORSH: 1822 a := s.expr(n.Left) 1823 b := s.expr(n.Right) 1824 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1825 case OANDAND, OOROR: 1826 // To implement OANDAND (and OOROR), we introduce a 1827 // new temporary variable to hold the result. The 1828 // variable is associated with the OANDAND node in the 1829 // s.vars table (normally variables are only 1830 // associated with ONAME nodes). 
We convert 1831 // A && B 1832 // to 1833 // var = A 1834 // if var { 1835 // var = B 1836 // } 1837 // Using var in the subsequent block introduces the 1838 // necessary phi variable. 1839 el := s.expr(n.Left) 1840 s.vars[n] = el 1841 1842 b := s.endBlock() 1843 b.Kind = ssa.BlockIf 1844 b.SetControl(el) 1845 // In theory, we should set b.Likely here based on context. 1846 // However, gc only gives us likeliness hints 1847 // in a single place, for plain OIF statements, 1848 // and passing around context is finicky, so don't bother for now. 1849 1850 bRight := s.f.NewBlock(ssa.BlockPlain) 1851 bResult := s.f.NewBlock(ssa.BlockPlain) 1852 if n.Op == OANDAND { 1853 b.AddEdgeTo(bRight) 1854 b.AddEdgeTo(bResult) 1855 } else if n.Op == OOROR { 1856 b.AddEdgeTo(bResult) 1857 b.AddEdgeTo(bRight) 1858 } 1859 1860 s.startBlock(bRight) 1861 er := s.expr(n.Right) 1862 s.vars[n] = er 1863 1864 b = s.endBlock() 1865 b.AddEdgeTo(bResult) 1866 1867 s.startBlock(bResult) 1868 return s.variable(n, types.Types[TBOOL]) 1869 case OCOMPLEX: 1870 r := s.expr(n.Left) 1871 i := s.expr(n.Right) 1872 return s.newValue2(ssa.OpComplexMake, n.Type, r, i) 1873 1874 // unary ops 1875 case OMINUS: 1876 a := s.expr(n.Left) 1877 if n.Type.IsComplex() { 1878 tp := floatForComplex(n.Type) 1879 negop := s.ssaOp(n.Op, tp) 1880 return s.newValue2(ssa.OpComplexMake, n.Type, 1881 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), 1882 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) 1883 } 1884 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1885 case ONOT, OCOM: 1886 a := s.expr(n.Left) 1887 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1888 case OIMAG, OREAL: 1889 a := s.expr(n.Left) 1890 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a) 1891 case OPLUS: 1892 return s.expr(n.Left) 1893 1894 case OADDR: 1895 return s.addr(n.Left, n.Bounded()) 1896 1897 case OINDREGSP: 1898 addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset) 1899 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1900 1901 case OIND: 1902 p := s.exprPtr(n.Left, false, n.Pos) 1903 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1904 1905 case ODOT: 1906 t := n.Left.Type 1907 if canSSAType(t) { 1908 v := s.expr(n.Left) 1909 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v) 1910 } 1911 if n.Left.Op == OSTRUCTLIT { 1912 // All literals with nonzero fields have already been 1913 // rewritten during walk. Any that remain are just T{} 1914 // or equivalents. Use the zero value. 1915 if !iszero(n.Left) { 1916 Fatalf("literal with nonzero value in SSA: %v", n.Left) 1917 } 1918 return s.zeroVal(n.Type) 1919 } 1920 p := s.addr(n, false) 1921 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1922 1923 case ODOTPTR: 1924 p := s.exprPtr(n.Left, false, n.Pos) 1925 p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p) 1926 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1927 1928 case OINDEX: 1929 switch { 1930 case n.Left.Type.IsString(): 1931 if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) { 1932 // Replace "abc"[1] with 'b'. 1933 // Delayed until now because "abc"[1] is not an ideal constant. 1934 // See test/fixedbugs/issue11370.go.
1935 return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()]))) 1936 } 1937 a := s.expr(n.Left) 1938 i := s.expr(n.Right) 1939 i = s.extendIndex(i, panicindex) 1940 if !n.Bounded() { 1941 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a) 1942 s.boundsCheck(i, len) 1943 } 1944 ptrtyp := s.f.Config.Types.BytePtr 1945 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 1946 if Isconst(n.Right, CTINT) { 1947 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 1948 } else { 1949 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 1950 } 1951 return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem()) 1952 case n.Left.Type.IsSlice(): 1953 p := s.addr(n, false) 1954 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1955 case n.Left.Type.IsArray(): 1956 if bound := n.Left.Type.NumElem(); bound <= 1 { 1957 // SSA can handle arrays of length at most 1. 1958 a := s.expr(n.Left) 1959 i := s.expr(n.Right) 1960 if bound == 0 { 1961 // Bounds check will never succeed. Might as well 1962 // use constants for the bounds check. 1963 z := s.constInt(types.Types[TINT], 0) 1964 s.boundsCheck(z, z) 1965 // The return value won't be live, return junk. 1966 return s.newValue0(ssa.OpUnknown, n.Type) 1967 } 1968 i = s.extendIndex(i, panicindex) 1969 if !n.Bounded() { 1970 s.boundsCheck(i, s.constInt(types.Types[TINT], bound)) 1971 } 1972 return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) 1973 } 1974 p := s.addr(n, false) 1975 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1976 default: 1977 s.Fatalf("bad type for index %v", n.Left.Type) 1978 return nil 1979 } 1980 1981 case OLEN, OCAP: 1982 switch { 1983 case n.Left.Type.IsSlice(): 1984 op := ssa.OpSliceLen 1985 if n.Op == OCAP { 1986 op = ssa.OpSliceCap 1987 } 1988 return s.newValue1(op, types.Types[TINT], s.expr(n.Left)) 1989 case n.Left.Type.IsString(): // string; not reachable for OCAP 1990 return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left)) 1991 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 1992 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 1993 default: // array 1994 return s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 1995 } 1996 1997 case OSPTR: 1998 a := s.expr(n.Left) 1999 if n.Left.Type.IsSlice() { 2000 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 2001 } else { 2002 return s.newValue1(ssa.OpStringPtr, n.Type, a) 2003 } 2004 2005 case OITAB: 2006 a := s.expr(n.Left) 2007 return s.newValue1(ssa.OpITab, n.Type, a) 2008 2009 case OIDATA: 2010 a := s.expr(n.Left) 2011 return s.newValue1(ssa.OpIData, n.Type, a) 2012 2013 case OEFACE: 2014 tab := s.expr(n.Left) 2015 data := s.expr(n.Right) 2016 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2017 2018 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2019 v := s.expr(n.Left) 2020 var i, j, k *ssa.Value 2021 low, high, max := n.SliceBounds() 2022 if low != nil { 2023 i = s.extendIndex(s.expr(low), panicslice) 2024 } 2025 if high != nil { 2026 j = s.extendIndex(s.expr(high), panicslice) 2027 } 2028 if max != nil { 2029 k = s.extendIndex(s.expr(max), panicslice) 2030 } 2031 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2032 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2033 2034 case OSLICESTR: 2035 v := s.expr(n.Left) 2036 var i, j *ssa.Value 2037 low, high, _ := n.SliceBounds() 2038 if low != nil { 2039 i = s.extendIndex(s.expr(low), panicslice) 2040 } 2041 if high != nil { 2042 j = s.extendIndex(s.expr(high), panicslice) 2043 } 2044 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 
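// A string header carries only a pointer and a length, so the capacity
// computed by s.slice is discarded.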
2045 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2046 2047 case OCALLFUNC: 2048 if isIntrinsicCall(n) { 2049 return s.intrinsicCall(n) 2050 } 2051 fallthrough 2052 2053 case OCALLINTER, OCALLMETH: 2054 a := s.call(n, callNormal) 2055 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2056 2057 case OGETG: 2058 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2059 2060 case OAPPEND: 2061 return s.append(n, false) 2062 2063 case OSTRUCTLIT, OARRAYLIT: 2064 // All literals with nonzero fields have already been 2065 // rewritten during walk. Any that remain are just T{} 2066 // or equivalents. Use the zero value. 2067 if !iszero(n) { 2068 Fatalf("literal with nonzero value in SSA: %v", n) 2069 } 2070 return s.zeroVal(n.Type) 2071 2072 default: 2073 s.Fatalf("unhandled expr %v", n.Op) 2074 return nil 2075 } 2076 } 2077 2078 // append converts an OAPPEND node to SSA. 2079 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2080 // adds it to s, and returns the Value. 2081 // If inplace is true, it writes the result of the OAPPEND expression n 2082 // back to the slice being appended to, and returns nil. 2083 // inplace MUST be set to false if the slice can be SSA'd. 2084 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2085 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2086 // 2087 // ptr, len, cap := s 2088 // newlen := len + 3 2089 // if newlen > cap { 2090 // ptr, len, cap = growslice(s, newlen) 2091 // newlen = len + 3 // recalculate to avoid a spill 2092 // } 2093 // // with write barriers, if needed: 2094 // *(ptr+len) = e1 2095 // *(ptr+len+1) = e2 2096 // *(ptr+len+2) = e3 2097 // return makeslice(ptr, newlen, cap) 2098 // 2099 // 2100 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2101 // 2102 // a := &s 2103 // ptr, len, cap := s 2104 // newlen := len + 3 2105 // if newlen > cap { 2106 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2107 // vardef(a) // if necessary, advise liveness we are writing a new a 2108 // *a.cap = newcap // write before ptr to avoid a spill 2109 // *a.ptr = newptr // with write barrier 2110 // } 2111 // newlen = len + 3 // recalculate to avoid a spill 2112 // *a.len = newlen 2113 // // with write barriers, if needed: 2114 // *(ptr+len) = e1 2115 // *(ptr+len+1) = e2 2116 // *(ptr+len+2) = e3 2117 2118 et := n.Type.Elem() 2119 pt := types.NewPtr(et) 2120 2121 // Evaluate slice 2122 sn := n.List.First() // the slice node is the first in the list 2123 2124 var slice, addr *ssa.Value 2125 if inplace { 2126 addr = s.addr(sn, false) 2127 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2128 } else { 2129 slice = s.expr(sn) 2130 } 2131 2132 // Allocate new blocks 2133 grow := s.f.NewBlock(ssa.BlockPlain) 2134 assign := s.f.NewBlock(ssa.BlockPlain) 2135 2136 // Decide if we need to grow 2137 nargs := int64(n.List.Len() - 1) 2138 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2139 l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2140 c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice) 2141 nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2142 2143 cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c) 2144 s.vars[&ptrVar] = p 2145 2146 if !inplace { 2147 s.vars[&newlenVar] = nl 2148 s.vars[&capVar] = c 2149 } else { 2150 s.vars[&lenVar] = l 2151 } 2152 2153 b := s.endBlock() 2154 b.Kind = ssa.BlockIf 2155 b.Likely = ssa.BranchUnlikely 2156 b.SetControl(cmp) 2157 
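// For a BlockIf, the first edge added is taken when the control value
// is true, so grow must come before assign; BranchUnlikely above marks
// that grow path as cold.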
b.AddEdgeTo(grow) 2158 b.AddEdgeTo(assign) 2159 2160 // Call growslice 2161 s.startBlock(grow) 2162 taddr := s.expr(n.Left) 2163 r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl) 2164 2165 if inplace { 2166 if sn.Op == ONAME { 2167 // Tell liveness we're about to build a new slice 2168 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem()) 2169 } 2170 capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr) 2171 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], capaddr, r[2], s.mem()) 2172 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, pt, addr, r[0], s.mem()) 2173 // load the value we just stored to avoid having to spill it 2174 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2175 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2176 } else { 2177 s.vars[&ptrVar] = r[0] 2178 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs)) 2179 s.vars[&capVar] = r[2] 2180 } 2181 2182 b = s.endBlock() 2183 b.AddEdgeTo(assign) 2184 2185 // assign new elements to slots 2186 s.startBlock(assign) 2187 2188 if inplace { 2189 l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len 2190 nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2191 lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr) 2192 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenaddr, nl, s.mem()) 2193 } 2194 2195 // Evaluate args 2196 type argRec struct { 2197 // if store is true, we're appending the value v. If false, we're appending the 2198 // value at *v. 2199 v *ssa.Value 2200 store bool 2201 } 2202 args := make([]argRec, 0, nargs) 2203 for _, n := range n.List.Slice()[1:] { 2204 if canSSAType(n.Type) { 2205 args = append(args, argRec{v: s.expr(n), store: true}) 2206 } else { 2207 v := s.addr(n, false) 2208 args = append(args, argRec{v: v}) 2209 } 2210 } 2211 2212 p = s.variable(&ptrVar, pt) // generates phi for ptr 2213 if !inplace { 2214 nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl 2215 c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap 2216 } 2217 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2218 for i, arg := range args { 2219 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i))) 2220 if arg.store { 2221 s.storeType(et, addr, arg.v, 0) 2222 } else { 2223 store := s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem()) 2224 store.Aux = et 2225 s.vars[&memVar] = store 2226 } 2227 } 2228 2229 delete(s.vars, &ptrVar) 2230 if inplace { 2231 delete(s.vars, &lenVar) 2232 return nil 2233 } 2234 delete(s.vars, &newlenVar) 2235 delete(s.vars, &capVar) 2236 // make result 2237 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2238 } 2239 2240 // condBranch evaluates the boolean expression cond and branches to yes 2241 // if cond is true and no if cond is false. 2242 // This function is intended to handle && and || better than just calling 2243 // s.expr(cond) and branching on the result. 
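// For example, condBranch(a && b, yes, no, likely) produces roughly:
//
//	if a {
//		if b {
//			goto yes
//		}
//		goto no
//	}
//	goto no
//
// instead of materializing a && b as a value and branching on it.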
2244 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { 2245 if cond.Op == OANDAND { 2246 mid := s.f.NewBlock(ssa.BlockPlain) 2247 s.stmtList(cond.Ninit) 2248 s.condBranch(cond.Left, mid, no, max8(likely, 0)) 2249 s.startBlock(mid) 2250 s.condBranch(cond.Right, yes, no, likely) 2251 return 2252 // Note: if likely==1, then both recursive calls pass 1. 2253 // If likely==-1, then we don't have enough information to decide 2254 // whether the first branch is likely or not. So we pass 0 for 2255 // the likeliness of the first branch. 2256 // TODO: have the frontend give us branch prediction hints for 2257 // OANDAND and OOROR nodes (if it ever has such info). 2258 } 2259 if cond.Op == OOROR { 2260 mid := s.f.NewBlock(ssa.BlockPlain) 2261 s.stmtList(cond.Ninit) 2262 s.condBranch(cond.Left, yes, mid, min8(likely, 0)) 2263 s.startBlock(mid) 2264 s.condBranch(cond.Right, yes, no, likely) 2265 return 2266 // Note: if likely==-1, then both recursive calls pass -1. 2267 // If likely==1, then we don't have enough info to decide 2268 // the likelihood of the first branch. 2269 } 2270 if cond.Op == ONOT { 2271 s.stmtList(cond.Ninit) 2272 s.condBranch(cond.Left, no, yes, -likely) 2273 return 2274 } 2275 c := s.expr(cond) 2276 b := s.endBlock() 2277 b.Kind = ssa.BlockIf 2278 b.SetControl(c) 2279 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness 2280 b.AddEdgeTo(yes) 2281 b.AddEdgeTo(no) 2282 } 2283 2284 type skipMask uint8 2285 2286 const ( 2287 skipPtr skipMask = 1 << iota 2288 skipLen 2289 skipCap 2290 ) 2291 2292 // assign does left = right. 2293 // Right has already been evaluated to ssa, left has not. 2294 // If deref is true, then we do left = *right instead (and right has already been nil-checked). 2295 // If deref is true and right == nil, just do left = 0. 2296 // skip indicates assignments (at the top level) that can be avoided. 2297 func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) { 2298 if left.Op == ONAME && isblank(left) { 2299 return 2300 } 2301 t := left.Type 2302 dowidth(t) 2303 if s.canSSA(left) { 2304 if deref { 2305 s.Fatalf("can SSA LHS %v but not RHS %s", left, right) 2306 } 2307 if left.Op == ODOT { 2308 // We're assigning to a field of an ssa-able value. 2309 // We need to build a new structure with the new value for the 2310 // field we're assigning and the old values for the other fields. 2311 // For instance: 2312 // type T struct {a, b, c int} 2313 // var x T 2314 // x.b = 5 2315 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c} 2316 2317 // Grab information about the structure type. 2318 t := left.Left.Type 2319 nf := t.NumFields() 2320 idx := fieldIdx(left) 2321 2322 // Grab old value of structure. 2323 old := s.expr(left.Left) 2324 2325 // Make new structure. 2326 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t) 2327 2328 // Add fields as args. 2329 for i := 0; i < nf; i++ { 2330 if i == idx { 2331 new.AddArg(right) 2332 } else { 2333 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old)) 2334 } 2335 } 2336 2337 // Recursively assign the new value we've made to the base of the dot op. 2338 s.assign(left.Left, new, false, 0) 2339 // TODO: do we need to update named values here? 2340 return 2341 } 2342 if left.Op == OINDEX && left.Left.Type.IsArray() { 2343 // We're assigning to an element of an ssa-able array.
2344 // a[i] = v 2345 t := left.Left.Type 2346 n := t.NumElem() 2347 2348 i := s.expr(left.Right) // index 2349 if n == 0 { 2350 // The bounds check must fail. Might as well 2351 // ignore the actual index and just use zeros. 2352 z := s.constInt(types.Types[TINT], 0) 2353 s.boundsCheck(z, z) 2354 return 2355 } 2356 if n != 1 { 2357 s.Fatalf("assigning to non-1-length array") 2358 } 2359 // Rewrite to a = [1]{v} 2360 i = s.extendIndex(i, panicindex) 2361 s.boundsCheck(i, s.constInt(types.Types[TINT], 1)) 2362 v := s.newValue1(ssa.OpArrayMake1, t, right) 2363 s.assign(left.Left, v, false, 0) 2364 return 2365 } 2366 // Update variable assignment. 2367 s.vars[left] = right 2368 s.addNamedValue(left, right) 2369 return 2370 } 2371 // Left is not ssa-able. Compute its address. 2372 addr := s.addr(left, false) 2373 if left.Op == ONAME && skip == 0 { 2374 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem()) 2375 } 2376 if isReflectHeaderDataField(left) { 2377 // Package unsafe's documentation says storing pointers into 2378 // reflect.SliceHeader and reflect.StringHeader's Data fields 2379 // is valid, even though they have type uintptr (#19168). 2380 // Mark it pointer type to signal the writebarrier pass to 2381 // insert a write barrier. 2382 t = types.Types[TUNSAFEPTR] 2383 } 2384 if deref { 2385 // Treat as a mem->mem move. 2386 var store *ssa.Value 2387 if right == nil { 2388 store = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem()) 2389 } else { 2390 store = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem()) 2391 } 2392 store.Aux = t 2393 s.vars[&memVar] = store 2394 return 2395 } 2396 // Treat as a store. 2397 s.storeType(t, addr, right, skip) 2398 } 2399 2400 // zeroVal returns the zero value for type t. 
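// Aggregates are built recursively; for example, on a 64-bit target the
// zero value of struct{ p *byte; n int } is roughly
// StructMake2(ConstNil, Const64 [0]).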
2401 func (s *state) zeroVal(t *types.Type) *ssa.Value { 2402 switch { 2403 case t.IsInteger(): 2404 switch t.Size() { 2405 case 1: 2406 return s.constInt8(t, 0) 2407 case 2: 2408 return s.constInt16(t, 0) 2409 case 4: 2410 return s.constInt32(t, 0) 2411 case 8: 2412 return s.constInt64(t, 0) 2413 default: 2414 s.Fatalf("bad sized integer type %v", t) 2415 } 2416 case t.IsFloat(): 2417 switch t.Size() { 2418 case 4: 2419 return s.constFloat32(t, 0) 2420 case 8: 2421 return s.constFloat64(t, 0) 2422 default: 2423 s.Fatalf("bad sized float type %v", t) 2424 } 2425 case t.IsComplex(): 2426 switch t.Size() { 2427 case 8: 2428 z := s.constFloat32(types.Types[TFLOAT32], 0) 2429 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2430 case 16: 2431 z := s.constFloat64(types.Types[TFLOAT64], 0) 2432 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2433 default: 2434 s.Fatalf("bad sized complex type %v", t) 2435 } 2436 2437 case t.IsString(): 2438 return s.constEmptyString(t) 2439 case t.IsPtrShaped(): 2440 return s.constNil(t) 2441 case t.IsBoolean(): 2442 return s.constBool(false) 2443 case t.IsInterface(): 2444 return s.constInterface(t) 2445 case t.IsSlice(): 2446 return s.constSlice(t) 2447 case t.IsStruct(): 2448 n := t.NumFields() 2449 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2450 for i := 0; i < n; i++ { 2451 v.AddArg(s.zeroVal(t.FieldType(i).(*types.Type))) 2452 } 2453 return v 2454 case t.IsArray(): 2455 switch t.NumElem() { 2456 case 0: 2457 return s.entryNewValue0(ssa.OpArrayMake0, t) 2458 case 1: 2459 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) 2460 } 2461 } 2462 s.Fatalf("zero for type %v not implemented", t) 2463 return nil 2464 } 2465 2466 type callKind int8 2467 2468 const ( 2469 callNormal callKind = iota 2470 callDefer 2471 callGo 2472 ) 2473 2474 var intrinsics map[intrinsicKey]intrinsicBuilder 2475 2476 // An intrinsicBuilder converts a call node n into an ssa value that 2477 // implements that call as an intrinsic. args is a list of arguments to the func. 2478 type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value 2479 2480 type intrinsicKey struct { 2481 arch *sys.Arch 2482 pkg string 2483 fn string 2484 } 2485 2486 func init() { 2487 intrinsics = map[intrinsicKey]intrinsicBuilder{} 2488 2489 var all []*sys.Arch 2490 var i4 []*sys.Arch 2491 var i8 []*sys.Arch 2492 var p4 []*sys.Arch 2493 var p8 []*sys.Arch 2494 for _, a := range sys.Archs { 2495 all = append(all, a) 2496 if a.IntSize == 4 { 2497 i4 = append(i4, a) 2498 } else { 2499 i8 = append(i8, a) 2500 } 2501 if a.PtrSize == 4 { 2502 p4 = append(p4, a) 2503 } else { 2504 p8 = append(p8, a) 2505 } 2506 } 2507 2508 // add adds the intrinsic b for pkg.fn for the given list of architectures. 2509 add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) { 2510 for _, a := range archs { 2511 intrinsics[intrinsicKey{a, pkg, fn}] = b 2512 } 2513 } 2514 // addF does the same as add but operates on architecture families. 2515 addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) { 2516 m := 0 2517 for _, f := range archFamilies { 2518 if f >= 32 { 2519 panic("too many architecture families") 2520 } 2521 m |= 1 << uint(f) 2522 } 2523 for _, a := range all { 2524 if m>>uint(a.Family)&1 != 0 { 2525 intrinsics[intrinsicKey{a, pkg, fn}] = b 2526 } 2527 } 2528 } 2529 // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists. 
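// For example, the math/bits section below uses
//	alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
// so ReverseBytes32 picks up the Bswap32 builder on every architecture
// that registered one.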
2530 alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) { 2531 for _, a := range archs { 2532 if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok { 2533 intrinsics[intrinsicKey{a, pkg, fn}] = b 2534 } 2535 } 2536 } 2537 2538 /******** runtime ********/ 2539 if !instrumenting { 2540 add("runtime", "slicebytetostringtmp", 2541 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2542 // Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes 2543 // for the backend instead of slicebytetostringtmp calls 2544 // when not instrumenting. 2545 slice := args[0] 2546 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) 2547 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2548 return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) 2549 }, 2550 all...) 2551 } 2552 add("runtime", "KeepAlive", 2553 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2554 data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) 2555 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem()) 2556 return nil 2557 }, 2558 all...) 2559 2560 /******** runtime/internal/sys ********/ 2561 addF("runtime/internal/sys", "Ctz32", 2562 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2563 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 2564 }, 2565 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2566 addF("runtime/internal/sys", "Ctz64", 2567 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2568 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 2569 }, 2570 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2571 addF("runtime/internal/sys", "Bswap32", 2572 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2573 return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0]) 2574 }, 2575 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2576 addF("runtime/internal/sys", "Bswap64", 2577 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2578 return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0]) 2579 }, 2580 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2581 2582 /******** runtime/internal/atomic ********/ 2583 addF("runtime/internal/atomic", "Load", 2584 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2585 v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], s.mem()) 2586 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2587 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2588 }, 2589 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2590 2591 addF("runtime/internal/atomic", "Load64", 2592 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2593 v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], s.mem()) 2594 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2595 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2596 }, 2597 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2598 addF("runtime/internal/atomic", "Loadp", 2599 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2600 v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(s.f.Config.Types.BytePtr, ssa.TypeMem), args[0], s.mem()) 2601 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2602 return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) 2603 }, 2604 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2605 2606 addF("runtime/internal/atomic", "Store", 2607 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2608 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, 
args[0], args[1], s.mem()) 2609 return nil 2610 }, 2611 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2612 addF("runtime/internal/atomic", "Store64", 2613 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2614 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, args[0], args[1], s.mem()) 2615 return nil 2616 }, 2617 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2618 addF("runtime/internal/atomic", "StorepNoWB", 2619 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2620 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, args[0], args[1], s.mem()) 2621 return nil 2622 }, 2623 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS) 2624 2625 addF("runtime/internal/atomic", "Xchg", 2626 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2627 v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem()) 2628 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2629 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2630 }, 2631 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2632 addF("runtime/internal/atomic", "Xchg64", 2633 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2634 v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem()) 2635 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2636 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2637 }, 2638 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2639 2640 addF("runtime/internal/atomic", "Xadd", 2641 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2642 v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem()) 2643 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2644 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2645 }, 2646 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2647 addF("runtime/internal/atomic", "Xadd64", 2648 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2649 v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem()) 2650 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2651 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2652 }, 2653 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2654 2655 addF("runtime/internal/atomic", "Cas", 2656 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2657 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(types.Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem()) 2658 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2659 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2660 }, 2661 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2662 addF("runtime/internal/atomic", "Cas64", 2663 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2664 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(types.Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem()) 2665 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2666 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2667 }, 2668 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2669 2670 addF("runtime/internal/atomic", "And8", 2671 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2672 s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, args[0], args[1], s.mem()) 2673 return nil 2674 }, 2675 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2676 addF("runtime/internal/atomic", "Or8", 2677 
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2678 s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, args[0], args[1], s.mem()) 2679 return nil 2680 }, 2681 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2682 2683 alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...) 2684 alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...) 2685 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", i4...) 2686 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", i8...) 2687 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...) 2688 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...) 2689 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...) 2690 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...) 2691 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...) 2692 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...) 2693 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...) 2694 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...) 2695 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...) 2696 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...) 2697 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...) 2698 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...) 2699 2700 /******** math ********/ 2701 addF("math", "Sqrt", 2702 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2703 return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0]) 2704 }, 2705 sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) 2706 2707 /******** math/bits ********/ 2708 addF("math/bits", "TrailingZeros64", 2709 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2710 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 2711 }, 2712 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2713 addF("math/bits", "TrailingZeros32", 2714 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2715 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 2716 }, 2717 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2718 addF("math/bits", "TrailingZeros16", 2719 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2720 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 2721 c := s.constInt32(types.Types[TUINT32], 1<<16) 2722 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 2723 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 2724 }, 2725 sys.ARM, sys.MIPS) 2726 addF("math/bits", "TrailingZeros16", 2727 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2728 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 2729 c := s.constInt64(types.Types[TUINT64], 1<<16) 2730 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 2731 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 2732 }, 2733 sys.AMD64, sys.ARM64, sys.S390X) 2734 addF("math/bits", "TrailingZeros8", 2735 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2736 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 2737 c := s.constInt32(types.Types[TUINT32], 1<<8) 2738 y := s.newValue2(ssa.OpOr32, 
types.Types[TUINT32], x, c) 2739 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 2740 }, 2741 sys.ARM, sys.MIPS) 2742 addF("math/bits", "TrailingZeros8", 2743 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2744 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 2745 c := s.constInt64(types.Types[TUINT64], 1<<8) 2746 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 2747 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 2748 }, 2749 sys.AMD64, sys.ARM64, sys.S390X) 2750 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...) 2751 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...) 2752 // ReverseBytes inlines correctly, no need to intrinsify it. 2753 // ReverseBytes16 lowers to a rotate, no need for anything special here. 2754 addF("math/bits", "Len64", 2755 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2756 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 2757 }, 2758 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2759 addF("math/bits", "Len32", 2760 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2761 if s.config.IntSize == 4 { 2762 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 2763 } 2764 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0]) 2765 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 2766 }, 2767 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2768 addF("math/bits", "Len16", 2769 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2770 if s.config.IntSize == 4 { 2771 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 2772 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 2773 } 2774 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 2775 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 2776 }, 2777 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2778 // Note: disabled on AMD64 because the Go code is faster! 2779 addF("math/bits", "Len8", 2780 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2781 if s.config.IntSize == 4 { 2782 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 2783 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 2784 } 2785 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 2786 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 2787 }, 2788 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2789 2790 addF("math/bits", "Len", 2791 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2792 if s.config.IntSize == 4 { 2793 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 2794 } 2795 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 2796 }, 2797 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2798 // LeadingZeros is handled because it trivially calls Len. 
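// For example, bits.LeadingZeros64(x) is defined as 64 - bits.Len64(x),
// so inlining it picks up the Len64 intrinsic above automatically.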
2799 addF("math/bits", "Reverse64", 2800 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2801 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 2802 }, 2803 sys.ARM64) 2804 addF("math/bits", "Reverse32", 2805 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2806 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 2807 }, 2808 sys.ARM64) 2809 addF("math/bits", "Reverse16", 2810 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2811 return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0]) 2812 }, 2813 sys.ARM64) 2814 addF("math/bits", "Reverse8", 2815 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2816 return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0]) 2817 }, 2818 sys.ARM64) 2819 addF("math/bits", "Reverse", 2820 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2821 if s.config.IntSize == 4 { 2822 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 2823 } 2824 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 2825 }, 2826 sys.ARM64) 2827 makeOnesCount := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2828 return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2829 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: Linksym(syslook("support_popcnt").Sym)}) 2830 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb) 2831 v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem()) 2832 b := s.endBlock() 2833 b.Kind = ssa.BlockIf 2834 b.SetControl(v) 2835 bTrue := s.f.NewBlock(ssa.BlockPlain) 2836 bFalse := s.f.NewBlock(ssa.BlockPlain) 2837 bEnd := s.f.NewBlock(ssa.BlockPlain) 2838 b.AddEdgeTo(bTrue) 2839 b.AddEdgeTo(bFalse) 2840 b.Likely = ssa.BranchLikely // most machines have popcnt nowadays 2841 2842 // We have the intrinsic - use it directly. 2843 s.startBlock(bTrue) 2844 op := op64 2845 if s.config.IntSize == 4 { 2846 op = op32 2847 } 2848 s.vars[n] = s.newValue1(op, types.Types[TINT], args[0]) 2849 s.endBlock().AddEdgeTo(bEnd) 2850 2851 // Call the pure Go version. 2852 s.startBlock(bFalse) 2853 a := s.call(n, callNormal) 2854 s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem()) 2855 s.endBlock().AddEdgeTo(bEnd) 2856 2857 // Merge results. 2858 s.startBlock(bEnd) 2859 return s.variable(n, types.Types[TINT]) 2860 } 2861 } 2862 addF("math/bits", "OnesCount64", 2863 makeOnesCount(ssa.OpPopCount64, ssa.OpPopCount64), 2864 sys.AMD64) 2865 addF("math/bits", "OnesCount32", 2866 makeOnesCount(ssa.OpPopCount32, ssa.OpPopCount32), 2867 sys.AMD64) 2868 addF("math/bits", "OnesCount16", 2869 makeOnesCount(ssa.OpPopCount16, ssa.OpPopCount16), 2870 sys.AMD64) 2871 // Note: no OnesCount8, the Go implementation is faster - just a table load. 2872 addF("math/bits", "OnesCount", 2873 makeOnesCount(ssa.OpPopCount64, ssa.OpPopCount32), 2874 sys.AMD64) 2875 2876 /******** sync/atomic ********/ 2877 2878 // Note: these are disabled by flag_race in findIntrinsic below. 2879 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...) 2880 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...) 2881 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...) 2882 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...) 2883 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...) 2884 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...) 
2885 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...) 2886 2887 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...) 2888 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...) 2889 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. 2890 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...) 2891 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...) 2892 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...) 2893 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...) 2894 2895 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...) 2896 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...) 2897 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...) 2898 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...) 2899 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...) 2900 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...) 2901 2902 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...) 2903 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...) 2904 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...) 2905 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...) 2906 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...) 2907 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...) 2908 2909 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...) 2910 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...) 2911 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...) 2912 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...) 2913 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...) 2914 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...) 2915 2916 /******** math/big ********/ 2917 add("math/big", "mulWW", 2918 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2919 return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) 2920 }, 2921 sys.ArchAMD64) 2922 add("math/big", "divWW", 2923 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2924 return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) 2925 }, 2926 sys.ArchAMD64) 2927 } 2928 2929 // findIntrinsic returns a function which builds the SSA equivalent of the 2930 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. 2931 func findIntrinsic(sym *types.Sym) intrinsicBuilder { 2932 if ssa.IntrinsicsDisable { 2933 return nil 2934 } 2935 if sym == nil || sym.Pkg == nil { 2936 return nil 2937 } 2938 pkg := sym.Pkg.Path 2939 if sym.Pkg == localpkg { 2940 pkg = myimportpath 2941 } 2942 if flag_race && pkg == "sync/atomic" { 2943 // The race detector needs to be able to intercept these calls. 2944 // We can't intrinsify them. 
2945 return nil 2946 } 2947 fn := sym.Name 2948 return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] 2949 } 2950 2951 func isIntrinsicCall(n *Node) bool { 2952 if n == nil || n.Left == nil { 2953 return false 2954 } 2955 return findIntrinsic(n.Left.Sym) != nil 2956 } 2957 2958 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 2959 func (s *state) intrinsicCall(n *Node) *ssa.Value { 2960 v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) 2961 if ssa.IntrinsicsDebug > 0 { 2962 x := v 2963 if x == nil { 2964 x = s.mem() 2965 } 2966 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { 2967 x = x.Args[0] 2968 } 2969 Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) 2970 } 2971 return v 2972 } 2973 2974 type callArg struct { 2975 offset int64 2976 v *ssa.Value 2977 } 2978 type byOffset []callArg 2979 2980 func (x byOffset) Len() int { return len(x) } 2981 func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 2982 func (x byOffset) Less(i, j int) bool { 2983 return x[i].offset < x[j].offset 2984 } 2985 2986 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. 2987 func (s *state) intrinsicArgs(n *Node) []*ssa.Value { 2988 // This code is complicated because of how walk transforms calls. For a call node, 2989 // each entry in n.List is either an assignment to OINDREGSP which actually 2990 // stores an arg, or an assignment to a temporary which computes an arg 2991 // which is later assigned. 2992 // The args can also be out of order. 2993 // TODO: when walk goes away someday, this code can go away also. 2994 var args []callArg 2995 temps := map[*Node]*ssa.Value{} 2996 for _, a := range n.List.Slice() { 2997 if a.Op != OAS { 2998 s.Fatalf("non-assignment as a function argument %s", opnames[a.Op]) 2999 } 3000 l, r := a.Left, a.Right 3001 switch l.Op { 3002 case ONAME: 3003 // Evaluate and store to "temporary". 3004 // Walk ensures these temporaries are dead outside of n. 3005 temps[l] = s.expr(r) 3006 case OINDREGSP: 3007 // Store a value to an argument slot. 3008 var v *ssa.Value 3009 if x, ok := temps[r]; ok { 3010 // This is a previously computed temporary. 3011 v = x 3012 } else { 3013 // This is an explicit value; evaluate it. 3014 v = s.expr(r) 3015 } 3016 args = append(args, callArg{l.Xoffset, v}) 3017 default: 3018 s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op]) 3019 } 3020 } 3021 sort.Sort(byOffset(args)) 3022 res := make([]*ssa.Value, len(args)) 3023 for i, a := range args { 3024 res[i] = a.v 3025 } 3026 return res 3027 } 3028 3029 // Calls the function n using the specified call type. 3030 // Returns the address of the return value (or nil if none). 3031 func (s *state) call(n *Node, k callKind) *ssa.Value { 3032 var sym *types.Sym // target symbol (if static) 3033 var closure *ssa.Value // ptr to closure to run (if dynamic) 3034 var codeptr *ssa.Value // ptr to target code (if dynamic) 3035 var rcvr *ssa.Value // receiver to set 3036 fn := n.Left 3037 switch n.Op { 3038 case OCALLFUNC: 3039 if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC { 3040 sym = fn.Sym 3041 break 3042 } 3043 closure = s.expr(fn) 3044 case OCALLMETH: 3045 if fn.Op != ODOTMETH { 3046 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 3047 } 3048 if k == callNormal { 3049 sym = fn.Sym 3050 break 3051 } 3052 // Make a name n2 for the function. 3053 // fn.Sym might be sync.(*Mutex).Unlock. 
3054 // Make a PFUNC node out of that, then evaluate it. 3055 // We get back an SSA value representing &sync.(*Mutex).Unlock·f. 3056 // We can then pass that to defer or go. 3057 n2 := newnamel(fn.Pos, fn.Sym) 3058 n2.Name.Curfn = s.curfn 3059 n2.Class = PFUNC 3060 n2.Pos = fn.Pos 3061 n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. 3062 closure = s.expr(n2) 3063 // Note: receiver is already assigned in n.List, so we don't 3064 // want to set it here. 3065 case OCALLINTER: 3066 if fn.Op != ODOTINTER { 3067 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 3068 } 3069 i := s.expr(fn.Left) 3070 itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i) 3071 if k != callNormal { 3072 s.nilCheck(itab) 3073 } 3074 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab 3075 itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) 3076 if k == callNormal { 3077 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem()) 3078 } else { 3079 closure = itab 3080 } 3081 rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i) 3082 } 3083 dowidth(fn.Type) 3084 stksize := fn.Type.ArgWidth() // includes receiver 3085 3086 // Run all argument assignments. The arg slots have already 3087 // been offset by the appropriate amount (+2*widthptr for go/defer, 3088 // +widthptr for interface calls). 3089 // For OCALLMETH, the receiver is set in these statements. 3090 s.stmtList(n.List) 3091 3092 // Set receiver (for interface calls) 3093 if rcvr != nil { 3094 argStart := Ctxt.FixedFrameSize() 3095 if k != callNormal { 3096 argStart += int64(2 * Widthptr) 3097 } 3098 addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) 3099 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem()) 3100 } 3101 3102 // Defer/go args 3103 if k != callNormal { 3104 // Write argsize and closure (args to Newproc/Deferproc). 
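// Both runtime helpers take the same two leading arguments, roughly
//	func deferproc(siz int32, fn *funcval) // the deferred call's own args follow
// so store the size, then the closure pointer, in front of the
// argument area filled in by the assignments above.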
3105 argStart := Ctxt.FixedFrameSize() 3106 argsize := s.constInt32(types.Types[TUINT32], int32(stksize)) 3107 addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) 3108 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINT32], addr, argsize, s.mem()) 3109 addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr)) 3110 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem()) 3111 stksize += 2 * int64(Widthptr) 3112 } 3113 3114 // call target 3115 var call *ssa.Value 3116 switch { 3117 case k == callDefer: 3118 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Deferproc, s.mem()) 3119 case k == callGo: 3120 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Newproc, s.mem()) 3121 case closure != nil: 3122 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem()) 3123 call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem()) 3124 case codeptr != nil: 3125 call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem()) 3126 case sym != nil: 3127 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Linksym(sym), s.mem()) 3128 default: 3129 Fatalf("bad call type %v %v", n.Op, n) 3130 } 3131 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 3132 s.vars[&memVar] = call 3133 3134 // Finish block for defers 3135 if k == callDefer { 3136 b := s.endBlock() 3137 b.Kind = ssa.BlockDefer 3138 b.SetControl(call) 3139 bNext := s.f.NewBlock(ssa.BlockPlain) 3140 b.AddEdgeTo(bNext) 3141 // Add recover edge to exit code. 3142 r := s.f.NewBlock(ssa.BlockPlain) 3143 s.startBlock(r) 3144 s.exit() 3145 b.AddEdgeTo(r) 3146 b.Likely = ssa.BranchLikely 3147 s.startBlock(bNext) 3148 } 3149 3150 res := n.Left.Type.Results() 3151 if res.NumFields() == 0 || k != callNormal { 3152 // call has no return value. Continue with the next statement. 3153 return nil 3154 } 3155 fp := res.Field(0) 3156 return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()) 3157 } 3158 3159 // etypesign returns the signed-ness of e, for integer/pointer etypes. 3160 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 3161 func etypesign(e types.EType) int8 { 3162 switch e { 3163 case TINT8, TINT16, TINT32, TINT64, TINT: 3164 return -1 3165 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 3166 return +1 3167 } 3168 return 0 3169 } 3170 3171 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node. 3172 // This improves the effectiveness of cse by using the same Aux values for the 3173 // same symbols. 3174 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} { 3175 switch sym.(type) { 3176 default: 3177 s.Fatalf("sym %v is of unknown type %T", sym, sym) 3178 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol: 3179 // these are the only valid types 3180 } 3181 3182 if lsym, ok := s.varsyms[n]; ok { 3183 return lsym 3184 } 3185 s.varsyms[n] = sym 3186 return sym 3187 } 3188 3189 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 3190 // The value that the returned Value represents is guaranteed to be non-nil. 3191 // If bounded is true then this address does not require a nil check for its operand 3192 // even if that would otherwise be implied. 
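// For example, the address of a global is an OpAddr offset from SB,
// while addresses of locals and arguments are OpAddr offsets from SP;
// see the ONAME cases below.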
3193 func (s *state) addr(n *Node, bounded bool) *ssa.Value {
3194 	t := types.NewPtr(n.Type)
3195 	switch n.Op {
3196 	case ONAME:
3197 		switch n.Class {
3198 		case PEXTERN:
3199 			// global variable
3200 			aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: Linksym(n.Sym)})
3201 			v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
3202 			// TODO: Make OpAddr use AuxInt as well as Aux.
3203 			if n.Xoffset != 0 {
3204 				v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
3205 			}
3206 			return v
3207 		case PPARAM:
3208 			// parameter slot
3209 			v := s.decladdrs[n]
3210 			if v != nil {
3211 				return v
3212 			}
3213 			if n == nodfp {
3214 				// Special arg that points to the frame pointer (used by ORECOVER).
3215 				aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
3216 				return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
3217 			}
3218 			s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
3219 			return nil
3220 		case PAUTO:
3221 			aux := s.lookupSymbol(n, &ssa.AutoSymbol{Node: n})
3222 			return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
3223 		case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
3224 			// ensure that we reuse symbols for out parameters so
3225 			// that cse works on their addresses
3226 			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
3227 			return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
3228 		default:
3229 			s.Fatalf("variable address class %v not implemented", classnames[n.Class])
3230 			return nil
3231 		}
3232 	case OINDREGSP:
3233 		// indirect off REGSP
3234 		// used for storing/loading arguments/returns to/from callees
3235 		return s.constOffPtrSP(t, n.Xoffset)
3236 	case OINDEX:
3237 		if n.Left.Type.IsSlice() {
3238 			a := s.expr(n.Left)
3239 			i := s.expr(n.Right)
3240 			i = s.extendIndex(i, panicindex)
3241 			len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
3242 			if !n.Bounded() {
3243 				s.boundsCheck(i, len)
3244 			}
3245 			p := s.newValue1(ssa.OpSlicePtr, t, a)
3246 			return s.newValue2(ssa.OpPtrIndex, t, p, i)
3247 		} else { // array
3248 			a := s.addr(n.Left, bounded)
3249 			i := s.expr(n.Right)
3250 			i = s.extendIndex(i, panicindex)
3251 			len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
3252 			if !n.Bounded() {
3253 				s.boundsCheck(i, len)
3254 			}
3255 			return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
3256 		}
3257 	case OIND:
3258 		return s.exprPtr(n.Left, bounded, n.Pos)
3259 	case ODOT:
3260 		p := s.addr(n.Left, bounded)
3261 		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
3262 	case ODOTPTR:
3263 		p := s.exprPtr(n.Left, bounded, n.Pos)
3264 		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
3265 	case OCLOSUREVAR:
3266 		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
3267 			s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
3268 	case OCONVNOP:
3269 		addr := s.addr(n.Left, bounded)
3270 		return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
3271 	case OCALLFUNC, OCALLINTER, OCALLMETH:
3272 		return s.call(n, callNormal)
3273 	case ODOTTYPE:
3274 		v, _ := s.dottype(n, false)
3275 		if v.Op != ssa.OpLoad {
3276 			s.Fatalf("dottype of non-load")
3277 		}
3278 		if v.Args[1] != s.mem() {
3279 			s.Fatalf("memory no longer live from dottype load")
3280 		}
3281 		return v.Args[0]
3282 	default:
3283 		s.Fatalf("unhandled addr %v", n.Op)
3284 		return nil
3285 	}
3286 }
3287 
3288 // canSSA reports whether n is SSA-able.
3289 // n must be an ONAME (or an ODOT sequence with an ONAME base).
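// For instance, a small, non-address-taken local struct accessed as x.f
// is a candidate (the ODOT chain collapses to the ONAME x), while
// anything address-taken or heap-allocated never is.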
3290 func (s *state) canSSA(n *Node) bool {
3291 	if Debug['N'] != 0 {
3292 		return false
3293 	}
3294 	for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
3295 		n = n.Left
3296 	}
3297 	if n.Op != ONAME {
3298 		return false
3299 	}
3300 	if n.Addrtaken() {
3301 		return false
3302 	}
3303 	if n.isParamHeapCopy() {
3304 		return false
3305 	}
3306 	if n.Class == PAUTOHEAP {
3307 		Fatalf("canSSA of PAUTOHEAP %v", n)
3308 	}
3309 	switch n.Class {
3310 	case PEXTERN:
3311 		return false
3312 	case PPARAMOUT:
3313 		if s.hasdefer {
3314 			// TODO: handle this case? Named return values must be
3315 			// in memory so that the deferred function can see them.
3316 			// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
3317 			// Or maybe not, see issue 18860. Even unnamed return values
3318 			// must be written back so if a defer recovers, the caller can see them.
3319 			return false
3320 		}
3321 		if s.cgoUnsafeArgs {
3322 			// Cgo effectively takes the address of all result args,
3323 			// but the compiler can't see that.
3324 			return false
3325 		}
3326 	}
3327 	if n.Class == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
3328 		// wrappers generated by genwrapper need to update
3329 		// the .this pointer in place.
3330 		// TODO: treat as a PPARAMOUT?
3331 		return false
3332 	}
3333 	return canSSAType(n.Type)
3334 	// TODO: try to make more variables SSAable?
3335 }
3336 
3337 // canSSAType reports whether variables of type t are SSA-able.
3338 func canSSAType(t *types.Type) bool {
3339 	dowidth(t)
3340 	if t.Width > int64(4*Widthptr) {
3341 		// 4*Widthptr is an arbitrary constant. We want it
3342 		// to be at least 3*Widthptr so slices can be registerized.
3343 		// Too big and we'll introduce too much register pressure.
3344 		return false
3345 	}
3346 	switch t.Etype {
3347 	case TARRAY:
3348 		// We can't do larger arrays because dynamic indexing is
3349 		// not supported on SSA variables.
3350 		// TODO: allow if all indexes are constant.
3351 		if t.NumElem() <= 1 {
3352 			return canSSAType(t.Elem())
3353 		}
3354 		return false
3355 	case TSTRUCT:
3356 		if t.NumFields() > ssa.MaxStruct {
3357 			return false
3358 		}
3359 		for _, t1 := range t.Fields().Slice() {
3360 			if !canSSAType(t1.Type) {
3361 				return false
3362 			}
3363 		}
3364 		return true
3365 	default:
3366 		return true
3367 	}
3368 }
3369 
3370 // exprPtr evaluates n to a pointer and nil-checks it.
3371 func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
3372 	p := s.expr(n)
3373 	if bounded || n.NonNil() {
3374 		if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
3375 			s.f.Warnl(lineno, "removed nil check")
3376 		}
3377 		return p
3378 	}
3379 	s.nilCheck(p)
3380 	return p
3381 }
3382 
3383 // nilCheck generates nil pointer checking code.
3384 // Used only for automatically inserted nil checks,
3385 // not for user code like 'x != nil'.
3386 func (s *state) nilCheck(ptr *ssa.Value) {
3387 	if disable_checknil != 0 {
3388 		return
3389 	}
3390 	s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
3391 }
3392 
3393 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
3394 // Starts a new block on return.
3395 // idx is already converted to full int width.
3396 func (s *state) boundsCheck(idx, len *ssa.Value) {
3397 	if Debug['B'] != 0 {
3398 		return
3399 	}
3400 
3401 	// bounds check
3402 	cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
3403 	s.check(cmp, panicindex)
3404 }
3405 
3406 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
3407 // Starts a new block on return.
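// (slice below uses this to verify i <= j, j <= k, and k <= cap in turn.)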
3408 // idx and len are already converted to full int width.
3409 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
3410 	if Debug['B'] != 0 {
3411 		return
3412 	}
3413 
3414 	// bounds check
3415 	cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
3416 	s.check(cmp, panicslice)
3417 }
3418 
3419 // If cmp (a bool) is false, panic using the given function.
3420 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
3421 	b := s.endBlock()
3422 	b.Kind = ssa.BlockIf
3423 	b.SetControl(cmp)
3424 	b.Likely = ssa.BranchLikely
3425 	bNext := s.f.NewBlock(ssa.BlockPlain)
3426 	line := s.peekPos()
3427 	bPanic := s.panics[funcLine{fn, line}]
3428 	if bPanic == nil {
3429 		bPanic = s.f.NewBlock(ssa.BlockPlain)
3430 		s.panics[funcLine{fn, line}] = bPanic
3431 		s.startBlock(bPanic)
3432 		// The panic call takes/returns memory to ensure that the right
3433 		// memory state is observed if the panic happens.
3434 		s.rtcall(fn, false, nil)
3435 	}
3436 	b.AddEdgeTo(bNext)
3437 	b.AddEdgeTo(bPanic)
3438 	s.startBlock(bNext)
3439 }
3440 
// intDivide generates code for a/b (or a%b), inserting a divide-by-zero
// check unless b is known to be a nonzero constant.
3441 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
3442 	needcheck := true
3443 	switch b.Op {
3444 	case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
3445 		if b.AuxInt != 0 {
3446 			needcheck = false
3447 		}
3448 	}
3449 	if needcheck {
3450 		// do a size-appropriate check for zero
3451 		cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
3452 		s.check(cmp, panicdivide)
3453 	}
3454 	return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
3455 }
3456 
3457 // rtcall issues a call to the given runtime function fn with the listed args.
3458 // Returns a slice of results of the given result types.
3459 // The call is added to the end of the current block.
3460 // If returns is false, the block is marked as an exit block.
3461 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
3462 	// Write args to the stack
3463 	off := Ctxt.FixedFrameSize()
3464 	for _, arg := range args {
3465 		t := arg.Type
3466 		off = Rnd(off, t.Alignment())
3467 		ptr := s.constOffPtrSP(t.PtrTo(), off)
3468 		size := t.Size()
3469 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, ptr, arg, s.mem())
3470 		off += size
3471 	}
3472 	off = Rnd(off, int64(Widthptr))
3473 	if thearch.LinkArch.Name == "amd64p32" {
3474 		// amd64p32 wants 8-byte alignment of the start of the return values.
3475 		off = Rnd(off, 8)
3476 	}
3477 
3478 	// Issue call
3479 	call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn, s.mem())
3480 	s.vars[&memVar] = call
3481 
3482 	if !returns {
3483 		// Finish block
3484 		b := s.endBlock()
3485 		b.Kind = ssa.BlockExit
3486 		b.SetControl(call)
3487 		call.AuxInt = off - Ctxt.FixedFrameSize()
3488 		if len(results) > 0 {
3489 			Fatalf("panic call can't have results")
3490 		}
3491 		return nil
3492 	}
3493 
3494 	// Load results
3495 	res := make([]*ssa.Value, len(results))
3496 	for i, t := range results {
3497 		off = Rnd(off, t.Alignment())
3498 		ptr := s.constOffPtrSP(types.NewPtr(t), off)
3499 		res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
3500 		off += t.Size()
3501 	}
3502 	off = Rnd(off, int64(Widthptr))
3503 
3504 	// Remember how much callee stack space we needed.
3505 	call.AuxInt = off
3506 
3507 	return res
3508 }
3509 
3510 // do *left = right for type t.
3511 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) {
3512 	if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
3513 		// Known to not have write barrier. Store the whole type.
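		// (e.g. a plain integer store, or any store to a stack slot:
		// a single OpStore suffices, no field-by-field decomposition.)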
3514 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
3515 		return
3516 	}
3517 
3518 	// store scalar fields first, so write barrier stores for
3519 	// pointer fields can be grouped together, and scalar values
3520 	// don't need to be live across the write barrier call.
3521 	// TODO: if the writebarrier pass knows how to reorder stores,
3522 	// we can do a single store here as long as skip==0.
3523 	s.storeTypeScalars(t, left, right, skip)
3524 	if skip&skipPtr == 0 && types.Haspointers(t) {
3525 		s.storeTypePtrs(t, left, right)
3526 	}
3527 }
3528 
3529 // do *left = right for all scalar (non-pointer) parts of t.
3530 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
3531 	switch {
3532 	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
3533 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
3534 	case t.IsPtrShaped():
3535 		// no scalar fields.
3536 	case t.IsString():
3537 		if skip&skipLen != 0 {
3538 			return
3539 		}
3540 		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
3541 		lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.IntSize, left)
3542 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
3543 	case t.IsSlice():
3544 		if skip&skipLen == 0 {
3545 			len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
3546 			lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.IntSize, left)
3547 			s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
3548 		}
3549 		if skip&skipCap == 0 {
3550 			cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
3551 			capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.IntSize, left)
3552 			s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], capAddr, cap, s.mem())
3553 		}
3554 	case t.IsInterface():
3555 		// itab field doesn't need a write barrier (even though it is a pointer).
3556 		itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
3557 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], left, itab, s.mem())
3558 	case t.IsStruct():
3559 		n := t.NumFields()
3560 		for i := 0; i < n; i++ {
3561 			ft := t.FieldType(i)
3562 			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3563 			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3564 			s.storeTypeScalars(ft.(*types.Type), addr, val, 0)
3565 		}
3566 	case t.IsArray() && t.NumElem() == 0:
3567 		// nothing
3568 	case t.IsArray() && t.NumElem() == 1:
3569 		s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
3570 	default:
3571 		s.Fatalf("bad write barrier type %v", t)
3572 	}
3573 }
3574 
3575 // do *left = right for all pointer parts of t.
3576 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
3577 	switch {
3578 	case t.IsPtrShaped():
3579 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
3580 	case t.IsString():
3581 		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
3582 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
3583 	case t.IsSlice():
3584 		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right)
3585 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
3586 	case t.IsInterface():
3587 		// itab field is treated as a scalar.
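		// (storeTypeScalars already wrote the itab word; only the data
		// word below gets the write-barrier treatment.)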
3588 		idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
3589 		idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
3590 		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem())
3591 	case t.IsStruct():
3592 		n := t.NumFields()
3593 		for i := 0; i < n; i++ {
3594 			ft := t.FieldType(i)
3595 			if !types.Haspointers(ft.(*types.Type)) {
3596 				continue
3597 			}
3598 			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3599 			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3600 			s.storeTypePtrs(ft.(*types.Type), addr, val)
3601 		}
3602 	case t.IsArray() && t.NumElem() == 0:
3603 		// nothing
3604 	case t.IsArray() && t.NumElem() == 1:
3605 		s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
3606 	default:
3607 		s.Fatalf("bad write barrier type %v", t)
3608 	}
3609 }
3610 
3611 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
3612 // i,j,k may be nil, in which case they are set to their default value.
3613 // t is a slice, ptr to array, or string type.
3614 func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
3615 	var elemtype *types.Type
3616 	var ptrtype *types.Type
3617 	var ptr *ssa.Value
3618 	var len *ssa.Value
3619 	var cap *ssa.Value
3620 	zero := s.constInt(types.Types[TINT], 0)
3621 	switch {
3622 	case t.IsSlice():
3623 		elemtype = t.Elem()
3624 		ptrtype = types.NewPtr(elemtype)
3625 		ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
3626 		len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
3627 		cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
3628 	case t.IsString():
3629 		elemtype = types.Types[TUINT8]
3630 		ptrtype = types.NewPtr(elemtype)
3631 		ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
3632 		len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
3633 		cap = len
3634 	case t.IsPtr():
3635 		if !t.Elem().IsArray() {
3636 			s.Fatalf("bad ptr to array in slice %v\n", t)
3637 		}
3638 		elemtype = t.Elem().Elem()
3639 		ptrtype = types.NewPtr(elemtype)
3640 		s.nilCheck(v)
3641 		ptr = v
3642 		len = s.constInt(types.Types[TINT], t.Elem().NumElem())
3643 		cap = len
3644 	default:
3645 		s.Fatalf("bad type in slice %v\n", t)
3646 	}
3647 
3648 	// Set default values
3649 	if i == nil {
3650 		i = zero
3651 	}
3652 	if j == nil {
3653 		j = len
3654 	}
3655 	if k == nil {
3656 		k = cap
3657 	}
3658 
3659 	// Panic if slice indices are not in bounds.
3660 	s.sliceBoundsCheck(i, j)
3661 	if j != k {
3662 		s.sliceBoundsCheck(j, k)
3663 	}
3664 	if k != cap {
3665 		s.sliceBoundsCheck(k, cap)
3666 	}
3667 
3668 	// Generate the following code assuming that indexes are in bounds.
3669 	// The masking is to make sure that we don't generate a slice
3670 	// that points to the next object in memory.
3671 	// rlen = j - i
3672 	// rcap = k - i
3673 	// delta = i * elemsize
3674 	// rptr = p + delta&mask(rcap)
3675 	// result = (SliceMake rptr rlen rcap)
3676 	// where mask(x) is 0 if x==0 and -1 if x>0.
	// (e.g. str[len(str):] has rcap == 0, so the mask zeroes delta and
	// rptr stays at the base instead of pointing one past the object.)
3677 	subOp := s.ssaOp(OSUB, types.Types[TINT])
3678 	mulOp := s.ssaOp(OMUL, types.Types[TINT])
3679 	andOp := s.ssaOp(OAND, types.Types[TINT])
3680 	rlen := s.newValue2(subOp, types.Types[TINT], j, i)
3681 	var rcap *ssa.Value
3682 	switch {
3683 	case t.IsString():
3684 		// Capacity of the result is unimportant. However, we use
3685 		// rcap to test if we've generated a zero-length slice.
3686 		// Use length of strings for that.
3687 		rcap = rlen
3688 	case j == k:
3689 		rcap = rlen
3690 	default:
3691 		rcap = s.newValue2(subOp, types.Types[TINT], k, i)
3692 	}
3693 
3694 	var rptr *ssa.Value
3695 	if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
3696 		// No pointer arithmetic necessary.
3697 		rptr = ptr
3698 	} else {
3699 		// delta = # of bytes to offset pointer by.
3700 		delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width))
3701 		// If we're slicing to the point where the capacity is zero,
3702 		// zero out the delta.
3703 		mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
3704 		delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
3705 		// Compute rptr = ptr + delta
3706 		rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
3707 	}
3708 
3709 	return rptr, rlen, rcap
3710 }
3711 
3712 type u642fcvtTab struct {
3713 	geq, cvt2F, and, rsh, or, add ssa.Op
3714 	one                           func(*state, ssa.Type, int64) *ssa.Value
3715 }
3716 
3717 var u64_f64 u642fcvtTab = u642fcvtTab{
3718 	geq:   ssa.OpGeq64,
3719 	cvt2F: ssa.OpCvt64to64F,
3720 	and:   ssa.OpAnd64,
3721 	rsh:   ssa.OpRsh64Ux64,
3722 	or:    ssa.OpOr64,
3723 	add:   ssa.OpAdd64F,
3724 	one:   (*state).constInt64,
3725 }
3726 
3727 var u64_f32 u642fcvtTab = u642fcvtTab{
3728 	geq:   ssa.OpGeq64,
3729 	cvt2F: ssa.OpCvt64to32F,
3730 	and:   ssa.OpAnd64,
3731 	rsh:   ssa.OpRsh64Ux64,
3732 	or:    ssa.OpOr64,
3733 	add:   ssa.OpAdd32F,
3734 	one:   (*state).constInt64,
3735 }
3736 
3737 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3738 	return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
3739 }
3740 
3741 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3742 	return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
3743 }
3744 
3745 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3746 	// if x >= 0 {
3747 	//    result = (floatY) x
3748 	// } else {
3749 	//    y = x & 1
3750 	//    z = x >> 1
3752 	//    z = z | y
3753 	//    result = floatY(z)
3754 	//    result = result + result
3755 	// }
3756 	//
3757 	// Code borrowed from old code generator.
3758 	// What's going on: a large 64-bit "unsigned" value looks like a
3759 	// negative number to the hardware's integer-to-float
3760 	// conversion. However, because the mantissa is only
3761 	// 63 bits, we don't need the LSB, so instead we do an
3762 	// unsigned right shift (divide by two), convert, and
3763 	// double. However, before we do that, we need to be
3764 	// sure that we do not lose a "1" if that made the
3765 	// difference in the resulting rounding. Therefore, we
3766 	// preserve it, and OR (not ADD) it back in. The case
3767 	// that matters is when the eleven discarded bits are
3768 	// equal to 10000000001; that rounds up, and the 1 cannot
3769 	// be lost else it would round down if the LSB of the
3770 	// candidate mantissa is 0.
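	// In short: z = (x >> 1) | (x & 1) halves x while keeping the dropped
	// low bit visible to rounding, the conversion then sees a positive
	// value, and result+result undoes the halving.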
3771 	cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft))
3772 	b := s.endBlock()
3773 	b.Kind = ssa.BlockIf
3774 	b.SetControl(cmp)
3775 	b.Likely = ssa.BranchLikely
3776 
3777 	bThen := s.f.NewBlock(ssa.BlockPlain)
3778 	bElse := s.f.NewBlock(ssa.BlockPlain)
3779 	bAfter := s.f.NewBlock(ssa.BlockPlain)
3780 
3781 	b.AddEdgeTo(bThen)
3782 	s.startBlock(bThen)
3783 	a0 := s.newValue1(cvttab.cvt2F, tt, x)
3784 	s.vars[n] = a0
3785 	s.endBlock()
3786 	bThen.AddEdgeTo(bAfter)
3787 
3788 	b.AddEdgeTo(bElse)
3789 	s.startBlock(bElse)
3790 	one := cvttab.one(s, ft, 1)
3791 	y := s.newValue2(cvttab.and, ft, x, one)
3792 	z := s.newValue2(cvttab.rsh, ft, x, one)
3793 	z = s.newValue2(cvttab.or, ft, z, y)
3794 	a := s.newValue1(cvttab.cvt2F, tt, z)
3795 	a1 := s.newValue2(cvttab.add, tt, a, a)
3796 	s.vars[n] = a1
3797 	s.endBlock()
3798 	bElse.AddEdgeTo(bAfter)
3799 
3800 	s.startBlock(bAfter)
3801 	return s.variable(n, n.Type)
3802 }
3803 
3804 type u322fcvtTab struct {
3805 	cvtI2F, cvtF2F ssa.Op
3806 }
3807 
3808 var u32_f64 u322fcvtTab = u322fcvtTab{
3809 	cvtI2F: ssa.OpCvt32to64F,
3810 	cvtF2F: ssa.OpCopy,
3811 }
3812 
3813 var u32_f32 u322fcvtTab = u322fcvtTab{
3814 	cvtI2F: ssa.OpCvt32to32F,
3815 	cvtF2F: ssa.OpCvt64Fto32F,
3816 }
3817 
3818 func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3819 	return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
3820 }
3821 
3822 func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3823 	return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
3824 }
3825 
3826 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3827 	// if x >= 0 {
3828 	//    result = floatY(x)
3829 	// } else {
3830 	//    result = floatY(float64(x) + (1<<32))
3831 	// }
3832 	cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft))
3833 	b := s.endBlock()
3834 	b.Kind = ssa.BlockIf
3835 	b.SetControl(cmp)
3836 	b.Likely = ssa.BranchLikely
3837 
3838 	bThen := s.f.NewBlock(ssa.BlockPlain)
3839 	bElse := s.f.NewBlock(ssa.BlockPlain)
3840 	bAfter := s.f.NewBlock(ssa.BlockPlain)
3841 
3842 	b.AddEdgeTo(bThen)
3843 	s.startBlock(bThen)
3844 	a0 := s.newValue1(cvttab.cvtI2F, tt, x)
3845 	s.vars[n] = a0
3846 	s.endBlock()
3847 	bThen.AddEdgeTo(bAfter)
3848 
3849 	b.AddEdgeTo(bElse)
3850 	s.startBlock(bElse)
3851 	a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
3852 	twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
3853 	a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
3854 	a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
3855 
3856 	s.vars[n] = a3
3857 	s.endBlock()
3858 	bElse.AddEdgeTo(bAfter)
3859 
3860 	s.startBlock(bAfter)
3861 	return s.variable(n, n.Type)
3862 }
3863 
3864 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
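// For example, len(ch) on a nil channel yields 0 without dereferencing;
// on a non-nil channel it loads the count word at the start of the
// runtime's channel header, per the branches below.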
3865 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
3866 	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
3867 		s.Fatalf("node must be a map or a channel")
3868 	}
3869 	// if n == nil {
3870 	//   return 0
3871 	// } else {
3872 	//   // len
3873 	//   return *((*int)n)
3874 	//   // cap
3875 	//   return *(((*int)n)+1)
3876 	// }
3877 	lenType := n.Type
3878 	nilValue := s.constNil(types.Types[TUINTPTR])
3879 	cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
3880 	b := s.endBlock()
3881 	b.Kind = ssa.BlockIf
3882 	b.SetControl(cmp)
3883 	b.Likely = ssa.BranchUnlikely
3884 
3885 	bThen := s.f.NewBlock(ssa.BlockPlain)
3886 	bElse := s.f.NewBlock(ssa.BlockPlain)
3887 	bAfter := s.f.NewBlock(ssa.BlockPlain)
3888 
3889 	// length/capacity of a nil map/chan is zero
3890 	b.AddEdgeTo(bThen)
3891 	s.startBlock(bThen)
3892 	s.vars[n] = s.zeroVal(lenType)
3893 	s.endBlock()
3894 	bThen.AddEdgeTo(bAfter)
3895 
3896 	b.AddEdgeTo(bElse)
3897 	s.startBlock(bElse)
3898 	if n.Op == OLEN {
3899 		// length is stored in the first word for map/chan
3900 		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
3901 	} else if n.Op == OCAP {
3902 		// capacity is stored in the second word for chan
3903 		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
3904 		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
3905 	} else {
3906 		s.Fatalf("op must be OLEN or OCAP")
3907 	}
3908 	s.endBlock()
3909 	bElse.AddEdgeTo(bAfter)
3910 
3911 	s.startBlock(bAfter)
3912 	return s.variable(n, lenType)
3913 }
3914 
3915 type f2uCvtTab struct {
3916 	ltf, cvt2U, subf, or ssa.Op
3917 	floatValue           func(*state, ssa.Type, float64) *ssa.Value
3918 	intValue             func(*state, ssa.Type, int64) *ssa.Value
3919 	cutoff               uint64
3920 }
3921 
3922 var f32_u64 f2uCvtTab = f2uCvtTab{
3923 	ltf:        ssa.OpLess32F,
3924 	cvt2U:      ssa.OpCvt32Fto64,
3925 	subf:       ssa.OpSub32F,
3926 	or:         ssa.OpOr64,
3927 	floatValue: (*state).constFloat32,
3928 	intValue:   (*state).constInt64,
3929 	cutoff:     9223372036854775808,
3930 }
3931 
3932 var f64_u64 f2uCvtTab = f2uCvtTab{
3933 	ltf:        ssa.OpLess64F,
3934 	cvt2U:      ssa.OpCvt64Fto64,
3935 	subf:       ssa.OpSub64F,
3936 	or:         ssa.OpOr64,
3937 	floatValue: (*state).constFloat64,
3938 	intValue:   (*state).constInt64,
3939 	cutoff:     9223372036854775808,
3940 }
3941 
3942 var f32_u32 f2uCvtTab = f2uCvtTab{
3943 	ltf:        ssa.OpLess32F,
3944 	cvt2U:      ssa.OpCvt32Fto32,
3945 	subf:       ssa.OpSub32F,
3946 	or:         ssa.OpOr32,
3947 	floatValue: (*state).constFloat32,
3948 	intValue:   func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
3949 	cutoff:     2147483648,
3950 }
3951 
3952 var f64_u32 f2uCvtTab = f2uCvtTab{
3953 	ltf:        ssa.OpLess64F,
3954 	cvt2U:      ssa.OpCvt64Fto32,
3955 	subf:       ssa.OpSub64F,
3956 	or:         ssa.OpOr32,
3957 	floatValue: (*state).constFloat64,
3958 	intValue:   func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
3959 	cutoff:     2147483648,
3960 }
3961 
3962 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3963 	return s.floatToUint(&f32_u64, n, x, ft, tt)
3964 }
3965 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3966 	return s.floatToUint(&f64_u64, n, x, ft, tt)
3967 }
3968 
3969 func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3970 	return s.floatToUint(&f32_u32, n, x, ft, tt)
3971 }
3972 
3973 func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3974 	return s.floatToUint(&f64_u32, n, x, ft, tt)
3975 }
3976 
3977 func (s *state) floatToUint(cvttab *f2uCvtTab,
	n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3978 	// cutoff := 1 << (intY_Size - 1)
3979 	// if x < floatX(cutoff) {
3980 	//    result = uintY(x)
3981 	// } else {
3982 	//    y = x - floatX(cutoff)
3983 	//    z = uintY(y)
3984 	//    result = z | -(cutoff)
3985 	// }
3986 	cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
3987 	cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
3988 	b := s.endBlock()
3989 	b.Kind = ssa.BlockIf
3990 	b.SetControl(cmp)
3991 	b.Likely = ssa.BranchLikely
3992 
3993 	bThen := s.f.NewBlock(ssa.BlockPlain)
3994 	bElse := s.f.NewBlock(ssa.BlockPlain)
3995 	bAfter := s.f.NewBlock(ssa.BlockPlain)
3996 
3997 	b.AddEdgeTo(bThen)
3998 	s.startBlock(bThen)
3999 	a0 := s.newValue1(cvttab.cvt2U, tt, x)
4000 	s.vars[n] = a0
4001 	s.endBlock()
4002 	bThen.AddEdgeTo(bAfter)
4003 
4004 	b.AddEdgeTo(bElse)
4005 	s.startBlock(bElse)
4006 	y := s.newValue2(cvttab.subf, ft, x, cutoff)
4007 	y = s.newValue1(cvttab.cvt2U, tt, y)
4008 	z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
4009 	a1 := s.newValue2(cvttab.or, tt, y, z)
4010 	s.vars[n] = a1
4011 	s.endBlock()
4012 	bElse.AddEdgeTo(bAfter)
4013 
4014 	s.startBlock(bAfter)
4015 	return s.variable(n, n.Type)
4016 }
4017 
4018 // dottype generates SSA for a type assertion node.
4019 // commaok indicates whether failure should panic (commaok=false) or
4020 // yield an extra boolean result instead. If commaok is false, resok will be nil.
4021 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
4022 	iface := s.expr(n.Left)   // input interface
4023 	target := s.expr(n.Right) // target type
4024 	byteptr := s.f.Config.Types.BytePtr
4025 
4026 	if n.Type.IsInterface() {
4027 		if n.Type.IsEmptyInterface() {
4028 			// Converting to an empty interface.
4029 			// Input could be an empty or nonempty interface.
4030 			if Debug_typeassert > 0 {
4031 				Warnl(n.Pos, "type assertion inlined")
4032 			}
4033 
4034 			// Get itab/type field from input.
4035 			itab := s.newValue1(ssa.OpITab, byteptr, iface)
4036 			// Conversion succeeds iff that field is not nil.
4037 			cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
4038 
4039 			if n.Left.Type.IsEmptyInterface() && commaok {
4040 				// Converting empty interface to empty interface with ,ok is just a nil check.
4041 				return iface, cond
4042 			}
4043 
4044 			// Branch on nilness.
4045 			b := s.endBlock()
4046 			b.Kind = ssa.BlockIf
4047 			b.SetControl(cond)
4048 			b.Likely = ssa.BranchLikely
4049 			bOk := s.f.NewBlock(ssa.BlockPlain)
4050 			bFail := s.f.NewBlock(ssa.BlockPlain)
4051 			b.AddEdgeTo(bOk)
4052 			b.AddEdgeTo(bFail)
4053 
4054 			if !commaok {
4055 				// On failure, panic by calling panicnildottype.
4056 				s.startBlock(bFail)
4057 				s.rtcall(panicnildottype, false, nil, target)
4058 
4059 				// On success, return (perhaps modified) input interface.
4060 				s.startBlock(bOk)
4061 				if n.Left.Type.IsEmptyInterface() {
4062 					res = iface // Use input interface unchanged.
4063 					return
4064 				}
4065 				// Load type out of itab, build interface with existing idata.
4066 				off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
4067 				typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
4068 				idata := s.newValue1(ssa.OpIData, n.Type, iface)
4069 				res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
4070 				return
4071 			}
4072 
4073 			s.startBlock(bOk)
4074 			// nonempty -> empty
4075 			// Need to load type from itab
4076 			off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
4077 			s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
4078 			s.endBlock()
4079 
4080 			// itab is nil, might as well use that as the nil result.
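			// (a failed assertion produces a nil interface, whose type
			// word is nil; since this branch is only reached when itab
			// is nil, it can be stored directly as the merged type word.)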
4081 			s.startBlock(bFail)
4082 			s.vars[&typVar] = itab
4083 			s.endBlock()
4084 
4085 			// Merge point.
4086 			bEnd := s.f.NewBlock(ssa.BlockPlain)
4087 			bOk.AddEdgeTo(bEnd)
4088 			bFail.AddEdgeTo(bEnd)
4089 			s.startBlock(bEnd)
4090 			idata := s.newValue1(ssa.OpIData, n.Type, iface)
4091 			res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
4092 			resok = cond
4093 			delete(s.vars, &typVar)
4094 			return
4095 		}
4096 		// converting to a nonempty interface needs a runtime call.
4097 		if Debug_typeassert > 0 {
4098 			Warnl(n.Pos, "type assertion not inlined")
4099 		}
4100 		if n.Left.Type.IsEmptyInterface() {
4101 			if commaok {
4102 				call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
4103 				return call[0], call[1]
4104 			}
4105 			return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
4106 		}
4107 		if commaok {
4108 			call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
4109 			return call[0], call[1]
4110 		}
4111 		return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
4112 	}
4113 
4114 	if Debug_typeassert > 0 {
4115 		Warnl(n.Pos, "type assertion inlined")
4116 	}
4117 
4118 	// Converting to a concrete type.
4119 	direct := isdirectiface(n.Type)
4120 	itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
4124 	var targetITab *ssa.Value
4125 	if n.Left.Type.IsEmptyInterface() {
4126 		// Looking for pointer to target type.
4127 		targetITab = target
4128 	} else {
4129 		// Looking for pointer to itab for target type and source interface.
4130 		targetITab = s.expr(n.List.First())
4131 	}
4132 
4133 	var tmp *Node       // temporary for use with large types
4134 	var addr *ssa.Value // address of tmp
4135 	if commaok && !canSSAType(n.Type) {
4136 		// unSSAable type, use temporary.
4137 		// TODO: get rid of some of these temporaries.
4138 		tmp = tempAt(n.Pos, s.curfn, n.Type)
4139 		addr = s.addr(tmp, false)
4140 		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
4141 	}
4142 
4143 	cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
4144 	b := s.endBlock()
4145 	b.Kind = ssa.BlockIf
4146 	b.SetControl(cond)
4147 	b.Likely = ssa.BranchLikely
4148 
4149 	bOk := s.f.NewBlock(ssa.BlockPlain)
4150 	bFail := s.f.NewBlock(ssa.BlockPlain)
4151 	b.AddEdgeTo(bOk)
4152 	b.AddEdgeTo(bFail)
4153 
4154 	if !commaok {
4155 		// on failure, panic by calling panicdottype
4156 		s.startBlock(bFail)
4157 		taddr := s.expr(n.Right.Right)
4158 		if n.Left.Type.IsEmptyInterface() {
4159 			s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
4160 		} else {
4161 			s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
4162 		}
4163 
4164 		// on success, return data from interface
4165 		s.startBlock(bOk)
4166 		if direct {
4167 			return s.newValue1(ssa.OpIData, n.Type, iface), nil
4168 		}
4169 		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
4170 		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil
4171 	}
4172 
4173 	// commaok is the more complicated case because we have
4174 	// a control flow merge point.
4175 	bEnd := s.f.NewBlock(ssa.BlockPlain)
4176 	// Note that we need a new valVar each time (unlike okVar where we can
4177 	// reuse the variable) because it might have a different type every time.
4178 	valVar := &Node{Op: ONAME, Class: Pxxx, Sym: &types.Sym{Name: "val"}}
4179 
4180 	// type assertion succeeded
4181 	s.startBlock(bOk)
4182 	if tmp == nil {
4183 		if direct {
4184 			s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
4185 		} else {
4186 			p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
4187 			s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
4188 		}
4189 	} else {
4190 		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
4191 		store := s.newValue3I(ssa.OpMove, ssa.TypeMem, n.Type.Size(), addr, p, s.mem())
4192 		store.Aux = n.Type
4193 		s.vars[&memVar] = store
4194 	}
4195 	s.vars[&okVar] = s.constBool(true)
4196 	s.endBlock()
4197 	bOk.AddEdgeTo(bEnd)
4198 
4199 	// type assertion failed
4200 	s.startBlock(bFail)
4201 	if tmp == nil {
4202 		s.vars[valVar] = s.zeroVal(n.Type)
4203 	} else {
4204 		store := s.newValue2I(ssa.OpZero, ssa.TypeMem, n.Type.Size(), addr, s.mem())
4205 		store.Aux = n.Type
4206 		s.vars[&memVar] = store
4207 	}
4208 	s.vars[&okVar] = s.constBool(false)
4209 	s.endBlock()
4210 	bFail.AddEdgeTo(bEnd)
4211 
4212 	// merge point
4213 	s.startBlock(bEnd)
4214 	if tmp == nil {
4215 		res = s.variable(valVar, n.Type)
4216 		delete(s.vars, valVar)
4217 	} else {
4218 		res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
4219 		s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
4220 	}
4221 	resok = s.variable(&okVar, types.Types[TBOOL])
4222 	delete(s.vars, &okVar)
4223 	return res, resok
4224 }
4225 
4226 // variable returns the value of a variable at the current location.
4227 func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
4228 	v := s.vars[name]
4229 	if v != nil {
4230 		return v
4231 	}
4232 	v = s.fwdVars[name]
4233 	if v != nil {
4234 		return v
4235 	}
4236 
4237 	if s.curBlock == s.f.Entry {
4238 		// No variable should be live at entry.
4239 		s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
4240 	}
4241 	// Make a FwdRef, which records a value that's live on block input.
4242 	// We'll find the matching definition as part of insertPhis.
4243 	v = s.newValue0A(ssa.OpFwdRef, t, name)
4244 	s.fwdVars[name] = v
4245 	s.addNamedValue(name, v)
4246 	return v
4247 }
4248 
4249 func (s *state) mem() *ssa.Value {
4250 	return s.variable(&memVar, ssa.TypeMem)
4251 }
4252 
4253 func (s *state) addNamedValue(n *Node, v *ssa.Value) {
4254 	if n.Class == Pxxx {
4255 		// Don't track our dummy nodes (&memVar etc.).
4256 		return
4257 	}
4258 	if n.IsAutoTmp() {
4259 		// Don't track temporary variables.
4260 		return
4261 	}
4262 	if n.Class == PPARAMOUT {
4263 		// Don't track named output values. This prevents return values
4264 		// from being assigned too early. See #14591 and #14762. TODO: allow this.
4265 		return
4266 	}
4267 	if n.Class == PAUTO && n.Xoffset != 0 {
4268 		s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
4269 	}
4270 	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
4271 	values, ok := s.f.NamedValues[loc]
4272 	if !ok {
4273 		s.f.Names = append(s.f.Names, loc)
4274 	}
4275 	s.f.NamedValues[loc] = append(values, v)
4276 }
4277 
4278 // Branch is an unresolved branch.
4279 type Branch struct {
4280 	P *obj.Prog  // branch instruction
4281 	B *ssa.Block // target
4282 }
4283 
4284 // SSAGenState contains state needed during Prog generation.
4285 type SSAGenState struct {
4286 	pp *Progs
4287 
4288 	// Branches remembers all the branch instructions we've seen
4289 	// and where they would like to go.
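	// (Branch targets may not have been emitted yet when a branch is
	// recorded; genssa resolves them from bstart once all blocks exist.)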
4290 	Branches []Branch
4291 
4292 	// bstart remembers where each block starts (indexed by block ID)
4293 	bstart []*obj.Prog
4294 
4295 	// 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
4296 	SSEto387 map[int16]int16
4297 	// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
4298 	ScratchFpMem *Node
4299 
4300 	maxarg int64 // largest frame size for arguments to calls made by the function
4301 
4302 	// Map from GC safe points to stack map index, generated by
4303 	// liveness analysis.
4304 	stackMapIndex map[*ssa.Value]int
4305 }
4306 
4307 // Prog appends a new Prog.
4308 func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
4309 	return s.pp.Prog(as)
4310 }
4311 
4312 // Pc returns the current Prog, i.e. where the next instruction will be emitted.
4313 func (s *SSAGenState) Pc() *obj.Prog {
4314 	return s.pp.next
4315 }
4316 
4317 // SetPos sets the current source position.
4318 func (s *SSAGenState) SetPos(pos src.XPos) {
4319 	s.pp.pos = pos
4320 }
4321 
4322 // genssa appends entries to pp for each instruction in f.
4323 func genssa(f *ssa.Func, pp *Progs) {
4324 	var s SSAGenState
4325 
4326 	e := f.Frontend().(*ssafn)
4327 
4328 	// Generate GC bitmaps.
4329 	gcargs := makefuncdatasym(pp, "gcargs·", obj.FUNCDATA_ArgsPointerMaps, e.curfn)
4330 	gclocals := makefuncdatasym(pp, "gclocals·", obj.FUNCDATA_LocalsPointerMaps, e.curfn)
4331 	s.stackMapIndex = liveness(e, f, gcargs, gclocals)
4332 
4333 	// Remember where each block starts.
4334 	s.bstart = make([]*obj.Prog, f.NumBlocks())
4335 	s.pp = pp
4336 	var valueProgs map[*obj.Prog]*ssa.Value
4337 	var blockProgs map[*obj.Prog]*ssa.Block
4338 	var logProgs = e.log
4339 	if logProgs {
4340 		valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
4341 		blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
4342 		f.Logf("genssa %s\n", f.Name)
4343 		blockProgs[s.pp.next] = f.Blocks[0]
4344 	}
4345 
4346 	if thearch.Use387 {
4347 		s.SSEto387 = map[int16]int16{}
4348 	}
4349 
4350 	s.ScratchFpMem = e.scratchFpMem
4351 
4352 	// Emit basic blocks
4353 	for i, b := range f.Blocks {
4354 		s.bstart[b.ID] = s.pp.next
4355 		// Emit values in block
4356 		thearch.SSAMarkMoves(&s, b)
4357 		for _, v := range b.Values {
4358 			x := s.pp.next
4359 			s.SetPos(v.Pos)
4360 
4361 			switch v.Op {
4362 			case ssa.OpInitMem:
4363 				// memory arg needs no code
4364 			case ssa.OpArg:
4365 				// input args need no code
4366 			case ssa.OpSP, ssa.OpSB:
4367 				// nothing to do
4368 			case ssa.OpSelect0, ssa.OpSelect1:
4369 				// nothing to do
4370 			case ssa.OpGetG:
4371 				// nothing to do when there's a g register,
4372 				// and checkLower complains if there's not
4373 			case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
4374 				// nothing to do; already used by liveness
4375 			case ssa.OpPhi:
4376 				CheckLoweredPhi(v)
4377 
4378 			default:
4379 				// let the backend handle it
4380 				thearch.SSAGenValue(&s, v)
4381 			}
4382 
4383 			if logProgs {
4384 				for ; x != s.pp.next; x = x.Link {
4385 					valueProgs[x] = v
4386 				}
4387 			}
4388 		}
4389 		// Emit control flow instructions for block
4390 		var next *ssa.Block
4391 		if i < len(f.Blocks)-1 && Debug['N'] == 0 {
4392 			// If -N, leave next==nil so every block with successors
4393 			// ends in a JMP (except call blocks - plive doesn't like
4394 			// select{send,recv} followed by a JMP call). Helps keep
4395 			// line numbers for otherwise empty blocks.
4396 			next = f.Blocks[i+1]
4397 		}
4398 		x := s.pp.next
4399 		s.SetPos(b.Pos)
4400 		thearch.SSAGenBlock(&s, b, next)
4401 		if logProgs {
4402 			for ; x != s.pp.next; x = x.Link {
4403 				blockProgs[x] = b
4404 			}
4405 		}
4406 	}
4407 
4408 	// Resolve branches
4409 	for _, br := range s.Branches {
4410 		br.P.To.Val = s.bstart[br.B.ID]
4411 	}
4412 
4413 	if logProgs {
4414 		for p := pp.Text; p != nil; p = p.Link {
4415 			var s string
4416 			if v, ok := valueProgs[p]; ok {
4417 				s = v.String()
4418 			} else if b, ok := blockProgs[p]; ok {
4419 				s = b.String()
4420 			} else {
4421 				s = " " // most value and branch strings are 2-3 characters long
4422 			}
4423 			f.Logf("%s\t%s\n", s, p)
4424 		}
4425 		if f.HTMLWriter != nil {
4426 			// LineHist is defunct now - this code won't do
4427 			// anything.
4428 			// TODO: fix this (ideally without a global variable)
4429 			// saved := pp.Text.Ctxt.LineHist.PrintFilenameOnly
4430 			// pp.Text.Ctxt.LineHist.PrintFilenameOnly = true
4431 			var buf bytes.Buffer
4432 			buf.WriteString("<code>")
4433 			buf.WriteString("<dl class=\"ssa-gen\">")
4434 			for p := pp.Text; p != nil; p = p.Link {
4435 				buf.WriteString("<dt class=\"ssa-prog-src\">")
4436 				if v, ok := valueProgs[p]; ok {
4437 					buf.WriteString(v.HTML())
4438 				} else if b, ok := blockProgs[p]; ok {
4439 					buf.WriteString(b.HTML())
4440 				}
4441 				buf.WriteString("</dt>")
4442 				buf.WriteString("<dd class=\"ssa-prog\">")
4443 				buf.WriteString(html.EscapeString(p.String()))
4444 				buf.WriteString("</dd>")
4446 			}
4447 			buf.WriteString("</dl>")
4448 			buf.WriteString("</code>")
4449 			f.HTMLWriter.WriteColumn("genssa", buf.String())
4450 			// pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved
4451 		}
4452 	}
4453 
4454 	// Add frame prologue. Zero ambiguously live variables.
4455 	thearch.Defframe(s.pp, e.curfn, e.stksize+s.maxarg)
4456 	if Debug['f'] != 0 {
4457 		frame(0)
4458 	}
4459 
4460 	f.HTMLWriter.Close()
4461 	f.HTMLWriter = nil
4462 }
4463 
4464 type FloatingEQNEJump struct {
4465 	Jump  obj.As
4466 	Index int
4467 }
4468 
4469 func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) {
4470 	p := s.Prog(jumps.Jump)
4471 	p.To.Type = obj.TYPE_BRANCH
4472 	to := jumps.Index
4473 	s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
4474 }
4475 
4476 func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
4477 	switch next {
4478 	case b.Succs[0].Block():
4479 		s.oneFPJump(b, &jumps[0][0])
4480 		s.oneFPJump(b, &jumps[0][1])
4481 	case b.Succs[1].Block():
4482 		s.oneFPJump(b, &jumps[1][0])
4483 		s.oneFPJump(b, &jumps[1][1])
4484 	default:
4485 		s.oneFPJump(b, &jumps[1][0])
4486 		s.oneFPJump(b, &jumps[1][1])
4487 		q := s.Prog(obj.AJMP)
4488 		q.To.Type = obj.TYPE_BRANCH
4489 		s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
4490 	}
4491 }
4492 
4493 func AuxOffset(v *ssa.Value) (offset int64) {
4494 	if v.Aux == nil {
4495 		return 0
4496 	}
4497 	switch sym := v.Aux.(type) {
4498 
4499 	case *ssa.AutoSymbol:
4500 		n := sym.Node.(*Node)
4501 		return n.Xoffset
4502 	}
4503 	return 0
4504 }
4505 
4506 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
4507 func AddAux(a *obj.Addr, v *ssa.Value) {
4508 	AddAux2(a, v, v.AuxInt)
4509 }
4510 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
4511 	if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
4512 		v.Fatalf("bad AddAux addr %v", a)
4513 	}
4514 	// add integer offset
4515 	a.Offset += offset
4516 
4517 	// If no additional symbol offset, we're done.
4518 	if v.Aux == nil {
4519 		return
4520 	}
4521 	// Add symbol's offset from its base register.
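	// (for example — illustrative — a parameter x accessed through an
	// ArgSymbol becomes a NAME_PARAM operand with x's frame offset added,
	// i.e. an x+off(FP)-style address.)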
4522 	switch sym := v.Aux.(type) {
4523 	case *ssa.ExternSymbol:
4524 		a.Name = obj.NAME_EXTERN
4525 		a.Sym = sym.Sym
4526 	case *ssa.ArgSymbol:
4527 		n := sym.Node.(*Node)
4528 		a.Name = obj.NAME_PARAM
4529 		a.Sym = Linksym(n.Orig.Sym)
4530 		a.Offset += n.Xoffset
4531 	case *ssa.AutoSymbol:
4532 		n := sym.Node.(*Node)
4533 		a.Name = obj.NAME_AUTO
4534 		a.Sym = Linksym(n.Sym)
4535 		a.Offset += n.Xoffset
4536 	default:
4537 		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
4538 	}
4539 }
4540 
4541 // extendIndex extends v to a full int width.
4542 // panic using the given function if v does not fit in an int (only on 32-bit archs).
4543 func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value {
4544 	size := v.Type.Size()
4545 	if size == s.config.IntSize {
4546 		return v
4547 	}
4548 	if size > s.config.IntSize {
4549 		// truncate 64-bit indexes on 32-bit pointer archs. Test the
4550 		// high word and branch to out-of-bounds failure if it is not 0.
4551 		if Debug['B'] == 0 {
4552 			hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v)
4553 			cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
4554 			s.check(cmp, panicfn)
4555 		}
4556 		return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v)
4557 	}
4558 
4559 	// Extend value to the required size
	// (the switch key packs both widths: 10*size + IntSize, so e.g. 28
	// means a 2-byte index on an arch with 8-byte ints.)
4560 	var op ssa.Op
4561 	if v.Type.IsSigned() {
4562 		switch 10*size + s.config.IntSize {
4563 		case 14:
4564 			op = ssa.OpSignExt8to32
4565 		case 18:
4566 			op = ssa.OpSignExt8to64
4567 		case 24:
4568 			op = ssa.OpSignExt16to32
4569 		case 28:
4570 			op = ssa.OpSignExt16to64
4571 		case 48:
4572 			op = ssa.OpSignExt32to64
4573 		default:
4574 			s.Fatalf("bad signed index extension %s", v.Type)
4575 		}
4576 	} else {
4577 		switch 10*size + s.config.IntSize {
4578 		case 14:
4579 			op = ssa.OpZeroExt8to32
4580 		case 18:
4581 			op = ssa.OpZeroExt8to64
4582 		case 24:
4583 			op = ssa.OpZeroExt16to32
4584 		case 28:
4585 			op = ssa.OpZeroExt16to64
4586 		case 48:
4587 			op = ssa.OpZeroExt32to64
4588 		default:
4589 			s.Fatalf("bad unsigned index extension %s", v.Type)
4590 		}
4591 	}
4592 	return s.newValue1(op, types.Types[TINT], v)
4593 }
4594 
4595 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
4596 // Called during ssaGenValue.
4597 func CheckLoweredPhi(v *ssa.Value) {
4598 	if v.Op != ssa.OpPhi {
4599 		v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
4600 	}
4601 	if v.Type.IsMemory() {
4602 		return
4603 	}
4604 	f := v.Block.Func
4605 	loc := f.RegAlloc[v.ID]
4606 	for _, a := range v.Args {
4607 		if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
4608 			v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
4609 		}
4610 	}
4611 }
4612 
4613 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
4614 // The output of LoweredGetClosurePtr is generally hardwired to the correct register.
4615 // That register contains the closure pointer on closure entry.
4616 func CheckLoweredGetClosurePtr(v *ssa.Value) {
4617 	entry := v.Block.Func.Entry
4618 	if entry != v.Block || entry.Values[0] != v {
4619 		Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
4620 	}
4621 }
4622 
4623 // AutoVar returns a *Node and int64 representing the auto variable and offset within it
4624 // where v should be spilled.
4625 func AutoVar(v *ssa.Value) (*Node, int64) {
4626 	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
4627 	if v.Type.Size() > loc.Type.Size() {
4628 		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
4629 	}
4630 	return loc.N.(*Node), loc.Off
4631 }
4632 
// AddrAuto fills in a as the (SP-relative) address of v's spill slot.
4633 func AddrAuto(a *obj.Addr, v *ssa.Value) {
4634 	n, off := AutoVar(v)
4635 	a.Type = obj.TYPE_MEM
4636 	a.Sym = Linksym(n.Sym)
4637 	a.Reg = int16(thearch.REGSP)
4638 	a.Offset = n.Xoffset + off
4639 	if n.Class == PPARAM || n.Class == PPARAMOUT {
4640 		a.Name = obj.NAME_PARAM
4641 	} else {
4642 		a.Name = obj.NAME_AUTO
4643 	}
4644 }
4645 
4646 func (s *SSAGenState) AddrScratch(a *obj.Addr) {
4647 	if s.ScratchFpMem == nil {
4648 		panic("no scratch memory available; forgot to declare usesScratch for Op?")
4649 	}
4650 	a.Type = obj.TYPE_MEM
4651 	a.Name = obj.NAME_AUTO
4652 	a.Sym = Linksym(s.ScratchFpMem.Sym)
4653 	a.Reg = int16(thearch.REGSP)
4654 	a.Offset = s.ScratchFpMem.Xoffset
4655 }
4656 
4657 func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
4658 	idx, ok := s.stackMapIndex[v]
4659 	if !ok {
4660 		Fatalf("missing stack map index for %v", v.LongString())
4661 	}
4662 	p := s.Prog(obj.APCDATA)
4663 	Addrconst(&p.From, obj.PCDATA_StackMapIndex)
4664 	Addrconst(&p.To, int64(idx))
4665 
4666 	if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
4667 		// Deferred calls will appear to be returning to
4668 		// the CALL deferreturn(SB) that we are about to emit.
4669 		// However, the stack trace code will show the line
4670 		// of the instruction byte before the return PC.
4671 		// To avoid that being an unrelated instruction,
4672 		// insert an actual hardware NOP that will have the right line number.
4673 		// This is different from obj.ANOP, which is a virtual no-op
4674 		// that doesn't make it into the instruction stream.
4675 		thearch.Ginsnop(s.pp)
4676 	}
4677 
4678 	p = s.Prog(obj.ACALL)
4679 	if sym, ok := v.Aux.(*obj.LSym); ok {
4680 		p.To.Type = obj.TYPE_MEM
4681 		p.To.Name = obj.NAME_EXTERN
4682 		p.To.Sym = sym
4683 	} else {
4684 		// TODO(mdempsky): Can these differences be eliminated?
4685 		switch thearch.LinkArch.Family {
4686 		case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
4687 			p.To.Type = obj.TYPE_REG
4688 		case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
4689 			p.To.Type = obj.TYPE_MEM
4690 		default:
4691 			Fatalf("unknown indirect call family")
4692 		}
4693 		p.To.Reg = v.Args[0].Reg()
4694 	}
4695 	if s.maxarg < v.AuxInt {
4696 		s.maxarg = v.AuxInt
4697 	}
4698 	return p
4699 }
4700 
4701 // fieldIdx finds the index of the field referred to by the ODOT node n.
4702 func fieldIdx(n *Node) int {
4703 	t := n.Left.Type
4704 	f := n.Sym
4705 	if !t.IsStruct() {
4706 		panic("ODOT's LHS is not a struct")
4707 	}
4708 
4709 	var i int
4710 	for _, t1 := range t.Fields().Slice() {
4711 		if t1.Sym != f {
4712 			i++
4713 			continue
4714 		}
4715 		if t1.Offset != n.Xoffset {
4716 			panic("field offset doesn't match")
4717 		}
4718 		return i
4719 	}
4720 	panic(fmt.Sprintf("can't find field in expr %v\n", n))
4721 
4722 	// TODO: keep the result of this function somewhere in the ODOT Node
4723 	// so we don't have to recompute it each time we need it.
4724 }
4725 
4726 // ssafn holds frontend information about a function that the backend is processing.
4727 // It also exports a bunch of compiler services for the ssa backend.
4728 type ssafn struct {
4729 	curfn        *Node
4730 	strings      map[string]interface{} // map from constant string to data symbols
4731 	scratchFpMem *Node                  // temp for floating point register / memory moves on some architectures
4732 	stksize      int64                  // stack size for current frame
4733 	stkptrsize   int64                  // prefix of stack containing pointers
4734 	log          bool
4735 }
4736 
4737 // StringData returns a symbol (a *types.Sym wrapped in an interface) which
4738 // is the data component of a global string constant containing s.
4739 func (e *ssafn) StringData(s string) interface{} {
4740 	if aux, ok := e.strings[s]; ok {
4741 		return aux
4742 	}
4743 	if e.strings == nil {
4744 		e.strings = make(map[string]interface{})
4745 	}
4746 	data := stringsym(s)
4747 	aux := &ssa.ExternSymbol{Sym: data}
4748 	e.strings[s] = aux
4749 	return aux
4750 }
4751 
4752 func (e *ssafn) Auto(pos src.XPos, t ssa.Type) ssa.GCNode {
4753 	n := tempAt(pos, e.curfn, t.(*types.Type)) // Note: adds new auto to e.curfn.Func.Dcl list
4754 	return n
4755 }
4756 
4757 func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4758 	n := name.N.(*Node)
4759 	ptrType := types.NewPtr(types.Types[TUINT8])
4760 	lenType := types.Types[TINT]
4761 	if n.Class == PAUTO && !n.Addrtaken() {
4762 		// Split this string up into two separate variables.
4763 		p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos)
4764 		l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos)
4765 		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
4766 	}
4767 	// Return the two parts of the larger variable.
4768 	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
4769 }
4770 
4771 func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4772 	n := name.N.(*Node)
4773 	t := types.NewPtr(types.Types[TUINT8])
4774 	if n.Class == PAUTO && !n.Addrtaken() {
4775 		// Split this interface up into two separate variables.
4776 		f := ".itab"
4777 		if n.Type.IsEmptyInterface() {
4778 			f = ".type"
4779 		}
4780 		c := e.namedAuto(n.Sym.Name+f, t, n.Pos)
4781 		d := e.namedAuto(n.Sym.Name+".data", t, n.Pos)
4782 		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
4783 	}
4784 	// Return the two parts of the larger variable.
4785 	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
4786 }
4787 
4788 func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
4789 	n := name.N.(*Node)
4790 	ptrType := types.NewPtr(name.Type.ElemType().(*types.Type))
4791 	lenType := types.Types[TINT]
4792 	if n.Class == PAUTO && !n.Addrtaken() {
4793 		// Split this slice up into three separate variables.
4794 		p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos)
4795 		l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos)
4796 		c := e.namedAuto(n.Sym.Name+".cap", lenType, n.Pos)
4797 		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0}
4798 	}
4799 	// Return the three parts of the larger variable.
4800 	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
4801 		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
4802 		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
4803 }
4804 
4805 func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4806 	n := name.N.(*Node)
4807 	s := name.Type.Size() / 2
4808 	var t *types.Type
4809 	if s == 8 {
4810 		t = types.Types[TFLOAT64]
4811 	} else {
4812 		t = types.Types[TFLOAT32]
4813 	}
4814 	if n.Class == PAUTO && !n.Addrtaken() {
4815 		// Split this complex up into two separate variables.
4816 		c := e.namedAuto(n.Sym.Name+".real", t, n.Pos)
4817 		d := e.namedAuto(n.Sym.Name+".imag", t, n.Pos)
4818 		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
4819 	}
4820 	// Return the two parts of the larger variable.
4821 	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
4822 }
4823 
4824 func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4825 	n := name.N.(*Node)
4826 	var t *types.Type
4827 	if name.Type.IsSigned() {
4828 		t = types.Types[TINT32]
4829 	} else {
4830 		t = types.Types[TUINT32]
4831 	}
4832 	if n.Class == PAUTO && !n.Addrtaken() {
4833 		// Split this int64 up into two separate variables.
4834 		h := e.namedAuto(n.Sym.Name+".hi", t, n.Pos)
4835 		l := e.namedAuto(n.Sym.Name+".lo", types.Types[TUINT32], n.Pos)
4836 		return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: types.Types[TUINT32], Off: 0}
4837 	}
4838 	// Return the two parts of the larger variable.
4839 	if thearch.LinkArch.ByteOrder == binary.BigEndian {
4840 		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
4841 	}
4842 	return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
4843 }
4844 
4845 func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
4846 	n := name.N.(*Node)
4847 	st := name.Type
4848 	ft := st.FieldType(i)
4849 	if n.Class == PAUTO && !n.Addrtaken() {
4850 		// Note: the _ field may appear several times. But
4851 		// have no fear, identically-named but distinct Autos are
4852 		// ok, albeit maybe confusing for a debugger.
4853 		x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft, n.Pos)
4854 		return ssa.LocalSlot{N: x, Type: ft, Off: 0}
4855 	}
4856 	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
4857 }
4858 
4859 func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
4860 	n := name.N.(*Node)
4861 	at := name.Type
4862 	if at.NumElem() != 1 {
4863 		Fatalf("bad array size")
4864 	}
4865 	et := at.ElemType()
4866 	if n.Class == PAUTO && !n.Addrtaken() {
4867 		x := e.namedAuto(n.Sym.Name+"[0]", et, n.Pos)
4868 		return ssa.LocalSlot{N: x, Type: et, Off: 0}
4869 	}
4870 	return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
4871 }
4872 
4873 func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
4874 	return itabsym(it, offset)
4875 }
4876 
4877 // namedAuto returns a new AUTO variable with the given name and type.
4878 // These are exposed to the debugger.
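// SplitString, for instance, uses this to materialize foo.ptr and foo.len
// autos when a string variable foo is decomposed.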
4879 func (e *ssafn) namedAuto(name string, typ ssa.Type, pos src.XPos) ssa.GCNode {
4880 	t := typ.(*types.Type)
4881 	s := &types.Sym{Name: name, Pkg: localpkg}
4882 
4883 	n := new(Node)
4884 	n.Name = new(Name)
4885 	n.Op = ONAME
4886 	n.Pos = pos
4887 	n.Orig = n
4888 
4889 	s.Def = asTypesNode(n)
4890 	asNode(s.Def).SetUsed(true)
4891 	n.Sym = s
4892 	n.Type = t
4893 	n.Class = PAUTO
4894 	n.SetAddable(true)
4895 	n.Esc = EscNever
4896 	n.Name.Curfn = e.curfn
4897 	e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
4898 
4899 	dowidth(t)
4900 	return n
4901 }
4902 
4903 func (e *ssafn) CanSSA(t ssa.Type) bool {
4904 	return canSSAType(t.(*types.Type))
4905 }
4906 
4907 func (e *ssafn) Line(pos src.XPos) string {
4908 	return linestr(pos)
4909 }
4910 
4911 // Logf logs a message from the compiler.
4912 func (e *ssafn) Logf(msg string, args ...interface{}) {
4913 	if e.log {
4914 		fmt.Printf(msg, args...)
4915 	}
4916 }
4917 
4918 func (e *ssafn) Log() bool {
4919 	return e.log
4920 }
4921 
4922 // Fatalf reports a compiler error and exits.
4923 func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
4924 	lineno = pos
4925 	Fatalf(msg, args...)
4926 }
4927 
4928 // Warnl reports a "warning", which is usually flag-triggered
4929 // logging output for the benefit of tests.
4930 func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
4931 	Warnl(pos, fmt_, args...)
4932 }
4933 
4934 func (e *ssafn) Debug_checknil() bool {
4935 	return Debug_checknil != 0
4936 }
4937 
4938 func (e *ssafn) Debug_wb() bool {
4939 	return Debug_wb != 0
4940 }
4941 
4942 func (e *ssafn) UseWriteBarrier() bool {
4943 	return use_writebarrier
4944 }
4945 
4946 func (e *ssafn) Syslook(name string) *obj.LSym {
4947 	return Linksym(syslook(name).Sym)
4948 }
4949 
4950 func (n *Node) Typ() ssa.Type {
4951 	return n.Type
4952 }
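
// A quick way to inspect what this file produces (assuming a toolchain
// built from this tree): set GOSSAFUNC to a function name before building,
//
//	GOSSAFUNC=main go build foo.go
//
// and buildssa will print each SSA phase and write an ssa.html trace for
// that function, as wired up where GOSSAFUNC is read earlier in this file.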