// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

func initssaconfig() {
	types_ := ssa.Types{
		Bool:       types.Types[TBOOL],
		Int8:       types.Types[TINT8],
		Int16:      types.Types[TINT16],
		Int32:      types.Types[TINT32],
		Int64:      types.Types[TINT64],
		UInt8:      types.Types[TUINT8],
		UInt16:     types.Types[TUINT16],
		UInt32:     types.Types[TUINT32],
		UInt64:     types.Types[TUINT64],
		Float32:    types.Types[TFLOAT32],
		Float64:    types.Types[TFLOAT64],
		Int:        types.Types[TINT],
		Uintptr:    types.Types[TUINTPTR],
		String:     types.Types[TSTRING],
		BytePtr:    types.NewPtr(types.Types[TUINT8]),
		Int32Ptr:   types.NewPtr(types.Types[TINT32]),
		UInt32Ptr:  types.NewPtr(types.Types[TUINT32]),
		IntPtr:     types.NewPtr(types.Types[TINT]),
		UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
		Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
		Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
		BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
	}
	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
	_ = types.NewPtr(types.NewPtr(types.Idealstring))                 // **string
	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewSlice(types.Idealstring))               // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[TINT16])                             // *int16
	_ = types.NewPtr(types.Types[TINT64])                             // *int64
	_ = types.NewPtr(types.Errortype)                                 // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
	if thearch.LinkArch.Name == "386" {
		ssaConfig.Set387(thearch.Use387)
	}
	ssaCaches = make([]ssa.Cache, nBackendWorkers)

	// Set up some runtime functions we'll need to call.
	Newproc = Sysfunc("newproc")
	Deferproc = Sysfunc("deferproc")
	Deferreturn = Sysfunc("deferreturn")
	Duffcopy = Sysfunc("duffcopy")
	Duffzero = Sysfunc("duffzero")
	panicindex = Sysfunc("panicindex")
	panicslice = Sysfunc("panicslice")
	panicdivide = Sysfunc("panicdivide")
	growslice = Sysfunc("growslice")
	panicdottypeE = Sysfunc("panicdottypeE")
	panicdottypeI = Sysfunc("panicdottypeI")
	panicnildottype = Sysfunc("panicnildottype")
	assertE2I = Sysfunc("assertE2I")
	assertE2I2 = Sysfunc("assertE2I2")
	assertI2I = Sysfunc("assertI2I")
	assertI2I2 = Sysfunc("assertI2I2")
	goschedguarded = Sysfunc("goschedguarded")
	writeBarrier = Sysfunc("writeBarrier")
	writebarrierptr = Sysfunc("writebarrierptr")
	typedmemmove = Sysfunc("typedmemmove")
	typedmemclr = Sysfunc("typedmemclr")
	Udiv = Sysfunc("udiv")
}

// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
	name := fn.funcname()
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	s.hasdefer = fn.Func.HasDefer()
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Config = ssaConfig
	s.f.Cache = &ssaCaches[worker]
	s.f.Cache.Reset()
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
	s.f.Name = name
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	defer func() {
		if s.f.WBPos.IsKnown() {
			fn.Func.WBPos = s.f.WBPos
		}
	}()
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}

	if printssa {
		s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class() {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sp)
			if n.Class() == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %s unimplemented", classnames[n.Class()])
		}
	}

	// Populate SSAable arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class() == PPARAM && s.canSSA(n) {
			s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	s.insertPhis()

	// Don't carry a reference to this around longer than necessary.
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)
	return s.f
}
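// Note on GOSSAFUNC (editorial aside, not in the original source): the
// environment variable selects a single function by name, e.g.
//
//	GOSSAFUNC=buildssa go build cmd/compile
//
// which prints that function's SSA form and, via the HTMLWriter set up
// above, writes an ssa.html report showing the function after each pass
// to the current directory.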
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *Node

	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	cgoUnsafeArgs bool
	hasdefer      bool // whether the function contains a defer statement
}

type funcLine struct {
	f    *obj.LSym
	line src.XPos
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
	okVar     = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	b.Pos = s.peekPos()
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit nodes with a missing line number;
		// use the parent line number in that case.
		line = s.peekPos()
		if Debug['K'] != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	}
	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}
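// Usage note (editorial aside, not in the original source): callers bracket
// each node with the position stack so generated values pick up the right
// source position, typically:
//
//	s.pushLine(n.Pos)
//	defer s.popLine()
//
// after which the newValue* helpers below stamp s.peekPos() on every value.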
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t ssa.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t ssa.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekPos(), op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekPos(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekPos(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekPos(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekPos(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}
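// Sizing note (editorial aside, not in the original source): constInt picks
// the constant width from the target's pointer size, so on a 64-bit target
//
//	s.constInt(types.Types[TINT], 1<<40)
//
// emits a 64-bit constant, while on a 32-bit target the same call would
// reach Fatalf because the value does not fit in an int32.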
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Pos)
	defer s.popLine()

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op != OLABEL {
		return
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		deref := false
		if !canSSAType(n.Rlist.First().Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, deref, 0)
		s.assign(n.List.Second(), resok, false, 0)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		s.assign(n.List.First(), v1, false, 0)
		s.assign(n.List.Second(), v2, false, 0)
		return

	case ODCL:
		if n.Left.Class() == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym
		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.labeledControl(); ctl != nil {
			s.labeledNodes[ctl] = lab
		}

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

	case OAS:
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//   VARDEF x
			//   COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) {
					if !s.canSSA(n.Left) {
						if Debug_append > 0 {
							Warnl(n.Pos, "append: len-only update")
						}
						s.append(rhs, true)
						return
					} else {
						if Debug_append > 0 { // replicating old diagnostic message
							Warnl(n.Pos, "append: len-only update (in local slice)")
						}
					}
				}
			}
		}

		if isblank(n.Left) {
			// _ = rhs
			// Just evaluate rhs for side-effects.
			if rhs != nil {
				s.expr(rhs)
			}
			return
		}

		var t *types.Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		var r *ssa.Value
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}
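		// Worked example (editorial aside, not in the original source): for
		// the self-slicing assignment s = s[:n], only the length field
		// changes, so the logic below computes skip == skipPtr|skipCap and
		// s.assign stores just the new length.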
		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//    tmp = len(*p)
			//    (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, deref, skip)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		var likely int8
		if n.Likely() {
			likely = 1
		}
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Left.Sym.Linksym()

	case OCONTINUE, OBREAK:
		var to *ssa.Block
		if n.Left == nil {
			// plain break/continue
			switch n.Op {
			case OCONTINUE:
				to = s.continueTo
			case OBREAK:
				to = s.breakTo
			}
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)
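	// CFG sketch (editorial aside, not in the original source): a loop such
	// as "for i := 0; i < n; i++ { body }" lowers, via the case below, to
	//
	//	... -> bCond -> bBody -> bIncr -> bCond (back edge)
	//	       bCond -> bEnd (when the condition is false)
	//
	// with bIncr as the unlabeled continue target and bEnd as the break target.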
	case OFOR, OFORUNTIL:
		// OFOR: for Ninit; Left; Right { Nbody }
		// For      = cond; body; incr
		// Foruntil = body; incr; cond
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test (OFOR) or body (OFORUNTIL)
		b := s.endBlock()
		if n.Op == OFOR {
			b.AddEdgeTo(bCond)
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}

		} else {
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}

		if n.Op == OFORUNTIL {
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}
		}

		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
		// If we still have a current block here, then mark it unreachable.
		if s.curBlock != nil {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken() {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}

// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if s.hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, n.Type, addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
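// Lookup sketch (editorial aside, not in the original source): opToSSA below
// is keyed by (frontend op, concrete element type), so for example on a
// 64-bit target
//
//	s.ssaOp(OADD, types.Types[TINT])
//
// first resolves TINT to TINT64 via concreteEtype and then returns
// ssa.OpAdd64.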
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}

type opAndType struct {
	op    Op
	etype types.EType
}

var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}:    ssa.OpMul8,
	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
	opAndType{OMUL, TINT16}:   ssa.OpMul16,
	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
	opAndType{OMUL, TINT32}:   ssa.OpMul32,
	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
	opAndType{OMUL, TINT64}:   ssa.OpMul64,
	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}:   ssa.OpMod8,
	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
	opAndType{OMOD, TINT16}:  ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}:  ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}:  ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}: ssa.OpAnd8,
	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
	opAndType{OAND, TINT16}:  ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}:  ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}:  ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}:   ssa.OpOr8,
	opAndType{OOR, TUINT8}:  ssa.OpOr8,
	opAndType{OOR, TINT16}:  ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}:  ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}:  ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}:   ssa.OpXor8,
	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
	opAndType{OXOR, TINT16}:  ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}:  ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}:  ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
	opAndType{OEQ, TINT8}:      ssa.OpEq8,
	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
	opAndType{OEQ, TINT16}:     ssa.OpEq16,
	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
	opAndType{OEQ, TINT32}:     ssa.OpEq32,
	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
	opAndType{OEQ, TINT64}:     ssa.OpEq64,
	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
	opAndType{OEQ, TPTR32}:     ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}:     ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,

	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
	opAndType{ONE, TINT8}:      ssa.OpNeq8,
	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
	opAndType{ONE, TINT16}:     ssa.OpNeq16,
	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
	opAndType{ONE, TINT32}:     ssa.OpNeq32,
	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
	opAndType{ONE, TINT64}:     ssa.OpNeq64,
	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
	opAndType{ONE, TPTR32}:     ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}:     ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,

	opAndType{OLT, TINT8}:    ssa.OpLess8,
	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
	opAndType{OLT, TINT16}:   ssa.OpLess16,
	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
	opAndType{OLT, TINT32}:   ssa.OpLess32,
	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
	opAndType{OLT, TINT64}:   ssa.OpLess64,
	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}:    ssa.OpGreater8,
	opAndType{OGT, TUINT8}:   ssa.OpGreater8U,
	opAndType{OGT, TINT16}:   ssa.OpGreater16,
	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
	opAndType{OGT, TINT32}:   ssa.OpGreater32,
	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
	opAndType{OGT, TINT64}:   ssa.OpGreater64,
	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}:    ssa.OpLeq8,
	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
	opAndType{OLE, TINT16}:   ssa.OpLeq16,
	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
	opAndType{OLE, TINT32}:   ssa.OpLeq32,
	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
	opAndType{OLE, TINT64}:   ssa.OpLeq64,
	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}:    ssa.OpGeq8,
	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
	opAndType{OGE, TINT16}:   ssa.OpGeq16,
	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
	opAndType{OGE, TINT32}:   ssa.OpGeq32,
	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
	opAndType{OGE, TINT64}:   ssa.OpGeq64,
	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}

// concreteEtype returns t's etype, with the platform-dependent etypes
// TINT, TUINT, and TUINTPTR resolved to their fixed-width equivalents.
func (s *state) concreteEtype(t *types.Type) types.EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.PtrSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}

// ssaOp returns the SSA op for the given op applied to values of type t.
func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}

// floatForComplex returns the float type that makes up the components of
// the complex type t (float32 for complex64, float64 for complex128).
func floatForComplex(t *types.Type) *types.Type {
	if t.Size() == 8 {
		return types.Types[TFLOAT32]
	} else {
		return types.Types[TFLOAT64]
	}
}

type opAndTwoTypes struct {
	op     Op
	etype1 types.EType
	etype2 types.EType
}

type twoTypes struct {
	etype1 types.EType
	etype2 types.EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType types.EType
}
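// Reading guide (editorial aside, not in the original source): each entry
// below pairs a pre-conversion op, a post-conversion op, and the
// intermediate type between them. For example, int16 -> float64 is
//
//	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32}
//
// i.e. sign-extend the int16 to int32, then convert the int32 to float64.
// ssa.OpInvalid marks the unsigned-64-bit cases that need branchy
// expansions instead of a single conversion op.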
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{

	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead

	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}

// This map is used only on 32-bit archs, and only includes the cases that
// differ there: on 32-bit archs, don't use int64<->float conversions for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64<->float conversions, only on machines that have instructions for that.
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
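// Naming convention (editorial aside, not in the original source): in the
// shift table below, OpLsh8x16 shifts an 8-bit value by a 16-bit shift
// count. Signed right shifts are Rsh{N}x{M}; unsigned right shifts get a U,
// as in Rsh8Ux16. So x >> y with x int32 and y uint8 selects ssa.OpRsh32x8.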
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}

// ssaShiftOp returns the SSA op for a shift op with value type t and shift-count type u.
func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OARRAYBYTESTRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
		len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTRARRAYBYTETMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Left.Sym.Linksym()})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class() == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym).Linksym()
			aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: sym})
			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := types.Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := types.Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad complex size %d", n.Type.Size())
				return nil
			}

		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
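		// Concrete case (editorial aside, not in the original source):
		// converting between uintptr and unsafe.Pointer changes pointerness,
		// so uintptr(unsafe.Pointer(p)) takes the OpConvert branch below,
		// which keeps the value visible to the GC across the conversion.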
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsBoolean() && tt.IsKind(TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
			return s.newValue1(ssa.OpCopy, n.Type, x)
		}
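		// Decoding hint (editorial aside, not in the original source): the
		// switch keys below encode the two operand widths as
		// 10*fromSize+toSize, so e.g. an int64 -> int8 truncation is case 81
		// (OpTrunc64to8) and an int8 -> int64 sign extension is case 18
		// (OpSignExt8to64).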
1670 if ft.IsInteger() { 1671 // tt is float32 or float64, and ft is also unsigned 1672 if tt.Size() == 4 { 1673 return s.uint64Tofloat32(n, x, ft, tt) 1674 } 1675 if tt.Size() == 8 { 1676 return s.uint64Tofloat64(n, x, ft, tt) 1677 } 1678 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt) 1679 } 1680 // ft is float32 or float64, and tt is unsigned integer 1681 if ft.Size() == 4 { 1682 return s.float32ToUint64(n, x, ft, tt) 1683 } 1684 if ft.Size() == 8 { 1685 return s.float64ToUint64(n, x, ft, tt) 1686 } 1687 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) 1688 return nil 1689 } 1690 1691 if ft.IsComplex() && tt.IsComplex() { 1692 var op ssa.Op 1693 if ft.Size() == tt.Size() { 1694 switch ft.Size() { 1695 case 8: 1696 op = ssa.OpRound32F 1697 case 16: 1698 op = ssa.OpRound64F 1699 default: 1700 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1701 } 1702 } else if ft.Size() == 8 && tt.Size() == 16 { 1703 op = ssa.OpCvt32Fto64F 1704 } else if ft.Size() == 16 && tt.Size() == 8 { 1705 op = ssa.OpCvt64Fto32F 1706 } else { 1707 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1708 } 1709 ftp := floatForComplex(ft) 1710 ttp := floatForComplex(tt) 1711 return s.newValue2(ssa.OpComplexMake, tt, 1712 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), 1713 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) 1714 } 1715 1716 s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) 1717 return nil 1718 1719 case ODOTTYPE: 1720 res, _ := s.dottype(n, false) 1721 return res 1722 1723 // binary ops 1724 case OLT, OEQ, ONE, OLE, OGE, OGT: 1725 a := s.expr(n.Left) 1726 b := s.expr(n.Right) 1727 if n.Left.Type.IsComplex() { 1728 pt := floatForComplex(n.Left.Type) 1729 op := s.ssaOp(OEQ, pt) 1730 r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) 1731 i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) 1732 c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i) 1733 switch n.Op { 1734 case OEQ: 1735 return c 1736 case ONE: 1737 return s.newValue1(ssa.OpNot, types.Types[TBOOL], c) 1738 default: 1739 s.Fatalf("ordered complex compare %v", n.Op) 1740 } 1741 } 1742 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b) 1743 case OMUL: 1744 a := s.expr(n.Left) 1745 b := s.expr(n.Right) 1746 if n.Type.IsComplex() { 1747 mulop := ssa.OpMul64F 1748 addop := ssa.OpAdd64F 1749 subop := ssa.OpSub64F 1750 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1751 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1752 1753 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1754 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1755 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1756 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1757 1758 if pt != wt { // Widen for calculation 1759 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1760 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1761 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1762 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1763 } 1764 1765 xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1766 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal)) 1767 1768 if pt != wt { // Narrow to store back 1769 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1770 ximag = 
s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1771 } 1772 1773 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1774 } 1775 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1776 1777 case ODIV: 1778 a := s.expr(n.Left) 1779 b := s.expr(n.Right) 1780 if n.Type.IsComplex() { 1781 // TODO this is not executed because the front-end substitutes a runtime call. 1782 // That probably ought to change; with modest optimization the widen/narrow 1783 // conversions could all be elided in larger expression trees. 1784 mulop := ssa.OpMul64F 1785 addop := ssa.OpAdd64F 1786 subop := ssa.OpSub64F 1787 divop := ssa.OpDiv64F 1788 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1789 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1790 1791 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1792 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1793 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1794 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1795 1796 if pt != wt { // Widen for calculation 1797 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1798 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1799 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1800 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1801 } 1802 1803 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag)) 1804 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1805 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag)) 1806 1807 // TODO not sure if this is best done in wide precision or narrow 1808 // Double-rounding might be an issue. 1809 // Note that the pre-SSA implementation does the entire calculation 1810 // in wide format, so wide is compatible. 1811 xreal = s.newValue2(divop, wt, xreal, denom) 1812 ximag = s.newValue2(divop, wt, ximag, denom) 1813 1814 if pt != wt { // Narrow to store back 1815 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1816 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1817 } 1818 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1819 } 1820 if n.Type.IsFloat() { 1821 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1822 } 1823 return s.intDivide(n, a, b) 1824 case OMOD: 1825 a := s.expr(n.Left) 1826 b := s.expr(n.Right) 1827 return s.intDivide(n, a, b) 1828 case OADD, OSUB: 1829 a := s.expr(n.Left) 1830 b := s.expr(n.Right) 1831 if n.Type.IsComplex() { 1832 pt := floatForComplex(n.Type) 1833 op := s.ssaOp(n.Op, pt) 1834 return s.newValue2(ssa.OpComplexMake, n.Type, 1835 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1836 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1837 } 1838 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1839 case OAND, OOR, OXOR: 1840 a := s.expr(n.Left) 1841 b := s.expr(n.Right) 1842 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1843 case OLSH, ORSH: 1844 a := s.expr(n.Left) 1845 b := s.expr(n.Right) 1846 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1847 case OANDAND, OOROR: 1848 // To implement OANDAND (and OOROR), we introduce a 1849 // new temporary variable to hold the result. The 1850 // variable is associated with the OANDAND node in the 1851 // s.vars table (normally variables are only 1852 // associated with ONAME nodes). 
We convert 1853 // A && B 1854 // to 1855 // var = A 1856 // if var { 1857 // var = B 1858 // } 1859 // Using var in the subsequent block introduces the 1860 // necessary phi variable. 1861 el := s.expr(n.Left) 1862 s.vars[n] = el 1863 1864 b := s.endBlock() 1865 b.Kind = ssa.BlockIf 1866 b.SetControl(el) 1867 // In theory, we should set b.Likely here based on context. 1868 // However, gc only gives us likeliness hints 1869 // in a single place, for plain OIF statements, 1870 // and passing around context is finicky, so don't bother for now. 1871 1872 bRight := s.f.NewBlock(ssa.BlockPlain) 1873 bResult := s.f.NewBlock(ssa.BlockPlain) 1874 if n.Op == OANDAND { 1875 b.AddEdgeTo(bRight) 1876 b.AddEdgeTo(bResult) 1877 } else if n.Op == OOROR { 1878 b.AddEdgeTo(bResult) 1879 b.AddEdgeTo(bRight) 1880 } 1881 1882 s.startBlock(bRight) 1883 er := s.expr(n.Right) 1884 s.vars[n] = er 1885 1886 b = s.endBlock() 1887 b.AddEdgeTo(bResult) 1888 1889 s.startBlock(bResult) 1890 return s.variable(n, types.Types[TBOOL]) 1891 case OCOMPLEX: 1892 r := s.expr(n.Left) 1893 i := s.expr(n.Right) 1894 return s.newValue2(ssa.OpComplexMake, n.Type, r, i) 1895 1896 // unary ops 1897 case OMINUS: 1898 a := s.expr(n.Left) 1899 if n.Type.IsComplex() { 1900 tp := floatForComplex(n.Type) 1901 negop := s.ssaOp(n.Op, tp) 1902 return s.newValue2(ssa.OpComplexMake, n.Type, 1903 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), 1904 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) 1905 } 1906 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1907 case ONOT, OCOM: 1908 a := s.expr(n.Left) 1909 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a) 1910 case OIMAG, OREAL: 1911 a := s.expr(n.Left) 1912 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a) 1913 case OPLUS: 1914 return s.expr(n.Left) 1915 1916 case OADDR: 1917 return s.addr(n.Left, n.Bounded()) 1918 1919 case OINDREGSP: 1920 addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset) 1921 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 1922 1923 case OIND: 1924 p := s.exprPtr(n.Left, false, n.Pos) 1925 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1926 1927 case ODOT: 1928 t := n.Left.Type 1929 if canSSAType(t) { 1930 v := s.expr(n.Left) 1931 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v) 1932 } 1933 if n.Left.Op == OSTRUCTLIT { 1934 // All literals with nonzero fields have already been 1935 // rewritten during walk. Any that remain are just T{} 1936 // or equivalents. Use the zero value. 1937 if !iszero(n.Left) { 1938 Fatalf("literal with nonzero value in SSA: %v", n.Left) 1939 } 1940 return s.zeroVal(n.Type) 1941 } 1942 p := s.addr(n, false) 1943 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1944 1945 case ODOTPTR: 1946 p := s.exprPtr(n.Left, false, n.Pos) 1947 p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p) 1948 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 1949 1950 case OINDEX: 1951 switch { 1952 case n.Left.Type.IsString(): 1953 if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) { 1954 // Replace "abc"[1] with 'b'. 1955 // Delayed until now because "abc"[1] is not an ideal constant. 1956 // See test/fixedbugs/issue11370.go.
1957 return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()]))) 1958 } 1959 a := s.expr(n.Left) 1960 i := s.expr(n.Right) 1961 i = s.extendIndex(i, panicindex) 1962 if !n.Bounded() { 1963 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a) 1964 s.boundsCheck(i, len) 1965 } 1966 ptrtyp := s.f.Config.Types.BytePtr 1967 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 1968 if Isconst(n.Right, CTINT) { 1969 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 1970 } else { 1971 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 1972 } 1973 return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem()) 1974 case n.Left.Type.IsSlice(): 1975 p := s.addr(n, false) 1976 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1977 case n.Left.Type.IsArray(): 1978 if bound := n.Left.Type.NumElem(); bound <= 1 { 1979 // SSA can handle arrays of length at most 1. 1980 a := s.expr(n.Left) 1981 i := s.expr(n.Right) 1982 if bound == 0 { 1983 // Bounds check will never succeed. Might as well 1984 // use constants for the bounds check. 1985 z := s.constInt(types.Types[TINT], 0) 1986 s.boundsCheck(z, z) 1987 // The return value won't be live, return junk. 1988 return s.newValue0(ssa.OpUnknown, n.Type) 1989 } 1990 i = s.extendIndex(i, panicindex) 1991 if !n.Bounded() { 1992 s.boundsCheck(i, s.constInt(types.Types[TINT], bound)) 1993 } 1994 return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) 1995 } 1996 p := s.addr(n, false) 1997 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1998 default: 1999 s.Fatalf("bad type for index %v", n.Left.Type) 2000 return nil 2001 } 2002 2003 case OLEN, OCAP: 2004 switch { 2005 case n.Left.Type.IsSlice(): 2006 op := ssa.OpSliceLen 2007 if n.Op == OCAP { 2008 op = ssa.OpSliceCap 2009 } 2010 return s.newValue1(op, types.Types[TINT], s.expr(n.Left)) 2011 case n.Left.Type.IsString(): // string; not reachable for OCAP 2012 return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left)) 2013 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 2014 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 2015 default: // array 2016 return s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 2017 } 2018 2019 case OSPTR: 2020 a := s.expr(n.Left) 2021 if n.Left.Type.IsSlice() { 2022 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 2023 } else { 2024 return s.newValue1(ssa.OpStringPtr, n.Type, a) 2025 } 2026 2027 case OITAB: 2028 a := s.expr(n.Left) 2029 return s.newValue1(ssa.OpITab, n.Type, a) 2030 2031 case OIDATA: 2032 a := s.expr(n.Left) 2033 return s.newValue1(ssa.OpIData, n.Type, a) 2034 2035 case OEFACE: 2036 tab := s.expr(n.Left) 2037 data := s.expr(n.Right) 2038 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2039 2040 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2041 v := s.expr(n.Left) 2042 var i, j, k *ssa.Value 2043 low, high, max := n.SliceBounds() 2044 if low != nil { 2045 i = s.extendIndex(s.expr(low), panicslice) 2046 } 2047 if high != nil { 2048 j = s.extendIndex(s.expr(high), panicslice) 2049 } 2050 if max != nil { 2051 k = s.extendIndex(s.expr(max), panicslice) 2052 } 2053 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2054 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2055 2056 case OSLICESTR: 2057 v := s.expr(n.Left) 2058 var i, j *ssa.Value 2059 low, high, _ := n.SliceBounds() 2060 if low != nil { 2061 i = s.extendIndex(s.expr(low), panicslice) 2062 } 2063 if high != nil { 2064 j = s.extendIndex(s.expr(high), panicslice) 2065 } 2066 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 
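// Strings carry no capacity, so only the pointer and length parts
// feed the OpStringMake below.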
2067 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2068 2069 case OCALLFUNC: 2070 if isIntrinsicCall(n) { 2071 return s.intrinsicCall(n) 2072 } 2073 fallthrough 2074 2075 case OCALLINTER, OCALLMETH: 2076 a := s.call(n, callNormal) 2077 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2078 2079 case OGETG: 2080 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2081 2082 case OAPPEND: 2083 return s.append(n, false) 2084 2085 case OSTRUCTLIT, OARRAYLIT: 2086 // All literals with nonzero fields have already been 2087 // rewritten during walk. Any that remain are just T{} 2088 // or equivalents. Use the zero value. 2089 if !iszero(n) { 2090 Fatalf("literal with nonzero value in SSA: %v", n) 2091 } 2092 return s.zeroVal(n.Type) 2093 2094 default: 2095 s.Fatalf("unhandled expr %v", n.Op) 2096 return nil 2097 } 2098 } 2099 2100 // append converts an OAPPEND node to SSA. 2101 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2102 // adds it to s, and returns the Value. 2103 // If inplace is true, it writes the result of the OAPPEND expression n 2104 // back to the slice being appended to, and returns nil. 2105 // inplace MUST be set to false if the slice can be SSA'd. 2106 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2107 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2108 // 2109 // ptr, len, cap := s 2110 // newlen := len + 3 2111 // if newlen > cap { 2112 // ptr, len, cap = growslice(s, newlen) 2113 // newlen = len + 3 // recalculate to avoid a spill 2114 // } 2115 // // with write barriers, if needed: 2116 // *(ptr+len) = e1 2117 // *(ptr+len+1) = e2 2118 // *(ptr+len+2) = e3 2119 // return makeslice(ptr, newlen, cap) 2120 // 2121 // 2122 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2123 // 2124 // a := &s 2125 // ptr, len, cap := s 2126 // newlen := len + 3 2127 // if newlen > cap { 2128 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2129 // vardef(a) // if necessary, advise liveness we are writing a new a 2130 // *a.cap = newcap // write before ptr to avoid a spill 2131 // *a.ptr = newptr // with write barrier 2132 // } 2133 // newlen = len + 3 // recalculate to avoid a spill 2134 // *a.len = newlen 2135 // // with write barriers, if needed: 2136 // *(ptr+len) = e1 2137 // *(ptr+len+1) = e2 2138 // *(ptr+len+2) = e3 2139 2140 et := n.Type.Elem() 2141 pt := types.NewPtr(et) 2142 2143 // Evaluate slice 2144 sn := n.List.First() // the slice node is the first in the list 2145 2146 var slice, addr *ssa.Value 2147 if inplace { 2148 addr = s.addr(sn, false) 2149 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2150 } else { 2151 slice = s.expr(sn) 2152 } 2153 2154 // Allocate new blocks 2155 grow := s.f.NewBlock(ssa.BlockPlain) 2156 assign := s.f.NewBlock(ssa.BlockPlain) 2157 2158 // Decide if we need to grow 2159 nargs := int64(n.List.Len() - 1) 2160 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2161 l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2162 c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice) 2163 nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2164 2165 cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c) 2166 s.vars[&ptrVar] = p 2167 2168 if !inplace { 2169 s.vars[&newlenVar] = nl 2170 s.vars[&capVar] = c 2171 } else { 2172 s.vars[&lenVar] = l 2173 } 2174 2175 b := s.endBlock() 2176 b.Kind = ssa.BlockIf 2177 b.Likely = ssa.BranchUnlikely 2178 b.SetControl(cmp) 2179 
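// The first successor (grow) is the unlikely path; the common case
// falls through to assign without calling growslice.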
b.AddEdgeTo(grow) 2180 b.AddEdgeTo(assign) 2181 2182 // Call growslice 2183 s.startBlock(grow) 2184 taddr := s.expr(n.Left) 2185 r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl) 2186 2187 if inplace { 2188 if sn.Op == ONAME { 2189 // Tell liveness we're about to build a new slice 2190 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem()) 2191 } 2192 capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr) 2193 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], capaddr, r[2], s.mem()) 2194 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, pt, addr, r[0], s.mem()) 2195 // load the value we just stored to avoid having to spill it 2196 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2197 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2198 } else { 2199 s.vars[&ptrVar] = r[0] 2200 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs)) 2201 s.vars[&capVar] = r[2] 2202 } 2203 2204 b = s.endBlock() 2205 b.AddEdgeTo(assign) 2206 2207 // assign new elements to slots 2208 s.startBlock(assign) 2209 2210 if inplace { 2211 l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len 2212 nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2213 lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr) 2214 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenaddr, nl, s.mem()) 2215 } 2216 2217 // Evaluate args 2218 type argRec struct { 2219 // if store is true, we're appending the value v. If false, we're appending the 2220 // value at *v. 2221 v *ssa.Value 2222 store bool 2223 } 2224 args := make([]argRec, 0, nargs) 2225 for _, n := range n.List.Slice()[1:] { 2226 if canSSAType(n.Type) { 2227 args = append(args, argRec{v: s.expr(n), store: true}) 2228 } else { 2229 v := s.addr(n, false) 2230 args = append(args, argRec{v: v}) 2231 } 2232 } 2233 2234 p = s.variable(&ptrVar, pt) // generates phi for ptr 2235 if !inplace { 2236 nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl 2237 c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap 2238 } 2239 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2240 for i, arg := range args { 2241 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i))) 2242 if arg.store { 2243 s.storeType(et, addr, arg.v, 0) 2244 } else { 2245 store := s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem()) 2246 store.Aux = et 2247 s.vars[&memVar] = store 2248 } 2249 } 2250 2251 delete(s.vars, &ptrVar) 2252 if inplace { 2253 delete(s.vars, &lenVar) 2254 return nil 2255 } 2256 delete(s.vars, &newlenVar) 2257 delete(s.vars, &capVar) 2258 // make result 2259 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2260 } 2261 2262 // condBranch evaluates the boolean expression cond and branches to yes 2263 // if cond is true and no if cond is false. 2264 // This function is intended to handle && and || better than just calling 2265 // s.expr(cond) and branching on the result. 
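// For example, condBranch(a && b, yes, no, likely) emits
//	branch on a -> mid, no
//	mid: branch on b -> yes, no
// so neither operand is ever materialized as a boolean value.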
2266 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { 2267 if cond.Op == OANDAND { 2268 mid := s.f.NewBlock(ssa.BlockPlain) 2269 s.stmtList(cond.Ninit) 2270 s.condBranch(cond.Left, mid, no, max8(likely, 0)) 2271 s.startBlock(mid) 2272 s.condBranch(cond.Right, yes, no, likely) 2273 return 2274 // Note: if likely==1, then both recursive calls pass 1. 2275 // If likely==-1, then we don't have enough information to decide 2276 // whether the first branch is likely or not. So we pass 0 for 2277 // the likeliness of the first branch. 2278 // TODO: have the frontend give us branch prediction hints for 2279 // OANDAND and OOROR nodes (if it ever has such info). 2280 } 2281 if cond.Op == OOROR { 2282 mid := s.f.NewBlock(ssa.BlockPlain) 2283 s.stmtList(cond.Ninit) 2284 s.condBranch(cond.Left, yes, mid, min8(likely, 0)) 2285 s.startBlock(mid) 2286 s.condBranch(cond.Right, yes, no, likely) 2287 return 2288 // Note: if likely==-1, then both recursive calls pass -1. 2289 // If likely==1, then we don't have enough info to decide 2290 // the likelihood of the first branch. 2291 } 2292 if cond.Op == ONOT { 2293 s.stmtList(cond.Ninit) 2294 s.condBranch(cond.Left, no, yes, -likely) 2295 return 2296 } 2297 c := s.expr(cond) 2298 b := s.endBlock() 2299 b.Kind = ssa.BlockIf 2300 b.SetControl(c) 2301 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness 2302 b.AddEdgeTo(yes) 2303 b.AddEdgeTo(no) 2304 } 2305 2306 type skipMask uint8 2307 2308 const ( 2309 skipPtr skipMask = 1 << iota 2310 skipLen 2311 skipCap 2312 ) 2313 2314 // assign does left = right. 2315 // Right has already been evaluated to ssa, left has not. 2316 // If deref is true, then we do left = *right instead (and right has already been nil-checked). 2317 // If deref is true and right == nil, just do left = 0. 2318 // skip indicates assignments (at the top level) that can be avoided. 2319 func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) { 2320 if left.Op == ONAME && isblank(left) { 2321 return 2322 } 2323 t := left.Type 2324 dowidth(t) 2325 if s.canSSA(left) { 2326 if deref { 2327 s.Fatalf("can SSA LHS %v but not RHS %s", left, right) 2328 } 2329 if left.Op == ODOT { 2330 // We're assigning to a field of an ssa-able value. 2331 // We need to build a new structure with the new value for the 2332 // field we're assigning and the old values for the other fields. 2333 // For instance: 2334 // type T struct {a, b, c int} 2335 // var x T 2336 // x.b = 5 2337 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c} 2338 2339 // Grab information about the structure type. 2340 t := left.Left.Type 2341 nf := t.NumFields() 2342 idx := fieldIdx(left) 2343 2344 // Grab old value of structure. 2345 old := s.expr(left.Left) 2346 2347 // Make new structure. 2348 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t) 2349 2350 // Add fields as args. 2351 for i := 0; i < nf; i++ { 2352 if i == idx { 2353 new.AddArg(right) 2354 } else { 2355 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old)) 2356 } 2357 } 2358 2359 // Recursively assign the new value we've made to the base of the dot op. 2360 s.assign(left.Left, new, false, 0) 2361 // TODO: do we need to update named values here? 2362 return 2363 } 2364 if left.Op == OINDEX && left.Left.Type.IsArray() { 2365 // We're assigning to an element of an ssa-able array.
2366 // a[i] = v 2367 t := left.Left.Type 2368 n := t.NumElem() 2369 2370 i := s.expr(left.Right) // index 2371 if n == 0 { 2372 // The bounds check must fail. Might as well 2373 // ignore the actual index and just use zeros. 2374 z := s.constInt(types.Types[TINT], 0) 2375 s.boundsCheck(z, z) 2376 return 2377 } 2378 if n != 1 { 2379 s.Fatalf("assigning to non-1-length array") 2380 } 2381 // Rewrite to a = [1]{v} 2382 i = s.extendIndex(i, panicindex) 2383 s.boundsCheck(i, s.constInt(types.Types[TINT], 1)) 2384 v := s.newValue1(ssa.OpArrayMake1, t, right) 2385 s.assign(left.Left, v, false, 0) 2386 return 2387 } 2388 // Update variable assignment. 2389 s.vars[left] = right 2390 s.addNamedValue(left, right) 2391 return 2392 } 2393 // Left is not ssa-able. Compute its address. 2394 addr := s.addr(left, false) 2395 if left.Op == ONAME && skip == 0 { 2396 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem()) 2397 } 2398 if isReflectHeaderDataField(left) { 2399 // Package unsafe's documentation says storing pointers into 2400 // reflect.SliceHeader and reflect.StringHeader's Data fields 2401 // is valid, even though they have type uintptr (#19168). 2402 // Mark it pointer type to signal the writebarrier pass to 2403 // insert a write barrier. 2404 t = types.Types[TUNSAFEPTR] 2405 } 2406 if deref { 2407 // Treat as a mem->mem move. 2408 var store *ssa.Value 2409 if right == nil { 2410 store = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem()) 2411 } else { 2412 store = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem()) 2413 } 2414 store.Aux = t 2415 s.vars[&memVar] = store 2416 return 2417 } 2418 // Treat as a store. 2419 s.storeType(t, addr, right, skip) 2420 } 2421 2422 // zeroVal returns the zero value for type t. 
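// For example, the zero value of a complex64 is built as
// OpComplexMake(const32F 0, const32F 0), and the zero value of a
// struct is a StructMake whose arguments are the fields' zero values.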
2423 func (s *state) zeroVal(t *types.Type) *ssa.Value { 2424 switch { 2425 case t.IsInteger(): 2426 switch t.Size() { 2427 case 1: 2428 return s.constInt8(t, 0) 2429 case 2: 2430 return s.constInt16(t, 0) 2431 case 4: 2432 return s.constInt32(t, 0) 2433 case 8: 2434 return s.constInt64(t, 0) 2435 default: 2436 s.Fatalf("bad sized integer type %v", t) 2437 } 2438 case t.IsFloat(): 2439 switch t.Size() { 2440 case 4: 2441 return s.constFloat32(t, 0) 2442 case 8: 2443 return s.constFloat64(t, 0) 2444 default: 2445 s.Fatalf("bad sized float type %v", t) 2446 } 2447 case t.IsComplex(): 2448 switch t.Size() { 2449 case 8: 2450 z := s.constFloat32(types.Types[TFLOAT32], 0) 2451 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2452 case 16: 2453 z := s.constFloat64(types.Types[TFLOAT64], 0) 2454 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2455 default: 2456 s.Fatalf("bad sized complex type %v", t) 2457 } 2458 2459 case t.IsString(): 2460 return s.constEmptyString(t) 2461 case t.IsPtrShaped(): 2462 return s.constNil(t) 2463 case t.IsBoolean(): 2464 return s.constBool(false) 2465 case t.IsInterface(): 2466 return s.constInterface(t) 2467 case t.IsSlice(): 2468 return s.constSlice(t) 2469 case t.IsStruct(): 2470 n := t.NumFields() 2471 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2472 for i := 0; i < n; i++ { 2473 v.AddArg(s.zeroVal(t.FieldType(i).(*types.Type))) 2474 } 2475 return v 2476 case t.IsArray(): 2477 switch t.NumElem() { 2478 case 0: 2479 return s.entryNewValue0(ssa.OpArrayMake0, t) 2480 case 1: 2481 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) 2482 } 2483 } 2484 s.Fatalf("zero for type %v not implemented", t) 2485 return nil 2486 } 2487 2488 type callKind int8 2489 2490 const ( 2491 callNormal callKind = iota 2492 callDefer 2493 callGo 2494 ) 2495 2496 var intrinsics map[intrinsicKey]intrinsicBuilder 2497 2498 // An intrinsicBuilder converts a call node n into an ssa value that 2499 // implements that call as an intrinsic. args is a list of arguments to the func. 2500 type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value 2501 2502 type intrinsicKey struct { 2503 arch *sys.Arch 2504 pkg string 2505 fn string 2506 } 2507 2508 func init() { 2509 intrinsics = map[intrinsicKey]intrinsicBuilder{} 2510 2511 var all []*sys.Arch 2512 var p4 []*sys.Arch 2513 var p8 []*sys.Arch 2514 for _, a := range sys.Archs { 2515 all = append(all, a) 2516 if a.PtrSize == 4 { 2517 p4 = append(p4, a) 2518 } else { 2519 p8 = append(p8, a) 2520 } 2521 } 2522 2523 // add adds the intrinsic b for pkg.fn for the given list of architectures. 2524 add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) { 2525 for _, a := range archs { 2526 intrinsics[intrinsicKey{a, pkg, fn}] = b 2527 } 2528 } 2529 // addF does the same as add but operates on architecture families. 2530 addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) { 2531 m := 0 2532 for _, f := range archFamilies { 2533 if f >= 32 { 2534 panic("too many architecture families") 2535 } 2536 m |= 1 << uint(f) 2537 } 2538 for _, a := range all { 2539 if m>>uint(a.Family)&1 != 0 { 2540 intrinsics[intrinsicKey{a, pkg, fn}] = b 2541 } 2542 } 2543 } 2544 // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists. 
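// For example, the call below
//	alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
// makes the sync/atomic entry point share the runtime intrinsic's builder.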
2545 alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) { 2546 for _, a := range archs { 2547 if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok { 2548 intrinsics[intrinsicKey{a, pkg, fn}] = b 2549 } 2550 } 2551 } 2552 2553 /******** runtime ********/ 2554 if !instrumenting { 2555 add("runtime", "slicebytetostringtmp", 2556 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2557 // Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes 2558 // for the backend instead of slicebytetostringtmp calls 2559 // when not instrumenting. 2560 slice := args[0] 2561 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) 2562 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2563 return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) 2564 }, 2565 all...) 2566 } 2567 add("runtime", "KeepAlive", 2568 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2569 data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) 2570 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem()) 2571 return nil 2572 }, 2573 all...) 2574 2575 /******** runtime/internal/sys ********/ 2576 addF("runtime/internal/sys", "Ctz32", 2577 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2578 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 2579 }, 2580 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2581 addF("runtime/internal/sys", "Ctz64", 2582 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2583 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 2584 }, 2585 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2586 addF("runtime/internal/sys", "Bswap32", 2587 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2588 return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0]) 2589 }, 2590 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2591 addF("runtime/internal/sys", "Bswap64", 2592 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2593 return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0]) 2594 }, 2595 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2596 2597 /******** runtime/internal/atomic ********/ 2598 addF("runtime/internal/atomic", "Load", 2599 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2600 v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], s.mem()) 2601 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2602 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2603 }, 2604 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2605 2606 addF("runtime/internal/atomic", "Load64", 2607 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2608 v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], s.mem()) 2609 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2610 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2611 }, 2612 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2613 addF("runtime/internal/atomic", "Loadp", 2614 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2615 v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(s.f.Config.Types.BytePtr, ssa.TypeMem), args[0], s.mem()) 2616 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2617 return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) 2618 }, 2619 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2620 2621 addF("runtime/internal/atomic", "Store", 2622 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2623 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, 
args[0], args[1], s.mem()) 2624 return nil 2625 }, 2626 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2627 addF("runtime/internal/atomic", "Store64", 2628 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2629 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, args[0], args[1], s.mem()) 2630 return nil 2631 }, 2632 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2633 addF("runtime/internal/atomic", "StorepNoWB", 2634 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2635 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, args[0], args[1], s.mem()) 2636 return nil 2637 }, 2638 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS) 2639 2640 addF("runtime/internal/atomic", "Xchg", 2641 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2642 v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem()) 2643 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2644 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2645 }, 2646 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2647 addF("runtime/internal/atomic", "Xchg64", 2648 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2649 v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem()) 2650 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2651 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2652 }, 2653 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2654 2655 addF("runtime/internal/atomic", "Xadd", 2656 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2657 v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem()) 2658 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2659 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2660 }, 2661 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2662 addF("runtime/internal/atomic", "Xadd64", 2663 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2664 v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem()) 2665 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2666 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2667 }, 2668 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2669 2670 addF("runtime/internal/atomic", "Cas", 2671 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2672 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(types.Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem()) 2673 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2674 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2675 }, 2676 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2677 addF("runtime/internal/atomic", "Cas64", 2678 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2679 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(types.Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem()) 2680 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v) 2681 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2682 }, 2683 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2684 2685 addF("runtime/internal/atomic", "And8", 2686 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2687 s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, args[0], args[1], s.mem()) 2688 return nil 2689 }, 2690 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2691 addF("runtime/internal/atomic", "Or8", 2692 
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2693 s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, args[0], args[1], s.mem()) 2694 return nil 2695 }, 2696 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2697 2698 alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...) 2699 alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...) 2700 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...) 2701 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...) 2702 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...) 2703 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...) 2704 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...) 2705 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...) 2706 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...) 2707 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...) 2708 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...) 2709 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...) 2710 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...) 2711 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...) 2712 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...) 2713 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...) 2714 2715 /******** math ********/ 2716 addF("math", "Sqrt", 2717 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2718 return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0]) 2719 }, 2720 sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) 2721 2722 /******** math/bits ********/ 2723 addF("math/bits", "TrailingZeros64", 2724 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2725 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 2726 }, 2727 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2728 addF("math/bits", "TrailingZeros32", 2729 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2730 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 2731 }, 2732 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2733 addF("math/bits", "TrailingZeros16", 2734 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2735 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 2736 c := s.constInt32(types.Types[TUINT32], 1<<16) 2737 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 2738 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 2739 }, 2740 sys.ARM, sys.MIPS) 2741 addF("math/bits", "TrailingZeros16", 2742 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2743 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 2744 c := s.constInt64(types.Types[TUINT64], 1<<16) 2745 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 2746 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 2747 }, 2748 sys.AMD64, sys.ARM64, sys.S390X) 2749 addF("math/bits", "TrailingZeros8", 2750 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2751 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 2752 c := s.constInt32(types.Types[TUINT32], 1<<8) 2753 y := s.newValue2(ssa.OpOr32, 
types.Types[TUINT32], x, c) 2754 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 2755 }, 2756 sys.ARM, sys.MIPS) 2757 addF("math/bits", "TrailingZeros8", 2758 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2759 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 2760 c := s.constInt64(types.Types[TUINT64], 1<<8) 2761 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 2762 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 2763 }, 2764 sys.AMD64, sys.ARM64, sys.S390X) 2765 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...) 2766 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...) 2767 // ReverseBytes inlines correctly, no need to intrinsify it. 2768 // ReverseBytes16 lowers to a rotate, no need for anything special here. 2769 addF("math/bits", "Len64", 2770 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2771 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 2772 }, 2773 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2774 addF("math/bits", "Len32", 2775 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2776 if s.config.PtrSize == 4 { 2777 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 2778 } 2779 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0]) 2780 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 2781 }, 2782 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2783 addF("math/bits", "Len16", 2784 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2785 if s.config.PtrSize == 4 { 2786 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 2787 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 2788 } 2789 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 2790 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 2791 }, 2792 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2793 // Note: disabled on AMD64 because the Go code is faster! 2794 addF("math/bits", "Len8", 2795 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2796 if s.config.PtrSize == 4 { 2797 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 2798 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 2799 } 2800 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 2801 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 2802 }, 2803 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2804 2805 addF("math/bits", "Len", 2806 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2807 if s.config.PtrSize == 4 { 2808 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 2809 } 2810 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 2811 }, 2812 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2813 // LeadingZeros is handled because it trivially calls Len. 
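// (math/bits defines LeadingZeros as UintSize - Len(x), and likewise
// for the fixed widths, so inlining plus the Len intrinsics covers it.)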
2814 addF("math/bits", "Reverse64", 2815 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2816 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 2817 }, 2818 sys.ARM64) 2819 addF("math/bits", "Reverse32", 2820 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2821 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 2822 }, 2823 sys.ARM64) 2824 addF("math/bits", "Reverse16", 2825 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2826 return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0]) 2827 }, 2828 sys.ARM64) 2829 addF("math/bits", "Reverse8", 2830 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2831 return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0]) 2832 }, 2833 sys.ARM64) 2834 addF("math/bits", "Reverse", 2835 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2836 if s.config.PtrSize == 4 { 2837 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 2838 } 2839 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 2840 }, 2841 sys.ARM64) 2842 makeOnesCount := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2843 return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2844 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: syslook("support_popcnt").Sym.Linksym()}) 2845 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb) 2846 v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem()) 2847 b := s.endBlock() 2848 b.Kind = ssa.BlockIf 2849 b.SetControl(v) 2850 bTrue := s.f.NewBlock(ssa.BlockPlain) 2851 bFalse := s.f.NewBlock(ssa.BlockPlain) 2852 bEnd := s.f.NewBlock(ssa.BlockPlain) 2853 b.AddEdgeTo(bTrue) 2854 b.AddEdgeTo(bFalse) 2855 b.Likely = ssa.BranchLikely // most machines have popcnt nowadays 2856 2857 // We have the intrinsic - use it directly. 2858 s.startBlock(bTrue) 2859 op := op64 2860 if s.config.PtrSize == 4 { 2861 op = op32 2862 } 2863 s.vars[n] = s.newValue1(op, types.Types[TINT], args[0]) 2864 s.endBlock().AddEdgeTo(bEnd) 2865 2866 // Call the pure Go version. 2867 s.startBlock(bFalse) 2868 a := s.call(n, callNormal) 2869 s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem()) 2870 s.endBlock().AddEdgeTo(bEnd) 2871 2872 // Merge results. 2873 s.startBlock(bEnd) 2874 return s.variable(n, types.Types[TINT]) 2875 } 2876 } 2877 addF("math/bits", "OnesCount64", 2878 makeOnesCount(ssa.OpPopCount64, ssa.OpPopCount64), 2879 sys.AMD64) 2880 addF("math/bits", "OnesCount32", 2881 makeOnesCount(ssa.OpPopCount32, ssa.OpPopCount32), 2882 sys.AMD64) 2883 addF("math/bits", "OnesCount16", 2884 makeOnesCount(ssa.OpPopCount16, ssa.OpPopCount16), 2885 sys.AMD64) 2886 // Note: no OnesCount8, the Go implementation is faster - just a table load. 2887 addF("math/bits", "OnesCount", 2888 makeOnesCount(ssa.OpPopCount64, ssa.OpPopCount32), 2889 sys.AMD64) 2890 2891 /******** sync/atomic ********/ 2892 2893 // Note: these are disabled by flag_race in findIntrinsic below. 2894 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...) 2895 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...) 2896 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...) 2897 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...) 2898 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...) 2899 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...) 
2900 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...) 2901 2902 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...) 2903 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...) 2904 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. 2905 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...) 2906 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...) 2907 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...) 2908 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...) 2909 2910 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...) 2911 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...) 2912 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...) 2913 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...) 2914 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...) 2915 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...) 2916 2917 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...) 2918 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...) 2919 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...) 2920 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...) 2921 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...) 2922 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...) 2923 2924 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...) 2925 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...) 2926 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...) 2927 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...) 2928 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...) 2929 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...) 2930 2931 /******** math/big ********/ 2932 add("math/big", "mulWW", 2933 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2934 return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) 2935 }, 2936 sys.ArchAMD64) 2937 add("math/big", "divWW", 2938 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2939 return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) 2940 }, 2941 sys.ArchAMD64) 2942 } 2943 2944 // findIntrinsic returns a function which builds the SSA equivalent of the 2945 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. 2946 func findIntrinsic(sym *types.Sym) intrinsicBuilder { 2947 if ssa.IntrinsicsDisable { 2948 return nil 2949 } 2950 if sym == nil || sym.Pkg == nil { 2951 return nil 2952 } 2953 pkg := sym.Pkg.Path 2954 if sym.Pkg == localpkg { 2955 pkg = myimportpath 2956 } 2957 if flag_race && pkg == "sync/atomic" { 2958 // The race detector needs to be able to intercept these calls. 2959 // We can't intrinsify them. 
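// Returning nil here forces a plain call, which the race runtime can intercept.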
2960 return nil 2961 } 2962 fn := sym.Name 2963 return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] 2964 } 2965 2966 func isIntrinsicCall(n *Node) bool { 2967 if n == nil || n.Left == nil { 2968 return false 2969 } 2970 return findIntrinsic(n.Left.Sym) != nil 2971 } 2972 2973 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 2974 func (s *state) intrinsicCall(n *Node) *ssa.Value { 2975 v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) 2976 if ssa.IntrinsicsDebug > 0 { 2977 x := v 2978 if x == nil { 2979 x = s.mem() 2980 } 2981 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { 2982 x = x.Args[0] 2983 } 2984 Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) 2985 } 2986 return v 2987 } 2988 2989 type callArg struct { 2990 offset int64 2991 v *ssa.Value 2992 } 2993 type byOffset []callArg 2994 2995 func (x byOffset) Len() int { return len(x) } 2996 func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 2997 func (x byOffset) Less(i, j int) bool { 2998 return x[i].offset < x[j].offset 2999 } 3000 3001 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. 3002 func (s *state) intrinsicArgs(n *Node) []*ssa.Value { 3003 // This code is complicated because of how walk transforms calls. For a call node, 3004 // each entry in n.List is either an assignment to OINDREGSP which actually 3005 // stores an arg, or an assignment to a temporary which computes an arg 3006 // which is later assigned. 3007 // The args can also be out of order. 3008 // TODO: when walk goes away someday, this code can go away also. 3009 var args []callArg 3010 temps := map[*Node]*ssa.Value{} 3011 for _, a := range n.List.Slice() { 3012 if a.Op != OAS { 3013 s.Fatalf("non-assignment as a function argument %s", opnames[a.Op]) 3014 } 3015 l, r := a.Left, a.Right 3016 switch l.Op { 3017 case ONAME: 3018 // Evaluate and store to "temporary". 3019 // Walk ensures these temporaries are dead outside of n. 3020 temps[l] = s.expr(r) 3021 case OINDREGSP: 3022 // Store a value to an argument slot. 3023 var v *ssa.Value 3024 if x, ok := temps[r]; ok { 3025 // This is a previously computed temporary. 3026 v = x 3027 } else { 3028 // This is an explicit value; evaluate it. 3029 v = s.expr(r) 3030 } 3031 args = append(args, callArg{l.Xoffset, v}) 3032 default: 3033 s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op]) 3034 } 3035 } 3036 sort.Sort(byOffset(args)) 3037 res := make([]*ssa.Value, len(args)) 3038 for i, a := range args { 3039 res[i] = a.v 3040 } 3041 return res 3042 } 3043 3044 // Calls the function n using the specified call type. 3045 // Returns the address of the return value (or nil if none). 3046 func (s *state) call(n *Node, k callKind) *ssa.Value { 3047 var sym *types.Sym // target symbol (if static) 3048 var closure *ssa.Value // ptr to closure to run (if dynamic) 3049 var codeptr *ssa.Value // ptr to target code (if dynamic) 3050 var rcvr *ssa.Value // receiver to set 3051 fn := n.Left 3052 switch n.Op { 3053 case OCALLFUNC: 3054 if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC { 3055 sym = fn.Sym 3056 break 3057 } 3058 closure = s.expr(fn) 3059 case OCALLMETH: 3060 if fn.Op != ODOTMETH { 3061 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 3062 } 3063 if k == callNormal { 3064 sym = fn.Sym 3065 break 3066 } 3067 // Make a name n2 for the function. 3068 // fn.Sym might be sync.(*Mutex).Unlock. 
3069 // Make a PFUNC node out of that, then evaluate it. 3070 // We get back an SSA value representing &sync.(*Mutex).Unlock·f. 3071 // We can then pass that to defer or go. 3072 n2 := newnamel(fn.Pos, fn.Sym) 3073 n2.Name.Curfn = s.curfn 3074 n2.SetClass(PFUNC) 3075 n2.Pos = fn.Pos 3076 n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. 3077 closure = s.expr(n2) 3078 // Note: receiver is already assigned in n.List, so we don't 3079 // want to set it here. 3080 case OCALLINTER: 3081 if fn.Op != ODOTINTER { 3082 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 3083 } 3084 i := s.expr(fn.Left) 3085 itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i) 3086 if k != callNormal { 3087 s.nilCheck(itab) 3088 } 3089 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab 3090 itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) 3091 if k == callNormal { 3092 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem()) 3093 } else { 3094 closure = itab 3095 } 3096 rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i) 3097 } 3098 dowidth(fn.Type) 3099 stksize := fn.Type.ArgWidth() // includes receiver 3100 3101 // Run all argument assignments. The arg slots have already 3102 // been offset by the appropriate amount (+2*widthptr for go/defer, 3103 // +widthptr for interface calls). 3104 // For OCALLMETH, the receiver is set in these statements. 3105 s.stmtList(n.List) 3106 3107 // Set receiver (for interface calls) 3108 if rcvr != nil { 3109 argStart := Ctxt.FixedFrameSize() 3110 if k != callNormal { 3111 argStart += int64(2 * Widthptr) 3112 } 3113 addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) 3114 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem()) 3115 } 3116 3117 // Defer/go args 3118 if k != callNormal { 3119 // Write argsize and closure (args to Newproc/Deferproc). 
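// The resulting layout at the fixed frame offset is
//	[argsize uint32][closure uintptr][receiver/args...]
// and stksize grows by 2*Widthptr below to cover the two extra words.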
3120 argStart := Ctxt.FixedFrameSize() 3121 argsize := s.constInt32(types.Types[TUINT32], int32(stksize)) 3122 addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) 3123 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINT32], addr, argsize, s.mem()) 3124 addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr)) 3125 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem()) 3126 stksize += 2 * int64(Widthptr) 3127 } 3128 3129 // call target 3130 var call *ssa.Value 3131 switch { 3132 case k == callDefer: 3133 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Deferproc, s.mem()) 3134 case k == callGo: 3135 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Newproc, s.mem()) 3136 case closure != nil: 3137 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem()) 3138 call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem()) 3139 case codeptr != nil: 3140 call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem()) 3141 case sym != nil: 3142 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym.Linksym(), s.mem()) 3143 default: 3144 Fatalf("bad call type %v %v", n.Op, n) 3145 } 3146 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 3147 s.vars[&memVar] = call 3148 3149 // Finish block for defers 3150 if k == callDefer { 3151 b := s.endBlock() 3152 b.Kind = ssa.BlockDefer 3153 b.SetControl(call) 3154 bNext := s.f.NewBlock(ssa.BlockPlain) 3155 b.AddEdgeTo(bNext) 3156 // Add recover edge to exit code. 3157 r := s.f.NewBlock(ssa.BlockPlain) 3158 s.startBlock(r) 3159 s.exit() 3160 b.AddEdgeTo(r) 3161 b.Likely = ssa.BranchLikely 3162 s.startBlock(bNext) 3163 } 3164 3165 res := n.Left.Type.Results() 3166 if res.NumFields() == 0 || k != callNormal { 3167 // call has no return value. Continue with the next statement. 3168 return nil 3169 } 3170 fp := res.Field(0) 3171 return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()) 3172 } 3173 3174 // etypesign returns the signed-ness of e, for integer/pointer etypes. 3175 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 3176 func etypesign(e types.EType) int8 { 3177 switch e { 3178 case TINT8, TINT16, TINT32, TINT64, TINT: 3179 return -1 3180 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 3181 return +1 3182 } 3183 return 0 3184 } 3185 3186 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node. 3187 // This improves the effectiveness of cse by using the same Aux values for the 3188 // same symbols. 3189 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} { 3190 switch sym.(type) { 3191 default: 3192 s.Fatalf("sym %v is of unknown type %T", sym, sym) 3193 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol: 3194 // these are the only valid types 3195 } 3196 3197 if lsym, ok := s.varsyms[n]; ok { 3198 return lsym 3199 } 3200 s.varsyms[n] = sym 3201 return sym 3202 } 3203 3204 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 3205 // The value that the returned Value represents is guaranteed to be non-nil. 3206 // If bounded is true then this address does not require a nil check for its operand 3207 // even if that would otherwise be implied. 
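// For example, the address of a global is an OpAddr off SB (plus an
// OpOffPtr for any nonzero offset), while the address of a slice
// element a[i] becomes OpPtrIndex(OpSlicePtr(a), i) after a bounds check.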
3208 func (s *state) addr(n *Node, bounded bool) *ssa.Value { 3209 t := types.NewPtr(n.Type) 3210 switch n.Op { 3211 case ONAME: 3212 switch n.Class() { 3213 case PEXTERN: 3214 // global variable 3215 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Sym.Linksym()}) 3216 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb) 3217 // TODO: Make OpAddr use AuxInt as well as Aux. 3218 if n.Xoffset != 0 { 3219 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 3220 } 3221 return v 3222 case PPARAM: 3223 // parameter slot 3224 v := s.decladdrs[n] 3225 if v != nil { 3226 return v 3227 } 3228 if n == nodfp { 3229 // Special arg that points to the frame pointer (Used by ORECOVER). 3230 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n}) 3231 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp) 3232 } 3233 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) 3234 return nil 3235 case PAUTO: 3236 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Node: n}) 3237 return s.newValue1A(ssa.OpAddr, t, aux, s.sp) 3238 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 3239 // ensure that we reuse symbols for out parameters so 3240 // that cse works on their addresses 3241 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n}) 3242 return s.newValue1A(ssa.OpAddr, t, aux, s.sp) 3243 default: 3244 s.Fatalf("variable address class %v not implemented", classnames[n.Class()]) 3245 return nil 3246 } 3247 case OINDREGSP: 3248 // indirect off REGSP 3249 // used for storing/loading arguments/returns to/from callees 3250 return s.constOffPtrSP(t, n.Xoffset) 3251 case OINDEX: 3252 if n.Left.Type.IsSlice() { 3253 a := s.expr(n.Left) 3254 i := s.expr(n.Right) 3255 i = s.extendIndex(i, panicindex) 3256 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a) 3257 if !n.Bounded() { 3258 s.boundsCheck(i, len) 3259 } 3260 p := s.newValue1(ssa.OpSlicePtr, t, a) 3261 return s.newValue2(ssa.OpPtrIndex, t, p, i) 3262 } else { // array 3263 a := s.addr(n.Left, bounded) 3264 i := s.expr(n.Right) 3265 i = s.extendIndex(i, panicindex) 3266 len := s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 3267 if !n.Bounded() { 3268 s.boundsCheck(i, len) 3269 } 3270 return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i) 3271 } 3272 case OIND: 3273 return s.exprPtr(n.Left, bounded, n.Pos) 3274 case ODOT: 3275 p := s.addr(n.Left, bounded) 3276 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3277 case ODOTPTR: 3278 p := s.exprPtr(n.Left, bounded, n.Pos) 3279 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3280 case OCLOSUREVAR: 3281 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 3282 s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) 3283 case OCONVNOP: 3284 addr := s.addr(n.Left, bounded) 3285 return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type 3286 case OCALLFUNC, OCALLINTER, OCALLMETH: 3287 return s.call(n, callNormal) 3288 case ODOTTYPE: 3289 v, _ := s.dottype(n, false) 3290 if v.Op != ssa.OpLoad { 3291 s.Fatalf("dottype of non-load") 3292 } 3293 if v.Args[1] != s.mem() { 3294 s.Fatalf("memory no longer live from dottype load") 3295 } 3296 return v.Args[0] 3297 default: 3298 s.Fatalf("unhandled addr %v", n.Op) 3299 return nil 3300 } 3301 } 3302 3303 // canSSA reports whether n is SSA-able. 3304 // n must be an ONAME (or an ODOT sequence with an ONAME base). 
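// For example, for a local `var p struct{ x, y int }`, p.x is SSA-able:
// the ODOT walks back to the ONAME p, which must additionally not be
// address-taken, a heap copy of a parameter, or in one of the
// memory-only classes rejected below.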
3305 func (s *state) canSSA(n *Node) bool { 3306 if Debug['N'] != 0 { 3307 return false 3308 } 3309 for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) { 3310 n = n.Left 3311 } 3312 if n.Op != ONAME { 3313 return false 3314 } 3315 if n.Addrtaken() { 3316 return false 3317 } 3318 if n.isParamHeapCopy() { 3319 return false 3320 } 3321 if n.Class() == PAUTOHEAP { 3322 Fatalf("canSSA of PAUTOHEAP %v", n) 3323 } 3324 switch n.Class() { 3325 case PEXTERN: 3326 return false 3327 case PPARAMOUT: 3328 if s.hasdefer { 3329 // TODO: handle this case? Named return values must be 3330 // in memory so that the deferred function can see them. 3331 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 3332 // Or maybe not, see issue 18860. Even unnamed return values 3333 // must be written back so if a defer recovers, the caller can see them. 3334 return false 3335 } 3336 if s.cgoUnsafeArgs { 3337 // Cgo effectively takes the address of all result args, 3338 // but the compiler can't see that. 3339 return false 3340 } 3341 } 3342 if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" { 3343 // wrappers generated by genwrapper need to update 3344 // the .this pointer in place. 3345 // TODO: treat as a PPARAMOUT? 3346 return false 3347 } 3348 return canSSAType(n.Type) 3349 // TODO: try to make more variables SSA-able? 3350 } 3351 3352 // canSSAType reports whether variables of type t are SSA-able. 3353 func canSSAType(t *types.Type) bool { 3354 dowidth(t) 3355 if t.Width > int64(4*Widthptr) { 3356 // 4*Widthptr is an arbitrary constant. We want it 3357 // to be at least 3*Widthptr so slices can be registerized. 3358 // Too big and we'll introduce too much register pressure. 3359 return false 3360 } 3361 switch t.Etype { 3362 case TARRAY: 3363 // We can't do larger arrays because dynamic indexing is 3364 // not supported on SSA variables. 3365 // TODO: allow if all indexes are constant. 3366 if t.NumElem() <= 1 { 3367 return canSSAType(t.Elem()) 3368 } 3369 return false 3370 case TSTRUCT: 3371 if t.NumFields() > ssa.MaxStruct { 3372 return false 3373 } 3374 for _, t1 := range t.Fields().Slice() { 3375 if !canSSAType(t1.Type) { 3376 return false 3377 } 3378 } 3379 return true 3380 default: 3381 return true 3382 } 3383 } 3384 3385 // exprPtr evaluates n to a pointer and nil-checks it. 3386 func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value { 3387 p := s.expr(n) 3388 if bounded || n.NonNil() { 3389 if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 { 3390 s.f.Warnl(lineno, "removed nil check") 3391 } 3392 return p 3393 } 3394 s.nilCheck(p) 3395 return p 3396 } 3397 3398 // nilCheck generates nil pointer checking code. 3399 // Used only for automatically inserted nil checks, 3400 // not for user code like 'x != nil'. 3401 func (s *state) nilCheck(ptr *ssa.Value) { 3402 if disable_checknil != 0 { 3403 return 3404 } 3405 s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem()) 3406 } 3407 3408 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. 3409 // Starts a new block on return. 3410 // idx is already converted to full int width. 3411 func (s *state) boundsCheck(idx, len *ssa.Value) { 3412 if Debug['B'] != 0 { 3413 return 3414 } 3415 3416 // bounds check 3417 cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len) 3418 s.check(cmp, panicindex) 3419 } 3420 3421 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
3422 // Starts a new block on return. 3423 // idx and len are already converted to full int width. 3424 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) { 3425 if Debug['B'] != 0 { 3426 return 3427 } 3428 3429 // bounds check 3430 cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len) 3431 s.check(cmp, panicslice) 3432 } 3433 3434 // If cmp (a bool) is false, panic using the given function. 3435 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { 3436 b := s.endBlock() 3437 b.Kind = ssa.BlockIf 3438 b.SetControl(cmp) 3439 b.Likely = ssa.BranchLikely 3440 bNext := s.f.NewBlock(ssa.BlockPlain) 3441 line := s.peekPos() 3442 bPanic := s.panics[funcLine{fn, line}] 3443 if bPanic == nil { 3444 bPanic = s.f.NewBlock(ssa.BlockPlain) 3445 s.panics[funcLine{fn, line}] = bPanic 3446 s.startBlock(bPanic) 3447 // The panic call takes/returns memory to ensure that the right 3448 // memory state is observed if the panic happens. 3449 s.rtcall(fn, false, nil) 3450 } 3451 b.AddEdgeTo(bNext) 3452 b.AddEdgeTo(bPanic) 3453 s.startBlock(bNext) 3454 } 3455 3456 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { 3457 needcheck := true 3458 switch b.Op { 3459 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: 3460 if b.AuxInt != 0 { 3461 needcheck = false 3462 } 3463 } 3464 if needcheck { 3465 // do a size-appropriate check for zero 3466 cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type)) 3467 s.check(cmp, panicdivide) 3468 } 3469 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 3470 } 3471 3472 // rtcall issues a call to the given runtime function fn with the listed args. 3473 // Returns a slice of results of the given result types. 3474 // The call is added to the end of the current block. 3475 // If returns is false, the block is marked as an exit block. 3476 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value { 3477 // Write args to the stack 3478 off := Ctxt.FixedFrameSize() 3479 for _, arg := range args { 3480 t := arg.Type 3481 off = Rnd(off, t.Alignment()) 3482 ptr := s.constOffPtrSP(t.PtrTo(), off) 3483 size := t.Size() 3484 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, ptr, arg, s.mem()) 3485 off += size 3486 } 3487 off = Rnd(off, int64(Widthreg)) 3488 3489 // Issue call 3490 call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn, s.mem()) 3491 s.vars[&memVar] = call 3492 3493 if !returns { 3494 // Finish block 3495 b := s.endBlock() 3496 b.Kind = ssa.BlockExit 3497 b.SetControl(call) 3498 call.AuxInt = off - Ctxt.FixedFrameSize() 3499 if len(results) > 0 { 3500 Fatalf("panic call can't have results") 3501 } 3502 return nil 3503 } 3504 3505 // Load results 3506 res := make([]*ssa.Value, len(results)) 3507 for i, t := range results { 3508 off = Rnd(off, t.Alignment()) 3509 ptr := s.constOffPtrSP(types.NewPtr(t), off) 3510 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) 3511 off += t.Size() 3512 } 3513 off = Rnd(off, int64(Widthptr)) 3514 3515 // Remember how much callee stack space we needed. 3516 call.AuxInt = off 3517 3518 return res 3519 } 3520 3521 // do *left = right for type t. 3522 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) { 3523 if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) { 3524 // Known to not have write barrier. Store the whole type. 
3525 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem()) 3526 return 3527 } 3528 3529 // store scalar fields first, so write barrier stores for 3530 // pointer fields can be grouped together, and scalar values 3531 // don't need to be live across the write barrier call. 3532 // TODO: if the writebarrier pass knows how to reorder stores, 3533 // we can do a single store here as long as skip==0. 3534 s.storeTypeScalars(t, left, right, skip) 3535 if skip&skipPtr == 0 && types.Haspointers(t) { 3536 s.storeTypePtrs(t, left, right) 3537 } 3538 } 3539 3540 // do *left = right for all scalar (non-pointer) parts of t. 3541 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) { 3542 switch { 3543 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): 3544 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem()) 3545 case t.IsPtrShaped(): 3546 // no scalar fields. 3547 case t.IsString(): 3548 if skip&skipLen != 0 { 3549 return 3550 } 3551 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right) 3552 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 3553 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenAddr, len, s.mem()) 3554 case t.IsSlice(): 3555 if skip&skipLen == 0 { 3556 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right) 3557 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 3558 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenAddr, len, s.mem()) 3559 } 3560 if skip&skipCap == 0 { 3561 cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right) 3562 capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left) 3563 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], capAddr, cap, s.mem()) 3564 } 3565 case t.IsInterface(): 3566 // itab field doesn't need a write barrier (even though it is a pointer). 3567 itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right) 3568 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], left, itab, s.mem()) 3569 case t.IsStruct(): 3570 n := t.NumFields() 3571 for i := 0; i < n; i++ { 3572 ft := t.FieldType(i) 3573 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3574 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3575 s.storeTypeScalars(ft.(*types.Type), addr, val, 0) 3576 } 3577 case t.IsArray() && t.NumElem() == 0: 3578 // nothing 3579 case t.IsArray() && t.NumElem() == 1: 3580 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0) 3581 default: 3582 s.Fatalf("bad write barrier type %v", t) 3583 } 3584 } 3585 3586 // do *left = right for all pointer parts of t. 3587 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { 3588 switch { 3589 case t.IsPtrShaped(): 3590 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem()) 3591 case t.IsString(): 3592 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right) 3593 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem()) 3594 case t.IsSlice(): 3595 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right) 3596 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem()) 3597 case t.IsInterface(): 3598 // itab field is treated as a scalar. 
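// (Itabs live in persistently allocated, never-freed memory rather
// than the GC heap, so storing one does not require a write barrier.)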
3599 idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right) 3600 idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left) 3601 s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem()) 3602 case t.IsStruct(): 3603 n := t.NumFields() 3604 for i := 0; i < n; i++ { 3605 ft := t.FieldType(i) 3606 if !types.Haspointers(ft.(*types.Type)) { 3607 continue 3608 } 3609 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3610 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3611 s.storeTypePtrs(ft.(*types.Type), addr, val) 3612 } 3613 case t.IsArray() && t.NumElem() == 0: 3614 // nothing 3615 case t.IsArray() && t.NumElem() == 1: 3616 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3617 default: 3618 s.Fatalf("bad write barrier type %v", t) 3619 } 3620 } 3621 3622 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result. 3623 // i,j,k may be nil, in which case they are set to their default value. 3624 // t is a slice, ptr to array, or string type. 3625 func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) { 3626 var elemtype *types.Type 3627 var ptrtype *types.Type 3628 var ptr *ssa.Value 3629 var len *ssa.Value 3630 var cap *ssa.Value 3631 zero := s.constInt(types.Types[TINT], 0) 3632 switch { 3633 case t.IsSlice(): 3634 elemtype = t.Elem() 3635 ptrtype = types.NewPtr(elemtype) 3636 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v) 3637 len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v) 3638 cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v) 3639 case t.IsString(): 3640 elemtype = types.Types[TUINT8] 3641 ptrtype = types.NewPtr(elemtype) 3642 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v) 3643 len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v) 3644 cap = len 3645 case t.IsPtr(): 3646 if !t.Elem().IsArray() { 3647 s.Fatalf("bad ptr to array in slice %v\n", t) 3648 } 3649 elemtype = t.Elem().Elem() 3650 ptrtype = types.NewPtr(elemtype) 3651 s.nilCheck(v) 3652 ptr = v 3653 len = s.constInt(types.Types[TINT], t.Elem().NumElem()) 3654 cap = len 3655 default: 3656 s.Fatalf("bad type in slice %v\n", t) 3657 } 3658 3659 // Set default values 3660 if i == nil { 3661 i = zero 3662 } 3663 if j == nil { 3664 j = len 3665 } 3666 if k == nil { 3667 k = cap 3668 } 3669 3670 // Panic if slice indices are not in bounds. 3671 s.sliceBoundsCheck(i, j) 3672 if j != k { 3673 s.sliceBoundsCheck(j, k) 3674 } 3675 if k != cap { 3676 s.sliceBoundsCheck(k, cap) 3677 } 3678 3679 // Generate the following code assuming that indexes are in bounds. 3680 // The masking is to make sure that we don't generate a slice 3681 // that points to the next object in memory. 3682 // rlen = j - i 3683 // rcap = k - i 3684 // delta = i * elemsize 3685 // rptr = p + delta&mask(rcap) 3686 // result = (SliceMake rptr rlen rcap) 3687 // where mask(x) is 0 if x==0 and -1 if x>0. 3688 subOp := s.ssaOp(OSUB, types.Types[TINT]) 3689 mulOp := s.ssaOp(OMUL, types.Types[TINT]) 3690 andOp := s.ssaOp(OAND, types.Types[TINT]) 3691 rlen := s.newValue2(subOp, types.Types[TINT], j, i) 3692 var rcap *ssa.Value 3693 switch { 3694 case t.IsString(): 3695 // Capacity of the result is unimportant. However, we use 3696 // rcap to test if we've generated a zero-length slice. 3697 // Use length of strings for that. 
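// For example, for a string str with len(str) == 10 and a non-constant
// i == 10, str[i:] has rlen = rcap = 0; mask(0) == 0 then zeroes the
// computed 10-byte delta below, so rptr stays at the base pointer
// rather than pointing just past the string's bytes.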
3698 rcap = rlen 3699 case j == k: 3700 rcap = rlen 3701 default: 3702 rcap = s.newValue2(subOp, types.Types[TINT], k, i) 3703 } 3704 3705 var rptr *ssa.Value 3706 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 { 3707 // No pointer arithmetic necessary. 3708 rptr = ptr 3709 } else { 3710 // delta = # of bytes to offset pointer by. 3711 delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width)) 3712 // If we're slicing to the point where the capacity is zero, 3713 // zero out the delta. 3714 mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap) 3715 delta = s.newValue2(andOp, types.Types[TINT], delta, mask) 3716 // Compute rptr = ptr + delta 3717 rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta) 3718 } 3719 3720 return rptr, rlen, rcap 3721 } 3722 3723 type u642fcvtTab struct { 3724 geq, cvt2F, and, rsh, or, add ssa.Op 3725 one func(*state, ssa.Type, int64) *ssa.Value 3726 } 3727 3728 var u64_f64 u642fcvtTab = u642fcvtTab{ 3729 geq: ssa.OpGeq64, 3730 cvt2F: ssa.OpCvt64to64F, 3731 and: ssa.OpAnd64, 3732 rsh: ssa.OpRsh64Ux64, 3733 or: ssa.OpOr64, 3734 add: ssa.OpAdd64F, 3735 one: (*state).constInt64, 3736 } 3737 3738 var u64_f32 u642fcvtTab = u642fcvtTab{ 3739 geq: ssa.OpGeq64, 3740 cvt2F: ssa.OpCvt64to32F, 3741 and: ssa.OpAnd64, 3742 rsh: ssa.OpRsh64Ux64, 3743 or: ssa.OpOr64, 3744 add: ssa.OpAdd32F, 3745 one: (*state).constInt64, 3746 } 3747 3748 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3749 return s.uint64Tofloat(&u64_f64, n, x, ft, tt) 3750 } 3751 3752 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3753 return s.uint64Tofloat(&u64_f32, n, x, ft, tt) 3754 } 3755 3756 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3757 // if x >= 0 { 3758 // result = (floatY) x 3759 // } else { 3760 // y = uintX(x) ; y = x & 1 3761 // z = uintX(x) ; z = z >> 1 3763 // z = z | y 3764 // result = floatY(z) 3765 // result = result + result 3766 // } 3767 // 3768 // Code borrowed from old code generator. 3769 // What's going on: large 64-bit "unsigned" looks like 3770 // negative number to hardware's integer-to-float 3771 // conversion. However, because the mantissa holds only 3772 // 53 bits (24 for float32), we don't need the LSB, so instead we do an 3773 // unsigned right shift (divide by two), convert, and 3774 // double. However, before we do that, we need to be 3775 // sure that we do not lose a "1" if that made the 3776 // difference in the resulting rounding. Therefore, we 3777 // preserve it, and OR (not ADD) it back in. The case 3778 // that matters is when the eleven discarded bits are 3779 // equal to 10000000001; that rounds up, and the 1 cannot 3780 // be lost else it would round down if the LSB of the 3781 // candidate mantissa is 0.
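// Worked example: x = 1<<63 + 0x401. The eleven discarded bits are
// 10000000001, so the correctly rounded float64 is 1<<63 + 2048. With
// the shift alone, z = x>>1 ends in the ten bits 1000000000, an exact
// tie that rounds down to the even candidate, and doubling would give
// just 1<<63. OR-ing the saved LSB back in makes z end in 1000000001,
// which rounds up, and doubling then yields the correct 1<<63 + 2048.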
3782 cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft)) 3783 b := s.endBlock() 3784 b.Kind = ssa.BlockIf 3785 b.SetControl(cmp) 3786 b.Likely = ssa.BranchLikely 3787 3788 bThen := s.f.NewBlock(ssa.BlockPlain) 3789 bElse := s.f.NewBlock(ssa.BlockPlain) 3790 bAfter := s.f.NewBlock(ssa.BlockPlain) 3791 3792 b.AddEdgeTo(bThen) 3793 s.startBlock(bThen) 3794 a0 := s.newValue1(cvttab.cvt2F, tt, x) 3795 s.vars[n] = a0 3796 s.endBlock() 3797 bThen.AddEdgeTo(bAfter) 3798 3799 b.AddEdgeTo(bElse) 3800 s.startBlock(bElse) 3801 one := cvttab.one(s, ft, 1) 3802 y := s.newValue2(cvttab.and, ft, x, one) 3803 z := s.newValue2(cvttab.rsh, ft, x, one) 3804 z = s.newValue2(cvttab.or, ft, z, y) 3805 a := s.newValue1(cvttab.cvt2F, tt, z) 3806 a1 := s.newValue2(cvttab.add, tt, a, a) 3807 s.vars[n] = a1 3808 s.endBlock() 3809 bElse.AddEdgeTo(bAfter) 3810 3811 s.startBlock(bAfter) 3812 return s.variable(n, n.Type) 3813 } 3814 3815 type u322fcvtTab struct { 3816 cvtI2F, cvtF2F ssa.Op 3817 } 3818 3819 var u32_f64 u322fcvtTab = u322fcvtTab{ 3820 cvtI2F: ssa.OpCvt32to64F, 3821 cvtF2F: ssa.OpCopy, 3822 } 3823 3824 var u32_f32 u322fcvtTab = u322fcvtTab{ 3825 cvtI2F: ssa.OpCvt32to32F, 3826 cvtF2F: ssa.OpCvt64Fto32F, 3827 } 3828 3829 func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3830 return s.uint32Tofloat(&u32_f64, n, x, ft, tt) 3831 } 3832 3833 func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3834 return s.uint32Tofloat(&u32_f32, n, x, ft, tt) 3835 } 3836 3837 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3838 // if x >= 0 { 3839 // result = floatY(x) 3840 // } else { 3841 // result = floatY(float64(x) + (1<<32)) 3842 // } 3843 cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft)) 3844 b := s.endBlock() 3845 b.Kind = ssa.BlockIf 3846 b.SetControl(cmp) 3847 b.Likely = ssa.BranchLikely 3848 3849 bThen := s.f.NewBlock(ssa.BlockPlain) 3850 bElse := s.f.NewBlock(ssa.BlockPlain) 3851 bAfter := s.f.NewBlock(ssa.BlockPlain) 3852 3853 b.AddEdgeTo(bThen) 3854 s.startBlock(bThen) 3855 a0 := s.newValue1(cvttab.cvtI2F, tt, x) 3856 s.vars[n] = a0 3857 s.endBlock() 3858 bThen.AddEdgeTo(bAfter) 3859 3860 b.AddEdgeTo(bElse) 3861 s.startBlock(bElse) 3862 a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x) 3863 twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32)) 3864 a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32) 3865 a3 := s.newValue1(cvttab.cvtF2F, tt, a2) 3866 3867 s.vars[n] = a3 3868 s.endBlock() 3869 bElse.AddEdgeTo(bAfter) 3870 3871 s.startBlock(bAfter) 3872 return s.variable(n, n.Type) 3873 } 3874 3875 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
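// For example, len(ch) becomes a nil test plus a load of the first
// word of the channel header, and cap(ch) a load of the second word;
// a nil map or channel yields zero without any load.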
3876 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { 3877 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { 3878 s.Fatalf("node must be a map or a channel") 3879 } 3880 // if n == nil { 3881 // return 0 3882 // } else { 3883 // // len 3884 // return *((*int)n) 3885 // // cap 3886 // return *(((*int)n)+1) 3887 // } 3888 lenType := n.Type 3889 nilValue := s.constNil(types.Types[TUINTPTR]) 3890 cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue) 3891 b := s.endBlock() 3892 b.Kind = ssa.BlockIf 3893 b.SetControl(cmp) 3894 b.Likely = ssa.BranchUnlikely 3895 3896 bThen := s.f.NewBlock(ssa.BlockPlain) 3897 bElse := s.f.NewBlock(ssa.BlockPlain) 3898 bAfter := s.f.NewBlock(ssa.BlockPlain) 3899 3900 // length/capacity of a nil map/chan is zero 3901 b.AddEdgeTo(bThen) 3902 s.startBlock(bThen) 3903 s.vars[n] = s.zeroVal(lenType) 3904 s.endBlock() 3905 bThen.AddEdgeTo(bAfter) 3906 3907 b.AddEdgeTo(bElse) 3908 s.startBlock(bElse) 3909 if n.Op == OLEN { 3910 // length is stored in the first word for map/chan 3911 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem()) 3912 } else if n.Op == OCAP { 3913 // capacity is stored in the second word for chan 3914 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) 3915 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem()) 3916 } else { 3917 s.Fatalf("op must be OLEN or OCAP") 3918 } 3919 s.endBlock() 3920 bElse.AddEdgeTo(bAfter) 3921 3922 s.startBlock(bAfter) 3923 return s.variable(n, lenType) 3924 } 3925 3926 type f2uCvtTab struct { 3927 ltf, cvt2U, subf, or ssa.Op 3928 floatValue func(*state, ssa.Type, float64) *ssa.Value 3929 intValue func(*state, ssa.Type, int64) *ssa.Value 3930 cutoff uint64 3931 } 3932 3933 var f32_u64 f2uCvtTab = f2uCvtTab{ 3934 ltf: ssa.OpLess32F, 3935 cvt2U: ssa.OpCvt32Fto64, 3936 subf: ssa.OpSub32F, 3937 or: ssa.OpOr64, 3938 floatValue: (*state).constFloat32, 3939 intValue: (*state).constInt64, 3940 cutoff: 9223372036854775808, 3941 } 3942 3943 var f64_u64 f2uCvtTab = f2uCvtTab{ 3944 ltf: ssa.OpLess64F, 3945 cvt2U: ssa.OpCvt64Fto64, 3946 subf: ssa.OpSub64F, 3947 or: ssa.OpOr64, 3948 floatValue: (*state).constFloat64, 3949 intValue: (*state).constInt64, 3950 cutoff: 9223372036854775808, 3951 } 3952 3953 var f32_u32 f2uCvtTab = f2uCvtTab{ 3954 ltf: ssa.OpLess32F, 3955 cvt2U: ssa.OpCvt32Fto32, 3956 subf: ssa.OpSub32F, 3957 or: ssa.OpOr32, 3958 floatValue: (*state).constFloat32, 3959 intValue: func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 3960 cutoff: 2147483648, 3961 } 3962 3963 var f64_u32 f2uCvtTab = f2uCvtTab{ 3964 ltf: ssa.OpLess64F, 3965 cvt2U: ssa.OpCvt64Fto32, 3966 subf: ssa.OpSub64F, 3967 or: ssa.OpOr32, 3968 floatValue: (*state).constFloat64, 3969 intValue: func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 3970 cutoff: 2147483648, 3971 } 3972 3973 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3974 return s.floatToUint(&f32_u64, n, x, ft, tt) 3975 } 3976 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3977 return s.floatToUint(&f64_u64, n, x, ft, tt) 3978 } 3979 3980 func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3981 return s.floatToUint(&f32_u32, n, x, ft, tt) 3982 } 3983 3984 func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3985 return s.floatToUint(&f64_u32, n, x, ft, tt) 3986 } 3987 3988 func (s *state) floatToUint(cvttab *f2uCvtTab, 
n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3989 // cutoff:=1<<(intY_Size-1) 3990 // if x < floatX(cutoff) { 3991 // result = uintY(x) 3992 // } else { 3993 // y = x - floatX(cutoff) 3994 // z = uintY(y) 3995 // result = z | -(cutoff) 3996 // } 3997 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) 3998 cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff) 3999 b := s.endBlock() 4000 b.Kind = ssa.BlockIf 4001 b.SetControl(cmp) 4002 b.Likely = ssa.BranchLikely 4003 4004 bThen := s.f.NewBlock(ssa.BlockPlain) 4005 bElse := s.f.NewBlock(ssa.BlockPlain) 4006 bAfter := s.f.NewBlock(ssa.BlockPlain) 4007 4008 b.AddEdgeTo(bThen) 4009 s.startBlock(bThen) 4010 a0 := s.newValue1(cvttab.cvt2U, tt, x) 4011 s.vars[n] = a0 4012 s.endBlock() 4013 bThen.AddEdgeTo(bAfter) 4014 4015 b.AddEdgeTo(bElse) 4016 s.startBlock(bElse) 4017 y := s.newValue2(cvttab.subf, ft, x, cutoff) 4018 y = s.newValue1(cvttab.cvt2U, tt, y) 4019 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) 4020 a1 := s.newValue2(cvttab.or, tt, y, z) 4021 s.vars[n] = a1 4022 s.endBlock() 4023 bElse.AddEdgeTo(bAfter) 4024 4025 s.startBlock(bAfter) 4026 return s.variable(n, n.Type) 4027 } 4028 4029 // dottype generates SSA for a type assertion node. 4030 // commaok indicates whether to panic or return a bool. 4031 // If commaok is false, resok will be nil. 4032 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { 4033 iface := s.expr(n.Left) // input interface 4034 target := s.expr(n.Right) // target type 4035 byteptr := s.f.Config.Types.BytePtr 4036 4037 if n.Type.IsInterface() { 4038 if n.Type.IsEmptyInterface() { 4039 // Converting to an empty interface. 4040 // Input could be an empty or nonempty interface. 4041 if Debug_typeassert > 0 { 4042 Warnl(n.Pos, "type assertion inlined") 4043 } 4044 4045 // Get itab/type field from input. 4046 itab := s.newValue1(ssa.OpITab, byteptr, iface) 4047 // Conversion succeeds iff that field is not nil. 4048 cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr)) 4049 4050 if n.Left.Type.IsEmptyInterface() && commaok { 4051 // Converting empty interface to empty interface with ,ok is just a nil check. 4052 return iface, cond 4053 } 4054 4055 // Branch on nilness. 4056 b := s.endBlock() 4057 b.Kind = ssa.BlockIf 4058 b.SetControl(cond) 4059 b.Likely = ssa.BranchLikely 4060 bOk := s.f.NewBlock(ssa.BlockPlain) 4061 bFail := s.f.NewBlock(ssa.BlockPlain) 4062 b.AddEdgeTo(bOk) 4063 b.AddEdgeTo(bFail) 4064 4065 if !commaok { 4066 // On failure, panic by calling panicnildottype. 4067 s.startBlock(bFail) 4068 s.rtcall(panicnildottype, false, nil, target) 4069 4070 // On success, return (perhaps modified) input interface. 4071 s.startBlock(bOk) 4072 if n.Left.Type.IsEmptyInterface() { 4073 res = iface // Use input interface unchanged. 4074 return 4075 } 4076 // Load type out of itab, build interface with existing idata. 4077 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4078 typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4079 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4080 res = s.newValue2(ssa.OpIMake, n.Type, typ, idata) 4081 return 4082 } 4083 4084 s.startBlock(bOk) 4085 // nonempty -> empty 4086 // Need to load type from itab 4087 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4088 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4089 s.endBlock() 4090 4091 // itab is nil, might as well use that as the nil result. 
4092 s.startBlock(bFail) 4093 s.vars[&typVar] = itab 4094 s.endBlock() 4095 4096 // Merge point. 4097 bEnd := s.f.NewBlock(ssa.BlockPlain) 4098 bOk.AddEdgeTo(bEnd) 4099 bFail.AddEdgeTo(bEnd) 4100 s.startBlock(bEnd) 4101 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4102 res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata) 4103 resok = cond 4104 delete(s.vars, &typVar) 4105 return 4106 } 4107 // Converting to a nonempty interface needs a runtime call. 4108 if Debug_typeassert > 0 { 4109 Warnl(n.Pos, "type assertion not inlined") 4110 } 4111 if n.Left.Type.IsEmptyInterface() { 4112 if commaok { 4113 call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) 4114 return call[0], call[1] 4115 } 4116 return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil 4117 } 4118 if commaok { 4119 call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) 4120 return call[0], call[1] 4121 } 4122 return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil 4123 } 4124 4125 if Debug_typeassert > 0 { 4126 Warnl(n.Pos, "type assertion inlined") 4127 } 4128 4129 // Converting to a concrete type. 4130 direct := isdirectiface(n.Type) 4131 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface 4135 var targetITab *ssa.Value 4136 if n.Left.Type.IsEmptyInterface() { 4137 // Looking for pointer to target type. 4138 targetITab = target 4139 } else { 4140 // Looking for pointer to itab for target type and source interface. 4141 targetITab = s.expr(n.List.First()) 4142 } 4143 4144 var tmp *Node // temporary for use with large types 4145 var addr *ssa.Value // address of tmp 4146 if commaok && !canSSAType(n.Type) { 4147 // unSSAable type, use temporary. 4148 // TODO: get rid of some of these temporaries. 4149 tmp = tempAt(n.Pos, s.curfn, n.Type) 4150 addr = s.addr(tmp, false) 4151 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem()) 4152 } 4153 4154 cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab) 4155 b := s.endBlock() 4156 b.Kind = ssa.BlockIf 4157 b.SetControl(cond) 4158 b.Likely = ssa.BranchLikely 4159 4160 bOk := s.f.NewBlock(ssa.BlockPlain) 4161 bFail := s.f.NewBlock(ssa.BlockPlain) 4162 b.AddEdgeTo(bOk) 4163 b.AddEdgeTo(bFail) 4164 4165 if !commaok { 4166 // On failure, panic by calling panicdottype 4167 s.startBlock(bFail) 4168 taddr := s.expr(n.Right.Right) 4169 if n.Left.Type.IsEmptyInterface() { 4170 s.rtcall(panicdottypeE, false, nil, itab, target, taddr) 4171 } else { 4172 s.rtcall(panicdottypeI, false, nil, itab, target, taddr) 4173 } 4174 4175 // On success, return data from interface 4176 s.startBlock(bOk) 4177 if direct { 4178 return s.newValue1(ssa.OpIData, n.Type, iface), nil 4179 } 4180 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4181 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil 4182 } 4183 4184 // commaok is the more complicated case because we have 4185 // a control flow merge point. 4186 bEnd := s.f.NewBlock(ssa.BlockPlain) 4187 // Note that we need a new valVar each time (unlike okVar where we can 4188 // reuse the variable) because it might have a different type every time.
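// (For example, v, ok := i.(int) and w, ok := i.(string) in one
// function need val variables of type int and string respectively,
// while ok is always a bool.)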
4189 valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}} 4190 4191 // type assertion succeeded 4192 s.startBlock(bOk) 4193 if tmp == nil { 4194 if direct { 4195 s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface) 4196 } else { 4197 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4198 s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 4199 } 4200 } else { 4201 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4202 store := s.newValue3I(ssa.OpMove, ssa.TypeMem, n.Type.Size(), addr, p, s.mem()) 4203 store.Aux = n.Type 4204 s.vars[&memVar] = store 4205 } 4206 s.vars[&okVar] = s.constBool(true) 4207 s.endBlock() 4208 bOk.AddEdgeTo(bEnd) 4209 4210 // type assertion failed 4211 s.startBlock(bFail) 4212 if tmp == nil { 4213 s.vars[valVar] = s.zeroVal(n.Type) 4214 } else { 4215 store := s.newValue2I(ssa.OpZero, ssa.TypeMem, n.Type.Size(), addr, s.mem()) 4216 store.Aux = n.Type 4217 s.vars[&memVar] = store 4218 } 4219 s.vars[&okVar] = s.constBool(false) 4220 s.endBlock() 4221 bFail.AddEdgeTo(bEnd) 4222 4223 // merge point 4224 s.startBlock(bEnd) 4225 if tmp == nil { 4226 res = s.variable(valVar, n.Type) 4227 delete(s.vars, valVar) 4228 } else { 4229 res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 4230 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem()) 4231 } 4232 resok = s.variable(&okVar, types.Types[TBOOL]) 4233 delete(s.vars, &okVar) 4234 return res, resok 4235 } 4236 4237 // variable returns the value of a variable at the current location. 4238 func (s *state) variable(name *Node, t ssa.Type) *ssa.Value { 4239 v := s.vars[name] 4240 if v != nil { 4241 return v 4242 } 4243 v = s.fwdVars[name] 4244 if v != nil { 4245 return v 4246 } 4247 4248 if s.curBlock == s.f.Entry { 4249 // No variable should be live at entry. 4250 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v) 4251 } 4252 // Make a FwdRef, which records a value that's live on block input. 4253 // We'll find the matching definition as part of insertPhis. 4254 v = s.newValue0A(ssa.OpFwdRef, t, name) 4255 s.fwdVars[name] = v 4256 s.addNamedValue(name, v) 4257 return v 4258 } 4259 4260 func (s *state) mem() *ssa.Value { 4261 return s.variable(&memVar, ssa.TypeMem) 4262 } 4263 4264 func (s *state) addNamedValue(n *Node, v *ssa.Value) { 4265 if n.Class() == Pxxx { 4266 // Don't track our dummy nodes (&memVar etc.). 4267 return 4268 } 4269 if n.IsAutoTmp() { 4270 // Don't track temporary variables. 4271 return 4272 } 4273 if n.Class() == PPARAMOUT { 4274 // Don't track named output values. This prevents return values 4275 // from being assigned too early. See #14591 and #14762. TODO: allow this. 4276 return 4277 } 4278 if n.Class() == PAUTO && n.Xoffset != 0 { 4279 s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) 4280 } 4281 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} 4282 values, ok := s.f.NamedValues[loc] 4283 if !ok { 4284 s.f.Names = append(s.f.Names, loc) 4285 } 4286 s.f.NamedValues[loc] = append(values, v) 4287 } 4288 4289 // Branch is an unresolved branch. 4290 type Branch struct { 4291 P *obj.Prog // branch instruction 4292 B *ssa.Block // target 4293 } 4294 4295 // SSAGenState contains state needed during Prog generation. 4296 type SSAGenState struct { 4297 pp *Progs 4298 4299 // Branches remembers all the branch instructions we've seen 4300 // and where they would like to go. 
4301 Branches []Branch 4302 4303 // bstart remembers where each block starts (indexed by block ID) 4304 bstart []*obj.Prog 4305 4306 // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 4307 SSEto387 map[int16]int16 4308 // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. 4309 ScratchFpMem *Node 4310 4311 maxarg int64 // largest frame size for arguments to calls made by the function 4312 4313 // Map from GC safe points to stack map index, generated by 4314 // liveness analysis. 4315 stackMapIndex map[*ssa.Value]int 4316 } 4317 4318 // Prog appends a new Prog. 4319 func (s *SSAGenState) Prog(as obj.As) *obj.Prog { 4320 return s.pp.Prog(as) 4321 } 4322 4323 // Pc returns the current Prog. 4324 func (s *SSAGenState) Pc() *obj.Prog { 4325 return s.pp.next 4326 } 4327 4328 // SetPos sets the current source position. 4329 func (s *SSAGenState) SetPos(pos src.XPos) { 4330 s.pp.pos = pos 4331 } 4332 4333 // genssa appends entries to pp for each instruction in f. 4334 func genssa(f *ssa.Func, pp *Progs) { 4335 var s SSAGenState 4336 4337 e := f.Frontend().(*ssafn) 4338 4339 // Generate GC bitmaps. 4340 s.stackMapIndex = liveness(e, f) 4341 4342 // Remember where each block starts. 4343 s.bstart = make([]*obj.Prog, f.NumBlocks()) 4344 s.pp = pp 4345 var valueProgs map[*obj.Prog]*ssa.Value 4346 var blockProgs map[*obj.Prog]*ssa.Block 4347 var logProgs = e.log 4348 if logProgs { 4349 valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues()) 4350 blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) 4351 f.Logf("genssa %s\n", f.Name) 4352 blockProgs[s.pp.next] = f.Blocks[0] 4353 } 4354 4355 if thearch.Use387 { 4356 s.SSEto387 = map[int16]int16{} 4357 } 4358 4359 s.ScratchFpMem = e.scratchFpMem 4360 4361 // Emit basic blocks 4362 for i, b := range f.Blocks { 4363 s.bstart[b.ID] = s.pp.next 4364 // Emit values in block 4365 thearch.SSAMarkMoves(&s, b) 4366 for _, v := range b.Values { 4367 x := s.pp.next 4368 s.SetPos(v.Pos) 4369 4370 switch v.Op { 4371 case ssa.OpInitMem: 4372 // memory arg needs no code 4373 case ssa.OpArg: 4374 // input args need no code 4375 case ssa.OpSP, ssa.OpSB: 4376 // nothing to do 4377 case ssa.OpSelect0, ssa.OpSelect1: 4378 // nothing to do 4379 case ssa.OpGetG: 4380 // nothing to do when there's a g register, 4381 // and checkLower complains if there's not 4382 case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive: 4383 // nothing to do; already used by liveness 4384 case ssa.OpVarKill: 4385 // Zero variable if it is ambiguously live. 4386 // After the VARKILL anything this variable references 4387 // might be collected. If it were to become live again later, 4388 // the GC will see references to already-collected objects. 4389 // See issue 20029. 
4390 n := v.Aux.(*Node) 4391 if n.Name.Needzero() { 4392 if n.Class() != PAUTO { 4393 v.Fatalf("zero of variable which isn't PAUTO %v", n) 4394 } 4395 if n.Type.Size()%int64(Widthptr) != 0 { 4396 v.Fatalf("zero of variable not a multiple of ptr size %v", n) 4397 } 4398 thearch.ZeroAuto(s.pp, n) 4399 } 4400 case ssa.OpPhi: 4401 CheckLoweredPhi(v) 4402 4403 default: 4404 // let the backend handle it 4405 thearch.SSAGenValue(&s, v) 4406 } 4407 4408 if logProgs { 4409 for ; x != s.pp.next; x = x.Link { 4410 valueProgs[x] = v 4411 } 4412 } 4413 } 4414 // Emit control flow instructions for block 4415 var next *ssa.Block 4416 if i < len(f.Blocks)-1 && Debug['N'] == 0 { 4417 // If -N, leave next==nil so every block with successors 4418 // ends in a JMP (except call blocks - plive doesn't like 4419 // select{send,recv} followed by a JMP call). Helps keep 4420 // line numbers for otherwise empty blocks. 4421 next = f.Blocks[i+1] 4422 } 4423 x := s.pp.next 4424 s.SetPos(b.Pos) 4425 thearch.SSAGenBlock(&s, b, next) 4426 if logProgs { 4427 for ; x != s.pp.next; x = x.Link { 4428 blockProgs[x] = b 4429 } 4430 } 4431 } 4432 4433 // Resolve branches 4434 for _, br := range s.Branches { 4435 br.P.To.Val = s.bstart[br.B.ID] 4436 } 4437 4438 if logProgs { 4439 for p := pp.Text; p != nil; p = p.Link { 4440 var s string 4441 if v, ok := valueProgs[p]; ok { 4442 s = v.String() 4443 } else if b, ok := blockProgs[p]; ok { 4444 s = b.String() 4445 } else { 4446 s = " " // most value and branch strings are 2-3 characters long 4447 } 4448 f.Logf("%s\t%s\n", s, p) 4449 } 4450 if f.HTMLWriter != nil { 4451 // LineHist is defunct now - this code won't do 4452 // anything. 4453 // TODO: fix this (ideally without a global variable) 4454 // saved := pp.Text.Ctxt.LineHist.PrintFilenameOnly 4455 // pp.Text.Ctxt.LineHist.PrintFilenameOnly = true 4456 var buf bytes.Buffer 4457 buf.WriteString("<code>") 4458 buf.WriteString("<dl class=\"ssa-gen\">") 4459 for p := pp.Text; p != nil; p = p.Link { 4460 buf.WriteString("<dt class=\"ssa-prog-src\">") 4461 if v, ok := valueProgs[p]; ok { 4462 buf.WriteString(v.HTML()) 4463 } else if b, ok := blockProgs[p]; ok { 4464 buf.WriteString(b.HTML()) 4465 } 4466 buf.WriteString("</dt>") 4467 buf.WriteString("<dd class=\"ssa-prog\">") 4468 buf.WriteString(html.EscapeString(p.String())) 4469 buf.WriteString("</dd>") 4471 } 4472 buf.WriteString("</dl>") 4473 buf.WriteString("</code>") 4474 f.HTMLWriter.WriteColumn("genssa", buf.String()) 4475 // pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved 4476 } 4477 } 4478 4479 defframe(&s, e) 4480 if Debug['f'] != 0 { 4481 frame(0) 4482 } 4483 4484 f.HTMLWriter.Close() 4485 f.HTMLWriter = nil 4486 } 4487 4488 func defframe(s *SSAGenState, e *ssafn) { 4489 pp := s.pp 4490 4491 frame := Rnd(s.maxarg+e.stksize, int64(Widthreg)) 4492 if thearch.PadFrame != nil { 4493 frame = thearch.PadFrame(frame) 4494 } 4495 4496 // Fill in argument and frame size. 4497 pp.Text.To.Type = obj.TYPE_TEXTSIZE 4498 pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg))) 4499 pp.Text.To.Offset = frame 4500 4501 // Insert code to zero ambiguously live variables so that the 4502 // garbage collector only sees initialized values when it 4503 // looks for pointers. 4504 p := pp.Text 4505 var lo, hi int64 4506 4507 // Opaque state for backend to use. Current backends use it to 4508 // keep track of which helper registers have been zeroed. 4509 var state uint32 4510 4511 // Iterate through declarations.
They are sorted in decreasing Xoffset order. 4512 for _, n := range e.curfn.Func.Dcl { 4513 if !n.Name.Needzero() { 4514 continue 4515 } 4516 if n.Class() != PAUTO { 4517 Fatalf("needzero class %d", n.Class()) 4518 } 4519 if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 { 4520 Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset) 4521 } 4522 4523 if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) { 4524 // Merge with range we already have. 4525 lo = n.Xoffset 4526 continue 4527 } 4528 4529 // Zero old range 4530 p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) 4531 4532 // Set new range. 4533 lo = n.Xoffset 4534 hi = lo + n.Type.Size() 4535 } 4536 4537 // Zero final range. 4538 thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) 4539 } 4540 4541 type FloatingEQNEJump struct { 4542 Jump obj.As 4543 Index int 4544 } 4545 4546 func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) { 4547 p := s.Prog(jumps.Jump) 4548 p.To.Type = obj.TYPE_BRANCH 4549 to := jumps.Index 4550 s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()}) 4551 } 4552 4553 func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) { 4554 switch next { 4555 case b.Succs[0].Block(): 4556 s.oneFPJump(b, &jumps[0][0]) 4557 s.oneFPJump(b, &jumps[0][1]) 4558 case b.Succs[1].Block(): 4559 s.oneFPJump(b, &jumps[1][0]) 4560 s.oneFPJump(b, &jumps[1][1]) 4561 default: 4562 s.oneFPJump(b, &jumps[1][0]) 4563 s.oneFPJump(b, &jumps[1][1]) 4564 q := s.Prog(obj.AJMP) 4565 q.To.Type = obj.TYPE_BRANCH 4566 s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) 4567 } 4568 } 4569 4570 func AuxOffset(v *ssa.Value) (offset int64) { 4571 if v.Aux == nil { 4572 return 0 4573 } 4574 switch sym := v.Aux.(type) { 4575 4576 case *ssa.AutoSymbol: 4577 n := sym.Node.(*Node) 4578 return n.Xoffset 4579 } 4580 return 0 4581 } 4582 4583 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a. 4584 func AddAux(a *obj.Addr, v *ssa.Value) { 4585 AddAux2(a, v, v.AuxInt) 4586 } 4587 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { 4588 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR { 4589 v.Fatalf("bad AddAux addr %v", a) 4590 } 4591 // add integer offset 4592 a.Offset += offset 4593 4594 // If no additional symbol offset, we're done. 4595 if v.Aux == nil { 4596 return 4597 } 4598 // Add symbol's offset from its base register. 4599 switch sym := v.Aux.(type) { 4600 case *ssa.ExternSymbol: 4601 a.Name = obj.NAME_EXTERN 4602 a.Sym = sym.Sym 4603 case *ssa.ArgSymbol: 4604 n := sym.Node.(*Node) 4605 a.Name = obj.NAME_PARAM 4606 a.Sym = n.Orig.Sym.Linksym() 4607 a.Offset += n.Xoffset 4608 case *ssa.AutoSymbol: 4609 n := sym.Node.(*Node) 4610 a.Name = obj.NAME_AUTO 4611 a.Sym = n.Sym.Linksym() 4612 a.Offset += n.Xoffset 4613 default: 4614 v.Fatalf("aux in %s not implemented %#v", v, v.Aux) 4615 } 4616 } 4617 4618 // extendIndex extends v to a full int width. 4619 // panic using the given function if v does not fit in an int (only on 32-bit archs). 4620 func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value { 4621 size := v.Type.Size() 4622 if size == s.config.PtrSize { 4623 return v 4624 } 4625 if size > s.config.PtrSize { 4626 // truncate 64-bit indexes on 32-bit pointer archs. Test the 4627 // high word and branch to out-of-bounds failure if it is not 0. 
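// For example, indexing with a uint64 on a 32-bit arch emits an
// OpInt64Hi == 0 check (panicking via panicfn otherwise) followed by
// an OpTrunc64to32 of the index.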
4628 if Debug['B'] == 0 { 4629 hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v) 4630 cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0)) 4631 s.check(cmp, panicfn) 4632 } 4633 return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v) 4634 } 4635 4636 // Extend value to the required size 4637 var op ssa.Op 4638 if v.Type.IsSigned() { 4639 switch 10*size + s.config.PtrSize { 4640 case 14: 4641 op = ssa.OpSignExt8to32 4642 case 18: 4643 op = ssa.OpSignExt8to64 4644 case 24: 4645 op = ssa.OpSignExt16to32 4646 case 28: 4647 op = ssa.OpSignExt16to64 4648 case 48: 4649 op = ssa.OpSignExt32to64 4650 default: 4651 s.Fatalf("bad signed index extension %s", v.Type) 4652 } 4653 } else { 4654 switch 10*size + s.config.PtrSize { 4655 case 14: 4656 op = ssa.OpZeroExt8to32 4657 case 18: 4658 op = ssa.OpZeroExt8to64 4659 case 24: 4660 op = ssa.OpZeroExt16to32 4661 case 28: 4662 op = ssa.OpZeroExt16to64 4663 case 48: 4664 op = ssa.OpZeroExt32to64 4665 default: 4666 s.Fatalf("bad unsigned index extension %s", v.Type) 4667 } 4668 } 4669 return s.newValue1(op, types.Types[TINT], v) 4670 } 4671 4672 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values. 4673 // Called during ssaGenValue. 4674 func CheckLoweredPhi(v *ssa.Value) { 4675 if v.Op != ssa.OpPhi { 4676 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString()) 4677 } 4678 if v.Type.IsMemory() { 4679 return 4680 } 4681 f := v.Block.Func 4682 loc := f.RegAlloc[v.ID] 4683 for _, a := range v.Args { 4684 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? 4685 v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func) 4686 } 4687 } 4688 } 4689 4690 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block. 4691 // The output of LoweredGetClosurePtr is generally hardwired to the correct register. 4692 // That register contains the closure pointer on closure entry. 4693 func CheckLoweredGetClosurePtr(v *ssa.Value) { 4694 entry := v.Block.Func.Entry 4695 if entry != v.Block || entry.Values[0] != v { 4696 Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) 4697 } 4698 } 4699 4700 // AutoVar returns a *Node and int64 representing the auto variable and offset within it 4701 // where v should be spilled. 
4702 func AutoVar(v *ssa.Value) (*Node, int64) { 4703 loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) 4704 if v.Type.Size() > loc.Type.Size() { 4705 v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) 4706 } 4707 return loc.N.(*Node), loc.Off 4708 } 4709 4710 func AddrAuto(a *obj.Addr, v *ssa.Value) { 4711 n, off := AutoVar(v) 4712 a.Type = obj.TYPE_MEM 4713 a.Sym = n.Sym.Linksym() 4714 a.Reg = int16(thearch.REGSP) 4715 a.Offset = n.Xoffset + off 4716 if n.Class() == PPARAM || n.Class() == PPARAMOUT { 4717 a.Name = obj.NAME_PARAM 4718 } else { 4719 a.Name = obj.NAME_AUTO 4720 } 4721 } 4722 4723 func (s *SSAGenState) AddrScratch(a *obj.Addr) { 4724 if s.ScratchFpMem == nil { 4725 panic("no scratch memory available; forgot to declare usesScratch for Op?") 4726 } 4727 a.Type = obj.TYPE_MEM 4728 a.Name = obj.NAME_AUTO 4729 a.Sym = s.ScratchFpMem.Sym.Linksym() 4730 a.Reg = int16(thearch.REGSP) 4731 a.Offset = s.ScratchFpMem.Xoffset 4732 } 4733 4734 func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { 4735 idx, ok := s.stackMapIndex[v] 4736 if !ok { 4737 Fatalf("missing stack map index for %v", v.LongString()) 4738 } 4739 p := s.Prog(obj.APCDATA) 4740 Addrconst(&p.From, objabi.PCDATA_StackMapIndex) 4741 Addrconst(&p.To, int64(idx)) 4742 4743 if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn { 4744 // Deferred calls will appear to be returning to 4745 // the CALL deferreturn(SB) that we are about to emit. 4746 // However, the stack trace code will show the line 4747 // of the instruction byte before the return PC. 4748 // To avoid that being an unrelated instruction, 4749 // insert an actual hardware NOP that will have the right line number. 4750 // This is different from obj.ANOP, which is a virtual no-op 4751 // that doesn't make it into the instruction stream. 4752 thearch.Ginsnop(s.pp) 4753 } 4754 4755 p = s.Prog(obj.ACALL) 4756 if sym, ok := v.Aux.(*obj.LSym); ok { 4757 p.To.Type = obj.TYPE_MEM 4758 p.To.Name = obj.NAME_EXTERN 4759 p.To.Sym = sym 4760 } else { 4761 // TODO(mdempsky): Can these differences be eliminated? 4762 switch thearch.LinkArch.Family { 4763 case sys.AMD64, sys.I386, sys.PPC64, sys.S390X: 4764 p.To.Type = obj.TYPE_REG 4765 case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64: 4766 p.To.Type = obj.TYPE_MEM 4767 default: 4768 Fatalf("unknown indirect call family") 4769 } 4770 p.To.Reg = v.Args[0].Reg() 4771 } 4772 if s.maxarg < v.AuxInt { 4773 s.maxarg = v.AuxInt 4774 } 4775 return p 4776 } 4777 4778 // fieldIdx finds the index of the field referred to by the ODOT node n. 4779 func fieldIdx(n *Node) int { 4780 t := n.Left.Type 4781 f := n.Sym 4782 if !t.IsStruct() { 4783 panic("ODOT's LHS is not a struct") 4784 } 4785 4786 var i int 4787 for _, t1 := range t.Fields().Slice() { 4788 if t1.Sym != f { 4789 i++ 4790 continue 4791 } 4792 if t1.Offset != n.Xoffset { 4793 panic("field offset doesn't match") 4794 } 4795 return i 4796 } 4797 panic(fmt.Sprintf("can't find field in expr %v\n", n)) 4798 4799 // TODO: keep the result of this function somewhere in the ODOT Node 4800 // so we don't have to recompute it each time we need it. 4801 } 4802 4803 // ssafn holds frontend information about a function that the backend is processing. 4804 // It also exports a bunch of compiler services for the ssa backend. 
4805 type ssafn struct { 4806 curfn *Node 4807 strings map[string]interface{} // map from constant string to data symbols 4808 scratchFpMem *Node // temp for floating point register / memory moves on some architectures 4809 stksize int64 // stack size for current frame 4810 stkptrsize int64 // prefix of stack containing pointers 4811 log bool 4812 } 4813 4814 // StringData returns a symbol (a *types.Sym wrapped in an interface) which 4815 // is the data component of a global string constant containing s. 4816 func (e *ssafn) StringData(s string) interface{} { 4817 if aux, ok := e.strings[s]; ok { 4818 return aux 4819 } 4820 if e.strings == nil { 4821 e.strings = make(map[string]interface{}) 4822 } 4823 data := stringsym(s) 4824 aux := &ssa.ExternSymbol{Sym: data} 4825 e.strings[s] = aux 4826 return aux 4827 } 4828 4829 func (e *ssafn) Auto(pos src.XPos, t ssa.Type) ssa.GCNode { 4830 n := tempAt(pos, e.curfn, t.(*types.Type)) // Note: adds new auto to e.curfn.Func.Dcl list 4831 return n 4832 } 4833 4834 func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4835 n := name.N.(*Node) 4836 ptrType := types.NewPtr(types.Types[TUINT8]) 4837 lenType := types.Types[TINT] 4838 if n.Class() == PAUTO && !n.Addrtaken() { 4839 // Split this string up into two separate variables. 4840 p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos) 4841 l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos) 4842 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0} 4843 } 4844 // Return the two parts of the larger variable. 4845 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)} 4846 } 4847 4848 func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4849 n := name.N.(*Node) 4850 t := types.NewPtr(types.Types[TUINT8]) 4851 if n.Class() == PAUTO && !n.Addrtaken() { 4852 // Split this interface up into two separate variables. 4853 f := ".itab" 4854 if n.Type.IsEmptyInterface() { 4855 f = ".type" 4856 } 4857 c := e.namedAuto(n.Sym.Name+f, t, n.Pos) 4858 d := e.namedAuto(n.Sym.Name+".data", t, n.Pos) 4859 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4860 } 4861 // Return the two parts of the larger variable. 4862 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)} 4863 } 4864 4865 func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { 4866 n := name.N.(*Node) 4867 ptrType := types.NewPtr(name.Type.ElemType().(*types.Type)) 4868 lenType := types.Types[TINT] 4869 if n.Class() == PAUTO && !n.Addrtaken() { 4870 // Split this slice up into three separate variables. 4871 p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos) 4872 l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos) 4873 c := e.namedAuto(n.Sym.Name+".cap", lenType, n.Pos) 4874 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0} 4875 } 4876 // Return the three parts of the larger variable. 
4877 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, 4878 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}, 4879 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)} 4880 } 4881 4882 func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4883 n := name.N.(*Node) 4884 s := name.Type.Size() / 2 4885 var t *types.Type 4886 if s == 8 { 4887 t = types.Types[TFLOAT64] 4888 } else { 4889 t = types.Types[TFLOAT32] 4890 } 4891 if n.Class() == PAUTO && !n.Addrtaken() { 4892 // Split this complex up into two separate variables. 4893 c := e.namedAuto(n.Sym.Name+".real", t, n.Pos) 4894 d := e.namedAuto(n.Sym.Name+".imag", t, n.Pos) 4895 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4896 } 4897 // Return the two parts of the larger variable. 4898 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s} 4899 } 4900 4901 func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4902 n := name.N.(*Node) 4903 var t *types.Type 4904 if name.Type.IsSigned() { 4905 t = types.Types[TINT32] 4906 } else { 4907 t = types.Types[TUINT32] 4908 } 4909 if n.Class() == PAUTO && !n.Addrtaken() { 4910 // Split this int64 up into two separate variables. 4911 h := e.namedAuto(n.Sym.Name+".hi", t, n.Pos) 4912 l := e.namedAuto(n.Sym.Name+".lo", types.Types[TUINT32], n.Pos) 4913 return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: types.Types[TUINT32], Off: 0} 4914 } 4915 // Return the two parts of the larger variable. 4916 if thearch.LinkArch.ByteOrder == binary.BigEndian { 4917 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4} 4918 } 4919 return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off} 4920 } 4921 4922 func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { 4923 n := name.N.(*Node) 4924 st := name.Type 4925 ft := st.FieldType(i) 4926 if n.Class() == PAUTO && !n.Addrtaken() { 4927 // Note: the _ field may appear several times. But 4928 // have no fear, identically-named but distinct Autos are 4929 // ok, albeit maybe confusing for a debugger. 4930 x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft, n.Pos) 4931 return ssa.LocalSlot{N: x, Type: ft, Off: 0} 4932 } 4933 return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)} 4934 } 4935 4936 func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { 4937 n := name.N.(*Node) 4938 at := name.Type 4939 if at.NumElem() != 1 { 4940 Fatalf("bad array size") 4941 } 4942 et := at.ElemType() 4943 if n.Class() == PAUTO && !n.Addrtaken() { 4944 x := e.namedAuto(n.Sym.Name+"[0]", et, n.Pos) 4945 return ssa.LocalSlot{N: x, Type: et, Off: 0} 4946 } 4947 return ssa.LocalSlot{N: n, Type: et, Off: name.Off} 4948 } 4949 4950 func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { 4951 return itabsym(it, offset) 4952 } 4953 4954 // namedAuto returns a new AUTO variable with the given name and type. 4955 // These are exposed to the debugger. 
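// For example, SplitSlice above turns an SSA-able auto slice s into
// the namedAuto-created parts s.ptr, s.len, and s.cap.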
4956 func (e *ssafn) namedAuto(name string, typ ssa.Type, pos src.XPos) ssa.GCNode { 4957 t := typ.(*types.Type) 4958 s := &types.Sym{Name: name, Pkg: localpkg} 4959 4960 n := new(Node) 4961 n.Name = new(Name) 4962 n.Op = ONAME 4963 n.Pos = pos 4964 n.Orig = n 4965 4966 s.Def = asTypesNode(n) 4967 asNode(s.Def).Name.SetUsed(true) 4968 n.Sym = s 4969 n.Type = t 4970 n.SetClass(PAUTO) 4971 n.SetAddable(true) 4972 n.Esc = EscNever 4973 n.Name.Curfn = e.curfn 4974 e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n) 4975 dowidth(t) 4976 return n 4977 } 4978 4979 func (e *ssafn) CanSSA(t ssa.Type) bool { 4980 return canSSAType(t.(*types.Type)) 4981 } 4982 4983 func (e *ssafn) Line(pos src.XPos) string { 4984 return linestr(pos) 4985 } 4986 4987 // Logf logs a message from the compiler. 4988 func (e *ssafn) Logf(msg string, args ...interface{}) { 4989 if e.log { 4990 fmt.Printf(msg, args...) 4991 } 4992 } 4993 4994 func (e *ssafn) Log() bool { 4995 return e.log 4996 } 4997 4998 // Fatalf reports a compiler error and exits. 4999 func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) { 5000 lineno = pos 5001 Fatalf(msg, args...) 5002 } 5003 5004 // Warnl reports a "warning", which is usually flag-triggered 5005 // logging output for the benefit of tests. 5006 func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) { 5007 Warnl(pos, fmt_, args...) 5008 } 5009 5010 func (e *ssafn) Debug_checknil() bool { 5011 return Debug_checknil != 0 5012 } 5013 5014 func (e *ssafn) Debug_wb() bool { 5015 return Debug_wb != 0 5016 } 5017 5018 func (e *ssafn) UseWriteBarrier() bool { 5019 return use_writebarrier 5020 } 5021 5022 func (e *ssafn) Syslook(name string) *obj.LSym { 5023 switch name { 5024 case "goschedguarded": 5025 return goschedguarded 5026 case "writeBarrier": 5027 return writeBarrier 5028 case "writebarrierptr": 5029 return writebarrierptr 5030 case "typedmemmove": 5031 return typedmemmove 5032 case "typedmemclr": 5033 return typedmemclr 5034 } 5035 Fatalf("unknown Syslook func %v", name) 5036 return nil 5037 } 5038 5039 func (n *Node) Typ() ssa.Type { 5040 return n.Type 5041 }