// github.com/sanprasirt/go@v0.0.0-20170607001320-a027466e4b6d/src/cmd/compile/internal/gc/ssa.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"html"
	"os"
	"sort"

	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

func initssaconfig() {
	types_ := ssa.Types{
		Bool:       types.Types[TBOOL],
		Int8:       types.Types[TINT8],
		Int16:      types.Types[TINT16],
		Int32:      types.Types[TINT32],
		Int64:      types.Types[TINT64],
		UInt8:      types.Types[TUINT8],
		UInt16:     types.Types[TUINT16],
		UInt32:     types.Types[TUINT32],
		UInt64:     types.Types[TUINT64],
		Float32:    types.Types[TFLOAT32],
		Float64:    types.Types[TFLOAT64],
		Int:        types.Types[TINT],
		Uintptr:    types.Types[TUINTPTR],
		String:     types.Types[TSTRING],
		BytePtr:    types.NewPtr(types.Types[TUINT8]),
		Int32Ptr:   types.NewPtr(types.Types[TINT32]),
		UInt32Ptr:  types.NewPtr(types.Types[TUINT32]),
		IntPtr:     types.NewPtr(types.Types[TINT]),
		UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
		Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
		Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
		BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
	}
	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
	_ = types.NewPtr(types.NewPtr(types.Idealstring))                 // **string
	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewSlice(types.Idealstring))               // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[TINT16])                             // *int16
	_ = types.NewPtr(types.Types[TINT64])                             // *int64
	_ = types.NewPtr(types.Errortype)                                 // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
	if thearch.LinkArch.Name == "386" {
		ssaConfig.Set387(thearch.Use387)
	}
	ssaCaches = make([]ssa.Cache, nBackendWorkers)

	// Set up some runtime functions we'll need to call.
	Newproc = Sysfunc("newproc")
	Deferproc = Sysfunc("deferproc")
	Deferreturn = Sysfunc("deferreturn")
	Duffcopy = Sysfunc("duffcopy")
	Duffzero = Sysfunc("duffzero")
	panicindex = Sysfunc("panicindex")
	panicslice = Sysfunc("panicslice")
	panicdivide = Sysfunc("panicdivide")
	growslice = Sysfunc("growslice")
	panicdottypeE = Sysfunc("panicdottypeE")
	panicdottypeI = Sysfunc("panicdottypeI")
	panicnildottype = Sysfunc("panicnildottype")
	assertE2I = Sysfunc("assertE2I")
	assertE2I2 = Sysfunc("assertE2I2")
	assertI2I = Sysfunc("assertI2I")
	assertI2I2 = Sysfunc("assertI2I2")
	goschedguarded = Sysfunc("goschedguarded")
	writeBarrier = Sysfunc("writeBarrier")
	writebarrierptr = Sysfunc("writebarrierptr")
	typedmemmove = Sysfunc("typedmemmove")
	typedmemclr = Sysfunc("typedmemclr")
	Udiv = Sysfunc("udiv")
}
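// Illustrative ordering sketch (an assumption; the actual call sites live
// elsewhere in this package): initssaconfig is expected to run once, before
// any backend worker calls buildssa, roughly:
//
//	initssaconfig()
//	f := buildssa(fn, 0) // worker 0 uses ssaCaches[0]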
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
	name := fn.funcname()
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Pos)
	defer s.popLine()

	s.hasdefer = fn.Func.HasDefer()
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Config = ssaConfig
	s.f.Cache = &ssaCaches[worker]
	s.f.Cache.Reset()
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
	s.f.Name = name
	if fn.Func.Pragma&Nosplit != 0 {
		s.f.NoSplit = true
	}
	defer func() {
		if s.f.WBPos.IsKnown() {
			fn.Func.WBPos = s.f.WBPos
		}
	}()
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}

	if name == os.Getenv("GOSSAFUNC") {
		s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.fwdVars = map[*Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class() {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sp)
			if n.Class() == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PAUTOHEAP:
			// moved to heap - already handled by frontend
		case PFUNC:
			// local function - already handled by frontend
		default:
			s.Fatalf("local variable with class %s unimplemented", classnames[n.Class()])
		}
	}

	// Populate SSAable arguments.
	for _, n := range fn.Func.Dcl {
		if n.Class() == PPARAM && s.canSSA(n) {
			s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmtList(fn.Func.Enter)
	s.stmtList(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	s.insertPhis()

	// Don't carry a reference to this around longer than necessary.
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)
	return s.f
}
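// Usage sketch (an assumption about the exact invocation, based on the
// GOSSAFUNC check above): to watch the SSA built here for one function, set
// GOSSAFUNC to its name when building, e.g.
//
//	GOSSAFUNC=Foo go build ./pkg
//
// which prints the dump lists above and writes an ssa.html trace of the
// compilation of Foo.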
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *Node

	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[*Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of PPARAMOUT (return) variables.
	returns []*Node

	cgoUnsafeArgs bool
	hasdefer      bool // whether the function contains a defer statement
}

type funcLine struct {
	f    *obj.LSym
	file string
	line uint
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}
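// Illustrative (not a new API): because label creates the ssaLabel on first
// use, a forward goto and the label it jumps to resolve to the same block:
//
//	lab := s.label(sym)
//	if lab.target == nil {
//		lab.target = s.f.NewBlock(ssa.BlockPlain)
//	}
//
// which is exactly the pattern the OGOTO and OLABEL cases in stmt rely on.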
func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
	lenVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
	newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
	okVar     = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[*Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	b.Pos = s.peekPos()
	return b
}
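// Sketch of the usual pairing (illustrative; it mirrors the statement cases
// below): code is emitted between a startBlock/endBlock pair, and edges are
// added on the block returned by endBlock:
//
//	bNext := s.f.NewBlock(ssa.BlockPlain)
//	if b := s.endBlock(); b != nil { // nil means this point is unreachable
//		b.AddEdgeTo(bNext)
//	}
//	s.startBlock(bNext)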
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit a node with a missing line number;
		// use the parent line number in that case.
		line = s.peekPos()
		if Debug['K'] != 0 {
			Warn("buildssa: unknown position (line 0)")
		}
	}
	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.f.Entry.NewValue0(src.NoXPos, op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
	return s.f.ConstSlice(s.peekPos(), t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
	return s.f.ConstInterface(s.peekPos(), t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
	return s.f.ConstEmptyString(s.peekPos(), t)
}
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}
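// Tiny illustrative use of these helpers (assuming a current block is
// active): computing 1+1 as an SSA value on a 64-bit target looks like
//
//	one := s.constInt64(types.Types[TINT64], 1)
//	sum := s.newValue2(ssa.OpAdd64, types.Types[TINT64], one, one)
//
// constInt picks constInt32 or constInt64 for you based on config.PtrSize.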
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Pos)
	defer s.popLine()

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op != OLABEL {
		return
	}

	s.stmtList(n.Ninit)
	switch n.Op {

	case OBLOCK:
		s.stmtList(n.List)

	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:

	// Expression statements
	case OCALLFUNC:
		if isIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)

	case OAS2DOTTYPE:
		res, resok := s.dottype(n.Rlist.First(), true)
		deref := false
		if !canSSAType(n.Rlist.First().Type) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.List.First(), res, deref, 0)
		s.assign(n.List.Second(), resok, false, 0)
		return

	case OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		if !isIntrinsicCall(n.Rlist.First()) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
		}
		v := s.intrinsicCall(n.Rlist.First())
		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
		s.assign(n.List.First(), v1, false, 0)
		s.assign(n.List.Second(), v2, false, 0)
		return

	case ODCL:
		if n.Left.Class() == PAUTOHEAP {
			Fatalf("DCL %v", n)
		}

	case OLABEL:
		sym := n.Left.Sym
		lab := s.label(sym)

		// Associate label with its control flow node, if any
		if ctl := n.labeledControl(); ctl != nil {
			s.labeledNodes[ctl] = lab
		}

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

	case OGOTO:
		sym := n.Left.Sym

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.AddEdgeTo(lab.target)

	case OAS:
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//	VARDEF x
			//	COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) {
					if !s.canSSA(n.Left) {
						if Debug_append > 0 {
							Warnl(n.Pos, "append: len-only update")
						}
						s.append(rhs, true)
						return
					} else {
						if Debug_append > 0 { // replicating old diagnostic message
							Warnl(n.Pos, "append: len-only update (in local slice)")
						}
					}
				}
			}
		}

		if isblank(n.Left) {
			// _ = rhs
			// Just evaluate rhs for side-effects.
			if rhs != nil {
				s.expr(rhs)
			}
			return
		}

		var t *types.Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}

		var r *ssa.Value
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}

		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//	tmp = len(*p)
			//	(*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.Left, r, deref, skip)

	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		var likely int8
		if n.Likely() {
			likely = 1
		}
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, likely)
		}

		s.startBlock(bThen)
		s.stmtList(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}

		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Left.Sym.Linksym()

	case OCONTINUE, OBREAK:
		var to *ssa.Block
		if n.Left == nil {
			// plain break/continue
			switch n.Op {
			case OCONTINUE:
				to = s.continueTo
			case OBREAK:
				to = s.breakTo
			}
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
		}

		b := s.endBlock()
		b.AddEdgeTo(to)

	case OFOR, OFORUNTIL:
		// OFOR: for Ninit; Left; Right { Nbody }
		// For      = cond; body; incr
		// Foruntil = body; incr; cond
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// first, jump to condition test (OFOR) or body (OFORUNTIL)
		b := s.endBlock()
		if n.Op == OFOR {
			b.AddEdgeTo(bCond)
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}

		} else {
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Nbody)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}

		if n.Op == OFORUNTIL {
			// generate code to test condition
			s.startBlock(bCond)
			if n.Left != nil {
				s.condBranch(n.Left, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}
		}

		s.startBlock(bEnd)

	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(n.Nbody)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
		// If we still have a current block here, then mark it unreachable.
		if s.curBlock != nil {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
		}
		s.startBlock(bEnd)

	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, n.Left, s.mem())
		}

	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken() {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())

	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)

	default:
		s.Fatalf("unhandled stmt %v", n.Op)
	}
}
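// Illustrative block layout for a plain `for cond { body; incr }` built by
// the OFOR case above (a sketch of the edges, not additional machinery):
//
//	entry -> bCond
//	bCond -> bBody (cond true) | bEnd (cond false)
//	bBody -> bIncr
//	bIncr -> bCond
//
// OFORUNTIL differs only in entering at bBody and testing cond after incr.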
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if s.hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}

	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmtList(s.exitCode)

	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, n.Type, addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}

	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}
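// The VarDef/Store pairing above is the general pattern for threading side
// effects through the memory chain (an illustrative restatement, not a new
// helper):
//
//	s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, addr, val, s.mem())
//
// Each memory-touching value takes the current memory (s.mem()) as its last
// argument and becomes the new current memory, which keeps stores ordered.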
type opAndType struct {
	op    Op
	etype types.EType
}

var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}:    ssa.OpMul8,
	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
	opAndType{OMUL, TINT16}:   ssa.OpMul16,
	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
	opAndType{OMUL, TINT32}:   ssa.OpMul32,
	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
	opAndType{OMUL, TINT64}:   ssa.OpMul64,
	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}:   ssa.OpMod8,
	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
	opAndType{OMOD, TINT16}:  ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}:  ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}:  ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}:   ssa.OpAnd8,
	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
	opAndType{OAND, TINT16}:  ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}:  ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}:  ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}:   ssa.OpOr8,
	opAndType{OOR, TUINT8}:  ssa.OpOr8,
	opAndType{OOR, TINT16}:  ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}:  ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}:  ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}:   ssa.OpXor8,
	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
	opAndType{OXOR, TINT16}:  ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}:  ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}:  ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
	opAndType{OEQ, TINT8}:      ssa.OpEq8,
	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
	opAndType{OEQ, TINT16}:     ssa.OpEq16,
	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
	opAndType{OEQ, TINT32}:     ssa.OpEq32,
	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
	opAndType{OEQ, TINT64}:     ssa.OpEq64,
	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
	opAndType{OEQ, TPTR32}:     ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}:     ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,

	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
	opAndType{ONE, TINT8}:      ssa.OpNeq8,
	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
	opAndType{ONE, TINT16}:     ssa.OpNeq16,
	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
	opAndType{ONE, TINT32}:     ssa.OpNeq32,
	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
	opAndType{ONE, TINT64}:     ssa.OpNeq64,
	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
	opAndType{ONE, TPTR32}:     ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}:     ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,

	opAndType{OLT, TINT8}:    ssa.OpLess8,
	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
	opAndType{OLT, TINT16}:   ssa.OpLess16,
	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
	opAndType{OLT, TINT32}:   ssa.OpLess32,
	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
	opAndType{OLT, TINT64}:   ssa.OpLess64,
	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}:    ssa.OpGreater8,
	opAndType{OGT, TUINT8}:   ssa.OpGreater8U,
	opAndType{OGT, TINT16}:   ssa.OpGreater16,
	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
	opAndType{OGT, TINT32}:   ssa.OpGreater32,
	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
	opAndType{OGT, TINT64}:   ssa.OpGreater64,
	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}:    ssa.OpLeq8,
	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
	opAndType{OLE, TINT16}:   ssa.OpLeq16,
	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
	opAndType{OLE, TINT32}:   ssa.OpLeq32,
	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
	opAndType{OLE, TINT64}:   ssa.OpLeq64,
	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}:    ssa.OpGeq8,
	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
	opAndType{OGE, TINT16}:   ssa.OpGeq16,
	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
	opAndType{OGE, TINT32}:   ssa.OpGeq32,
	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
	opAndType{OGE, TINT64}:   ssa.OpGeq64,
	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}

func (s *state) concreteEtype(t *types.Type) types.EType {
	e := t.Etype
	switch e {
	default:
		return e
	case TINT:
		if s.config.PtrSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	}
}

func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}
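// Example lookups (illustrative): ssaOp first canonicalizes TINT, TUINT and
// TUINTPTR via concreteEtype, so on a 64-bit target
//
//	s.ssaOp(OADD, types.Types[TINT]) // == ssa.OpAdd64
//
// while the same call on a 32-bit target yields ssa.OpAdd32.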
func floatForComplex(t *types.Type) *types.Type {
	if t.Size() == 8 {
		return types.Types[TFLOAT32]
	} else {
		return types.Types[TFLOAT64]
	}
}

type opAndTwoTypes struct {
	op     Op
	etype1 types.EType
	etype2 types.EType
}

type twoTypes struct {
	etype1 types.EType
	etype2 types.EType
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType types.EType
}

var fpConvOpToSSA = map[twoTypes]twoOpsAndType{

	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead

	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}

// this map is used only for 32-bit archs, and only includes the difference:
// on 32-bit archs, don't use int64<->float conversion for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}

// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
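// Worked example (illustrative): converting int16 -> float64 uses the
// fpConvOpToSSA entry {SignExt16to32, Cvt32to64F, TINT32}, i.e. two steps:
//
//	wide := s.newValue1(ssa.OpSignExt16to32, types.Types[TINT32], x)
//	res := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], wide)
//
// Entries containing OpInvalid mark the unsigned-64-bit cases that instead
// get the branchy expansions applied later in expr's OCONV handling.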
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}

func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}
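// Example (illustrative): a left shift of an int8 by a uint64 count resolves
// via
//
//	s.ssaShiftOp(OLSH, types.Types[TINT8], types.Types[TUINT64]) // == ssa.OpLsh8x64
//
// The signedness of the shifted value only matters for right shifts, which
// split into arithmetic (Rsh) and logical (RshU) variants.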
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.
		s.pushLine(n.Pos)
		defer s.popLine()
	}

	s.stmtList(n.Ninit)
	switch n.Op {
	case OARRAYBYTESTRTMP:
		slice := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
		len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
	case OSTRARRAYBYTETMP:
		str := s.expr(n.Left)
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Left.Sym.Linksym()})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case ONAME:
		if n.Class() == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym).Linksym()
			aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: sym})
			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch u := n.Val().U.(type) {
		case *Mpint:
			i := u.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case string:
			if u == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
		case bool:
			return s.constBool(u)
		case *NilVal:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case *Mpflt:
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, u.Float32())
			case 8:
				return s.constFloat64(n.Type, u.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case *Mpcplx:
			r := &u.Real
			i := &u.Imag
			switch n.Type.Size() {
			case 8:
				pt := types.Types[TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat32(pt, r.Float32()),
					s.constFloat32(pt, i.Float32()))
			case 16:
				pt := types.Types[TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type,
					s.constFloat64(pt, r.Float64()),
					s.constFloat64(pt, i.Float64()))
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}

		default:
			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v
	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsBoolean() && tt.IsKind(TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
			return s.newValue1(ssa.OpCopy, n.Type, x)
		}
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}
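		// The 10*ft.Size()+tt.Size() key above encodes the (from, to) widths
		// in one integer; e.g. int64 -> int16 gives 10*8+2 = 82, selecting
		// OpTrunc64to16 (an illustrative reading of the switch, not new
		// behavior).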
		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}
			if thearch.LinkArch.Family == sys.ARM64 {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
					conv = conv1
				}
			}

			if thearch.LinkArch.Family == sys.MIPS {
				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
					// tt is float32 or float64, and ft is also unsigned
					if tt.Size() == 4 {
						return s.uint32Tofloat32(n, x, ft, tt)
					}
					if tt.Size() == 8 {
						return s.uint32Tofloat64(n, x, ft, tt)
					}
				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
					// ft is float32 or float64, and tt is unsigned integer
					if ft.Size() == 4 {
						return s.float32ToUint32(n, x, ft, tt)
					}
					if ft.Size() == 8 {
						return s.float64ToUint32(n, x, ft, tt)
					}
				}
			}

			if !ok {
				s.Fatalf("weird float conversion %v -> %v", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValue1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValue1(op1, n.Type, x)
				}
				return s.newValue1(op2, n.Type, s.newValue1(op1, types.Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
			if ft.IsInteger() {
				// tt is float32 or float64, and ft is also unsigned
				if tt.Size() == 4 {
					return s.uint64Tofloat32(n, x, ft, tt)
				}
				if tt.Size() == 8 {
					return s.uint64Tofloat64(n, x, ft, tt)
				}
				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
			}
			// ft is float32 or float64, and tt is unsigned integer
			if ft.Size() == 4 {
				return s.float32ToUint64(n, x, ft, tt)
			}
			if ft.Size() == 8 {
				return s.float64ToUint64(n, x, ft, tt)
			}
			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
			return nil
		}
		if ft.IsComplex() && tt.IsComplex() {
			var op ssa.Op
			if ft.Size() == tt.Size() {
				switch ft.Size() {
				case 8:
					op = ssa.OpRound32F
				case 16:
					op = ssa.OpRound64F
				default:
					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
				}
			} else if ft.Size() == 8 && tt.Size() == 16 {
				op = ssa.OpCvt32Fto64F
			} else if ft.Size() == 16 && tt.Size() == 8 {
				op = ssa.OpCvt64Fto32F
			} else {
				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
			}
			ftp := floatForComplex(ft)
			ttp := floatForComplex(tt)
			return s.newValue2(ssa.OpComplexMake, tt,
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
		}

		s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
		return nil

	case ODOTTYPE:
		res, _ := s.dottype(n, false)
		return res
	// binary ops
	case OLT, OEQ, ONE, OLE, OGE, OGT:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Left.Type.IsComplex() {
			pt := floatForComplex(n.Left.Type)
			op := s.ssaOp(OEQ, pt)
			r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
			i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
			c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
			switch n.Op {
			case OEQ:
				return c
			case ONE:
				return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
			default:
				s.Fatalf("ordered complex compare %v", n.Op)
			}
		}
		return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
	case OMUL:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancelation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
			ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))

			if pt != wt { // Narrow to store back
				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
			}

			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1672 if ft.IsInteger() { 1673 // tt is float32 or float64, and ft is also unsigned 1674 if tt.Size() == 4 { 1675 return s.uint64Tofloat32(n, x, ft, tt) 1676 } 1677 if tt.Size() == 8 { 1678 return s.uint64Tofloat64(n, x, ft, tt) 1679 } 1680 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt) 1681 } 1682 // ft is float32 or float64, and tt is unsigned integer 1683 if ft.Size() == 4 { 1684 return s.float32ToUint64(n, x, ft, tt) 1685 } 1686 if ft.Size() == 8 { 1687 return s.float64ToUint64(n, x, ft, tt) 1688 } 1689 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) 1690 return nil 1691 } 1692 1693 if ft.IsComplex() && tt.IsComplex() { 1694 var op ssa.Op 1695 if ft.Size() == tt.Size() { 1696 switch ft.Size() { 1697 case 8: 1698 op = ssa.OpRound32F 1699 case 16: 1700 op = ssa.OpRound64F 1701 default: 1702 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1703 } 1704 } else if ft.Size() == 8 && tt.Size() == 16 { 1705 op = ssa.OpCvt32Fto64F 1706 } else if ft.Size() == 16 && tt.Size() == 8 { 1707 op = ssa.OpCvt64Fto32F 1708 } else { 1709 s.Fatalf("weird complex conversion %v -> %v", ft, tt) 1710 } 1711 ftp := floatForComplex(ft) 1712 ttp := floatForComplex(tt) 1713 return s.newValue2(ssa.OpComplexMake, tt, 1714 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), 1715 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) 1716 } 1717 1718 s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) 1719 return nil 1720 1721 case ODOTTYPE: 1722 res, _ := s.dottype(n, false) 1723 return res 1724 1725 // binary ops 1726 case OLT, OEQ, ONE, OLE, OGE, OGT: 1727 a := s.expr(n.Left) 1728 b := s.expr(n.Right) 1729 if n.Left.Type.IsComplex() { 1730 pt := floatForComplex(n.Left.Type) 1731 op := s.ssaOp(OEQ, pt) 1732 r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) 1733 i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) 1734 c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i) 1735 switch n.Op { 1736 case OEQ: 1737 return c 1738 case ONE: 1739 return s.newValue1(ssa.OpNot, types.Types[TBOOL], c) 1740 default: 1741 s.Fatalf("ordered complex compare %v", n.Op) 1742 } 1743 } 1744 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b) 1745 case OMUL: 1746 a := s.expr(n.Left) 1747 b := s.expr(n.Right) 1748 if n.Type.IsComplex() { 1749 mulop := ssa.OpMul64F 1750 addop := ssa.OpAdd64F 1751 subop := ssa.OpSub64F 1752 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1753 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1754 1755 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1756 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1757 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1758 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1759 1760 if pt != wt { // Widen for calculation 1761 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1762 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1763 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1764 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1765 } 1766 1767 xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1768 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal)) 1769 1770 if pt != wt { // Narrow to store back 1771 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1772 ximag = 
s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1773 } 1774 1775 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1776 } 1777 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1778 1779 case ODIV: 1780 a := s.expr(n.Left) 1781 b := s.expr(n.Right) 1782 if n.Type.IsComplex() { 1783 // TODO this is not executed because the front-end substitutes a runtime call. 1784 // That probably ought to change; with modest optimization the widen/narrow 1785 // conversions could all be elided in larger expression trees. 1786 mulop := ssa.OpMul64F 1787 addop := ssa.OpAdd64F 1788 subop := ssa.OpSub64F 1789 divop := ssa.OpDiv64F 1790 pt := floatForComplex(n.Type) // Could be Float32 or Float64 1791 wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error 1792 1793 areal := s.newValue1(ssa.OpComplexReal, pt, a) 1794 breal := s.newValue1(ssa.OpComplexReal, pt, b) 1795 aimag := s.newValue1(ssa.OpComplexImag, pt, a) 1796 bimag := s.newValue1(ssa.OpComplexImag, pt, b) 1797 1798 if pt != wt { // Widen for calculation 1799 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) 1800 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) 1801 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) 1802 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) 1803 } 1804 1805 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag)) 1806 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) 1807 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag)) 1808 1809 // TODO not sure if this is best done in wide precision or narrow 1810 // Double-rounding might be an issue. 1811 // Note that the pre-SSA implementation does the entire calculation 1812 // in wide format, so wide is compatible. 1813 xreal = s.newValue2(divop, wt, xreal, denom) 1814 ximag = s.newValue2(divop, wt, ximag, denom) 1815 1816 if pt != wt { // Narrow to store back 1817 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) 1818 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) 1819 } 1820 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) 1821 } 1822 if n.Type.IsFloat() { 1823 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1824 } 1825 return s.intDivide(n, a, b) 1826 case OMOD: 1827 a := s.expr(n.Left) 1828 b := s.expr(n.Right) 1829 return s.intDivide(n, a, b) 1830 case OADD, OSUB: 1831 a := s.expr(n.Left) 1832 b := s.expr(n.Right) 1833 if n.Type.IsComplex() { 1834 pt := floatForComplex(n.Type) 1835 op := s.ssaOp(n.Op, pt) 1836 return s.newValue2(ssa.OpComplexMake, n.Type, 1837 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), 1838 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) 1839 } 1840 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1841 case OAND, OOR, OXOR: 1842 a := s.expr(n.Left) 1843 b := s.expr(n.Right) 1844 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 1845 case OLSH, ORSH: 1846 a := s.expr(n.Left) 1847 b := s.expr(n.Right) 1848 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b) 1849 case OANDAND, OOROR: 1850 // To implement OANDAND (and OOROR), we introduce a 1851 // new temporary variable to hold the result. The 1852 // variable is associated with the OANDAND node in the 1853 // s.vars table (normally variables are only 1854 // associated with ONAME nodes). 
We convert
1855		// A && B
1856		// to
1857		// var = A
1858		// if var {
1859		//     var = B
1860		// }
1861		// Using var in the subsequent block introduces the
1862		// necessary phi variable.
1863		el := s.expr(n.Left)
1864		s.vars[n] = el
1865
1866		b := s.endBlock()
1867		b.Kind = ssa.BlockIf
1868		b.SetControl(el)
1869		// In theory, we should set b.Likely here based on context.
1870		// However, gc only gives us likeliness hints
1871		// in a single place, for plain OIF statements,
1872		// and passing around context is finicky, so don't bother for now.
1873
1874		bRight := s.f.NewBlock(ssa.BlockPlain)
1875		bResult := s.f.NewBlock(ssa.BlockPlain)
1876		if n.Op == OANDAND {
1877			b.AddEdgeTo(bRight)
1878			b.AddEdgeTo(bResult)
1879		} else if n.Op == OOROR {
1880			b.AddEdgeTo(bResult)
1881			b.AddEdgeTo(bRight)
1882		}
1883
1884		s.startBlock(bRight)
1885		er := s.expr(n.Right)
1886		s.vars[n] = er
1887
1888		b = s.endBlock()
1889		b.AddEdgeTo(bResult)
1890
1891		s.startBlock(bResult)
1892		return s.variable(n, types.Types[TBOOL])
1893	case OCOMPLEX:
1894		r := s.expr(n.Left)
1895		i := s.expr(n.Right)
1896		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
1897
1898	// unary ops
1899	case OMINUS:
1900		a := s.expr(n.Left)
1901		if n.Type.IsComplex() {
1902			tp := floatForComplex(n.Type)
1903			negop := s.ssaOp(n.Op, tp)
1904			return s.newValue2(ssa.OpComplexMake, n.Type,
1905				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
1906				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
1907		}
1908		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1909	case ONOT, OCOM:
1910		a := s.expr(n.Left)
1911		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1912	case OIMAG, OREAL:
1913		a := s.expr(n.Left)
1914		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
1915	case OPLUS:
1916		return s.expr(n.Left)
1917
1918	case OADDR:
1919		return s.addr(n.Left, n.Bounded())
1920
1921	case OINDREGSP:
1922		addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
1923		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
1924
1925	case OIND:
1926		p := s.exprPtr(n.Left, false, n.Pos)
1927		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1928
1929	case ODOT:
1930		t := n.Left.Type
1931		if canSSAType(t) {
1932			v := s.expr(n.Left)
1933			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
1934		}
1935		if n.Left.Op == OSTRUCTLIT {
1936			// All literals with nonzero fields have already been
1937			// rewritten during walk. Any that remain are just T{}
1938			// or equivalents. Use the zero value.
1939			if !iszero(n.Left) {
1940				Fatalf("literal with nonzero value in SSA: %v", n.Left)
1941			}
1942			return s.zeroVal(n.Type)
1943		}
1944		p := s.addr(n, false)
1945		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1946
1947	case ODOTPTR:
1948		p := s.exprPtr(n.Left, false, n.Pos)
1949		p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
1950		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1951
1952	case OINDEX:
1953		switch {
1954		case n.Left.Type.IsString():
1955			if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
1956				// Replace "abc"[1] with 'b'.
1957				// Delayed until now because "abc"[1] is not an ideal constant.
1958				// See test/fixedbugs/issue11370.go.
1959 return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()]))) 1960 } 1961 a := s.expr(n.Left) 1962 i := s.expr(n.Right) 1963 i = s.extendIndex(i, panicindex) 1964 if !n.Bounded() { 1965 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a) 1966 s.boundsCheck(i, len) 1967 } 1968 ptrtyp := s.f.Config.Types.BytePtr 1969 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) 1970 if Isconst(n.Right, CTINT) { 1971 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr) 1972 } else { 1973 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) 1974 } 1975 return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem()) 1976 case n.Left.Type.IsSlice(): 1977 p := s.addr(n, false) 1978 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 1979 case n.Left.Type.IsArray(): 1980 if bound := n.Left.Type.NumElem(); bound <= 1 { 1981 // SSA can handle arrays of length at most 1. 1982 a := s.expr(n.Left) 1983 i := s.expr(n.Right) 1984 if bound == 0 { 1985 // Bounds check will never succeed. Might as well 1986 // use constants for the bounds check. 1987 z := s.constInt(types.Types[TINT], 0) 1988 s.boundsCheck(z, z) 1989 // The return value won't be live, return junk. 1990 return s.newValue0(ssa.OpUnknown, n.Type) 1991 } 1992 i = s.extendIndex(i, panicindex) 1993 if !n.Bounded() { 1994 s.boundsCheck(i, s.constInt(types.Types[TINT], bound)) 1995 } 1996 return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a) 1997 } 1998 p := s.addr(n, false) 1999 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem()) 2000 default: 2001 s.Fatalf("bad type for index %v", n.Left.Type) 2002 return nil 2003 } 2004 2005 case OLEN, OCAP: 2006 switch { 2007 case n.Left.Type.IsSlice(): 2008 op := ssa.OpSliceLen 2009 if n.Op == OCAP { 2010 op = ssa.OpSliceCap 2011 } 2012 return s.newValue1(op, types.Types[TINT], s.expr(n.Left)) 2013 case n.Left.Type.IsString(): // string; not reachable for OCAP 2014 return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left)) 2015 case n.Left.Type.IsMap(), n.Left.Type.IsChan(): 2016 return s.referenceTypeBuiltin(n, s.expr(n.Left)) 2017 default: // array 2018 return s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 2019 } 2020 2021 case OSPTR: 2022 a := s.expr(n.Left) 2023 if n.Left.Type.IsSlice() { 2024 return s.newValue1(ssa.OpSlicePtr, n.Type, a) 2025 } else { 2026 return s.newValue1(ssa.OpStringPtr, n.Type, a) 2027 } 2028 2029 case OITAB: 2030 a := s.expr(n.Left) 2031 return s.newValue1(ssa.OpITab, n.Type, a) 2032 2033 case OIDATA: 2034 a := s.expr(n.Left) 2035 return s.newValue1(ssa.OpIData, n.Type, a) 2036 2037 case OEFACE: 2038 tab := s.expr(n.Left) 2039 data := s.expr(n.Right) 2040 return s.newValue2(ssa.OpIMake, n.Type, tab, data) 2041 2042 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: 2043 v := s.expr(n.Left) 2044 var i, j, k *ssa.Value 2045 low, high, max := n.SliceBounds() 2046 if low != nil { 2047 i = s.extendIndex(s.expr(low), panicslice) 2048 } 2049 if high != nil { 2050 j = s.extendIndex(s.expr(high), panicslice) 2051 } 2052 if max != nil { 2053 k = s.extendIndex(s.expr(max), panicslice) 2054 } 2055 p, l, c := s.slice(n.Left.Type, v, i, j, k) 2056 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) 2057 2058 case OSLICESTR: 2059 v := s.expr(n.Left) 2060 var i, j *ssa.Value 2061 low, high, _ := n.SliceBounds() 2062 if low != nil { 2063 i = s.extendIndex(s.expr(low), panicslice) 2064 } 2065 if high != nil { 2066 j = s.extendIndex(s.expr(high), panicslice) 2067 } 2068 p, l, _ := s.slice(n.Left.Type, v, i, j, nil) 
2069 return s.newValue2(ssa.OpStringMake, n.Type, p, l) 2070 2071 case OCALLFUNC: 2072 if isIntrinsicCall(n) { 2073 return s.intrinsicCall(n) 2074 } 2075 fallthrough 2076 2077 case OCALLINTER, OCALLMETH: 2078 a := s.call(n, callNormal) 2079 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem()) 2080 2081 case OGETG: 2082 return s.newValue1(ssa.OpGetG, n.Type, s.mem()) 2083 2084 case OAPPEND: 2085 return s.append(n, false) 2086 2087 case OSTRUCTLIT, OARRAYLIT: 2088 // All literals with nonzero fields have already been 2089 // rewritten during walk. Any that remain are just T{} 2090 // or equivalents. Use the zero value. 2091 if !iszero(n) { 2092 Fatalf("literal with nonzero value in SSA: %v", n) 2093 } 2094 return s.zeroVal(n.Type) 2095 2096 default: 2097 s.Fatalf("unhandled expr %v", n.Op) 2098 return nil 2099 } 2100 } 2101 2102 // append converts an OAPPEND node to SSA. 2103 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, 2104 // adds it to s, and returns the Value. 2105 // If inplace is true, it writes the result of the OAPPEND expression n 2106 // back to the slice being appended to, and returns nil. 2107 // inplace MUST be set to false if the slice can be SSA'd. 2108 func (s *state) append(n *Node, inplace bool) *ssa.Value { 2109 // If inplace is false, process as expression "append(s, e1, e2, e3)": 2110 // 2111 // ptr, len, cap := s 2112 // newlen := len + 3 2113 // if newlen > cap { 2114 // ptr, len, cap = growslice(s, newlen) 2115 // newlen = len + 3 // recalculate to avoid a spill 2116 // } 2117 // // with write barriers, if needed: 2118 // *(ptr+len) = e1 2119 // *(ptr+len+1) = e2 2120 // *(ptr+len+2) = e3 2121 // return makeslice(ptr, newlen, cap) 2122 // 2123 // 2124 // If inplace is true, process as statement "s = append(s, e1, e2, e3)": 2125 // 2126 // a := &s 2127 // ptr, len, cap := s 2128 // newlen := len + 3 2129 // if newlen > cap { 2130 // newptr, len, newcap = growslice(ptr, len, cap, newlen) 2131 // vardef(a) // if necessary, advise liveness we are writing a new a 2132 // *a.cap = newcap // write before ptr to avoid a spill 2133 // *a.ptr = newptr // with write barrier 2134 // } 2135 // newlen = len + 3 // recalculate to avoid a spill 2136 // *a.len = newlen 2137 // // with write barriers, if needed: 2138 // *(ptr+len) = e1 2139 // *(ptr+len+1) = e2 2140 // *(ptr+len+2) = e3 2141 2142 et := n.Type.Elem() 2143 pt := types.NewPtr(et) 2144 2145 // Evaluate slice 2146 sn := n.List.First() // the slice node is the first in the list 2147 2148 var slice, addr *ssa.Value 2149 if inplace { 2150 addr = s.addr(sn, false) 2151 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 2152 } else { 2153 slice = s.expr(sn) 2154 } 2155 2156 // Allocate new blocks 2157 grow := s.f.NewBlock(ssa.BlockPlain) 2158 assign := s.f.NewBlock(ssa.BlockPlain) 2159 2160 // Decide if we need to grow 2161 nargs := int64(n.List.Len() - 1) 2162 p := s.newValue1(ssa.OpSlicePtr, pt, slice) 2163 l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2164 c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice) 2165 nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2166 2167 cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c) 2168 s.vars[&ptrVar] = p 2169 2170 if !inplace { 2171 s.vars[&newlenVar] = nl 2172 s.vars[&capVar] = c 2173 } else { 2174 s.vars[&lenVar] = l 2175 } 2176 2177 b := s.endBlock() 2178 b.Kind = ssa.BlockIf 2179 b.Likely = ssa.BranchUnlikely 2180 b.SetControl(cmp) 2181 
b.AddEdgeTo(grow) 2182 b.AddEdgeTo(assign) 2183 2184 // Call growslice 2185 s.startBlock(grow) 2186 taddr := s.expr(n.Left) 2187 r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl) 2188 2189 if inplace { 2190 if sn.Op == ONAME { 2191 // Tell liveness we're about to build a new slice 2192 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) 2193 } 2194 capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr) 2195 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capaddr, r[2], s.mem()) 2196 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, pt, addr, r[0], s.mem()) 2197 // load the value we just stored to avoid having to spill it 2198 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) 2199 s.vars[&lenVar] = r[1] // avoid a spill in the fast path 2200 } else { 2201 s.vars[&ptrVar] = r[0] 2202 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs)) 2203 s.vars[&capVar] = r[2] 2204 } 2205 2206 b = s.endBlock() 2207 b.AddEdgeTo(assign) 2208 2209 // assign new elements to slots 2210 s.startBlock(assign) 2211 2212 if inplace { 2213 l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len 2214 nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs)) 2215 lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr) 2216 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenaddr, nl, s.mem()) 2217 } 2218 2219 // Evaluate args 2220 type argRec struct { 2221 // if store is true, we're appending the value v. If false, we're appending the 2222 // value at *v. 2223 v *ssa.Value 2224 store bool 2225 } 2226 args := make([]argRec, 0, nargs) 2227 for _, n := range n.List.Slice()[1:] { 2228 if canSSAType(n.Type) { 2229 args = append(args, argRec{v: s.expr(n), store: true}) 2230 } else { 2231 v := s.addr(n, false) 2232 args = append(args, argRec{v: v}) 2233 } 2234 } 2235 2236 p = s.variable(&ptrVar, pt) // generates phi for ptr 2237 if !inplace { 2238 nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl 2239 c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap 2240 } 2241 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) 2242 for i, arg := range args { 2243 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i))) 2244 if arg.store { 2245 s.storeType(et, addr, arg.v, 0) 2246 } else { 2247 store := s.newValue3I(ssa.OpMove, types.TypeMem, et.Size(), addr, arg.v, s.mem()) 2248 store.Aux = et 2249 s.vars[&memVar] = store 2250 } 2251 } 2252 2253 delete(s.vars, &ptrVar) 2254 if inplace { 2255 delete(s.vars, &lenVar) 2256 return nil 2257 } 2258 delete(s.vars, &newlenVar) 2259 delete(s.vars, &capVar) 2260 // make result 2261 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) 2262 } 2263 2264 // condBranch evaluates the boolean expression cond and branches to yes 2265 // if cond is true and no if cond is false. 2266 // This function is intended to handle && and || better than just calling 2267 // s.expr(cond) and branching on the result. 
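// For example, this lowers
//
//	if a && b { ... }
//
// into, roughly (a sketch of the resulting control flow, not a literal dump),
//
//	if a { goto mid } else { goto no }
//	mid: if b { goto yes } else { goto no }
//
// so b is never evaluated when a is false and no boolean temporary is
// materialized for the combined condition.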
2268	func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
2269		if cond.Op == OANDAND {
2270			mid := s.f.NewBlock(ssa.BlockPlain)
2271			s.stmtList(cond.Ninit)
2272			s.condBranch(cond.Left, mid, no, max8(likely, 0))
2273			s.startBlock(mid)
2274			s.condBranch(cond.Right, yes, no, likely)
2275			return
2276			// Note: if likely==1, then both recursive calls pass 1.
2277			// If likely==-1, then we don't have enough information to decide
2278			// whether the first branch is likely or not. So we pass 0 for
2279			// the likeliness of the first branch.
2280			// TODO: have the frontend give us branch prediction hints for
2281			// OANDAND and OOROR nodes (if it ever has such info).
2282		}
2283		if cond.Op == OOROR {
2284			mid := s.f.NewBlock(ssa.BlockPlain)
2285			s.stmtList(cond.Ninit)
2286			s.condBranch(cond.Left, yes, mid, min8(likely, 0))
2287			s.startBlock(mid)
2288			s.condBranch(cond.Right, yes, no, likely)
2289			return
2290			// Note: if likely==-1, then both recursive calls pass -1.
2291			// If likely==1, then we don't have enough info to decide
2292			// the likelihood of the first branch.
2293		}
2294		if cond.Op == ONOT {
2295			s.stmtList(cond.Ninit)
2296			s.condBranch(cond.Left, no, yes, -likely)
2297			return
2298		}
2299		c := s.expr(cond)
2300		b := s.endBlock()
2301		b.Kind = ssa.BlockIf
2302		b.SetControl(c)
2303		b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
2304		b.AddEdgeTo(yes)
2305		b.AddEdgeTo(no)
2306	}
2307
2308	type skipMask uint8
2309
2310	const (
2311		skipPtr skipMask = 1 << iota
2312		skipLen
2313		skipCap
2314	)
2315
2316	// assign does left = right.
2317	// Right has already been evaluated to ssa, left has not.
2318	// If deref is true, then we do left = *right instead (and right has already been nil-checked).
2319	// If deref is true and right == nil, just do left = 0.
2320	// skip indicates assignments (at the top level) that can be avoided.
2321	func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
2322		if left.Op == ONAME && isblank(left) {
2323			return
2324		}
2325		t := left.Type
2326		dowidth(t)
2327		if s.canSSA(left) {
2328			if deref {
2329				s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
2330			}
2331			if left.Op == ODOT {
2332				// We're assigning to a field of an ssa-able value.
2333				// We need to build a new structure with the new value for the
2334				// field we're assigning and the old values for the other fields.
2335				// For instance:
2336				// type T struct {a, b, c int}
2337				// var x T
2338				// x.b = 5
2339				// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
2340
2341				// Grab information about the structure type.
2342				t := left.Left.Type
2343				nf := t.NumFields()
2344				idx := fieldIdx(left)
2345
2346				// Grab old value of structure.
2347				old := s.expr(left.Left)
2348
2349				// Make new structure.
2350				new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
2351
2352				// Add fields as args.
2353				for i := 0; i < nf; i++ {
2354					if i == idx {
2355						new.AddArg(right)
2356					} else {
2357						new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
2358					}
2359				}
2360
2361				// Recursively assign the new value we've made to the base of the dot op.
2362				s.assign(left.Left, new, false, 0)
2363				// TODO: do we need to update named values here?
2364				return
2365			}
2366			if left.Op == OINDEX && left.Left.Type.IsArray() {
2367				// We're assigning to an element of an ssa-able array.
2368 // a[i] = v 2369 t := left.Left.Type 2370 n := t.NumElem() 2371 2372 i := s.expr(left.Right) // index 2373 if n == 0 { 2374 // The bounds check must fail. Might as well 2375 // ignore the actual index and just use zeros. 2376 z := s.constInt(types.Types[TINT], 0) 2377 s.boundsCheck(z, z) 2378 return 2379 } 2380 if n != 1 { 2381 s.Fatalf("assigning to non-1-length array") 2382 } 2383 // Rewrite to a = [1]{v} 2384 i = s.extendIndex(i, panicindex) 2385 s.boundsCheck(i, s.constInt(types.Types[TINT], 1)) 2386 v := s.newValue1(ssa.OpArrayMake1, t, right) 2387 s.assign(left.Left, v, false, 0) 2388 return 2389 } 2390 // Update variable assignment. 2391 s.vars[left] = right 2392 s.addNamedValue(left, right) 2393 return 2394 } 2395 // Left is not ssa-able. Compute its address. 2396 addr := s.addr(left, false) 2397 if left.Op == ONAME && skip == 0 { 2398 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem()) 2399 } 2400 if isReflectHeaderDataField(left) { 2401 // Package unsafe's documentation says storing pointers into 2402 // reflect.SliceHeader and reflect.StringHeader's Data fields 2403 // is valid, even though they have type uintptr (#19168). 2404 // Mark it pointer type to signal the writebarrier pass to 2405 // insert a write barrier. 2406 t = types.Types[TUNSAFEPTR] 2407 } 2408 if deref { 2409 // Treat as a mem->mem move. 2410 var store *ssa.Value 2411 if right == nil { 2412 store = s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), addr, s.mem()) 2413 } else { 2414 store = s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), addr, right, s.mem()) 2415 } 2416 store.Aux = t 2417 s.vars[&memVar] = store 2418 return 2419 } 2420 // Treat as a store. 2421 s.storeType(t, addr, right, skip) 2422 } 2423 2424 // zeroVal returns the zero value for type t. 
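// For aggregates it recurses per component instead of zeroing memory, since
// SSA-able values live in registers. E.g. for a value of type
//
//	struct{ p *byte; n int }
//
// it builds, roughly, StructMake2(ConstNil, Const64 <int> [0]).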
2425 func (s *state) zeroVal(t *types.Type) *ssa.Value { 2426 switch { 2427 case t.IsInteger(): 2428 switch t.Size() { 2429 case 1: 2430 return s.constInt8(t, 0) 2431 case 2: 2432 return s.constInt16(t, 0) 2433 case 4: 2434 return s.constInt32(t, 0) 2435 case 8: 2436 return s.constInt64(t, 0) 2437 default: 2438 s.Fatalf("bad sized integer type %v", t) 2439 } 2440 case t.IsFloat(): 2441 switch t.Size() { 2442 case 4: 2443 return s.constFloat32(t, 0) 2444 case 8: 2445 return s.constFloat64(t, 0) 2446 default: 2447 s.Fatalf("bad sized float type %v", t) 2448 } 2449 case t.IsComplex(): 2450 switch t.Size() { 2451 case 8: 2452 z := s.constFloat32(types.Types[TFLOAT32], 0) 2453 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2454 case 16: 2455 z := s.constFloat64(types.Types[TFLOAT64], 0) 2456 return s.entryNewValue2(ssa.OpComplexMake, t, z, z) 2457 default: 2458 s.Fatalf("bad sized complex type %v", t) 2459 } 2460 2461 case t.IsString(): 2462 return s.constEmptyString(t) 2463 case t.IsPtrShaped(): 2464 return s.constNil(t) 2465 case t.IsBoolean(): 2466 return s.constBool(false) 2467 case t.IsInterface(): 2468 return s.constInterface(t) 2469 case t.IsSlice(): 2470 return s.constSlice(t) 2471 case t.IsStruct(): 2472 n := t.NumFields() 2473 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t) 2474 for i := 0; i < n; i++ { 2475 v.AddArg(s.zeroVal(t.FieldType(i))) 2476 } 2477 return v 2478 case t.IsArray(): 2479 switch t.NumElem() { 2480 case 0: 2481 return s.entryNewValue0(ssa.OpArrayMake0, t) 2482 case 1: 2483 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) 2484 } 2485 } 2486 s.Fatalf("zero for type %v not implemented", t) 2487 return nil 2488 } 2489 2490 type callKind int8 2491 2492 const ( 2493 callNormal callKind = iota 2494 callDefer 2495 callGo 2496 ) 2497 2498 var intrinsics map[intrinsicKey]intrinsicBuilder 2499 2500 // An intrinsicBuilder converts a call node n into an ssa value that 2501 // implements that call as an intrinsic. args is a list of arguments to the func. 2502 type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value 2503 2504 type intrinsicKey struct { 2505 arch *sys.Arch 2506 pkg string 2507 fn string 2508 } 2509 2510 func init() { 2511 intrinsics = map[intrinsicKey]intrinsicBuilder{} 2512 2513 var all []*sys.Arch 2514 var p4 []*sys.Arch 2515 var p8 []*sys.Arch 2516 for _, a := range sys.Archs { 2517 all = append(all, a) 2518 if a.PtrSize == 4 { 2519 p4 = append(p4, a) 2520 } else { 2521 p8 = append(p8, a) 2522 } 2523 } 2524 2525 // add adds the intrinsic b for pkg.fn for the given list of architectures. 2526 add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) { 2527 for _, a := range archs { 2528 intrinsics[intrinsicKey{a, pkg, fn}] = b 2529 } 2530 } 2531 // addF does the same as add but operates on architecture families. 2532 addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) { 2533 m := 0 2534 for _, f := range archFamilies { 2535 if f >= 32 { 2536 panic("too many architecture families") 2537 } 2538 m |= 1 << uint(f) 2539 } 2540 for _, a := range all { 2541 if m>>uint(a.Family)&1 != 0 { 2542 intrinsics[intrinsicKey{a, pkg, fn}] = b 2543 } 2544 } 2545 } 2546 // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists. 
2547 alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) { 2548 for _, a := range archs { 2549 if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok { 2550 intrinsics[intrinsicKey{a, pkg, fn}] = b 2551 } 2552 } 2553 } 2554 2555 /******** runtime ********/ 2556 if !instrumenting { 2557 add("runtime", "slicebytetostringtmp", 2558 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2559 // Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes 2560 // for the backend instead of slicebytetostringtmp calls 2561 // when not instrumenting. 2562 slice := args[0] 2563 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) 2564 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice) 2565 return s.newValue2(ssa.OpStringMake, n.Type, ptr, len) 2566 }, 2567 all...) 2568 } 2569 add("runtime", "KeepAlive", 2570 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2571 data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) 2572 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) 2573 return nil 2574 }, 2575 all...) 2576 2577 /******** runtime/internal/sys ********/ 2578 addF("runtime/internal/sys", "Ctz32", 2579 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2580 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 2581 }, 2582 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2583 addF("runtime/internal/sys", "Ctz64", 2584 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2585 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 2586 }, 2587 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) 2588 addF("runtime/internal/sys", "Bswap32", 2589 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2590 return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0]) 2591 }, 2592 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2593 addF("runtime/internal/sys", "Bswap64", 2594 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2595 return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0]) 2596 }, 2597 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X) 2598 2599 /******** runtime/internal/atomic ********/ 2600 addF("runtime/internal/atomic", "Load", 2601 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2602 v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem()) 2603 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2604 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2605 }, 2606 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2607 2608 addF("runtime/internal/atomic", "Load64", 2609 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2610 v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) 2611 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2612 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2613 }, 2614 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2615 addF("runtime/internal/atomic", "Loadp", 2616 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2617 v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) 2618 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2619 return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) 2620 }, 2621 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2622 2623 addF("runtime/internal/atomic", "Store", 2624 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2625 s.vars[&memVar] = 
s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) 2626 return nil 2627 }, 2628 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2629 addF("runtime/internal/atomic", "Store64", 2630 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2631 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) 2632 return nil 2633 }, 2634 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2635 addF("runtime/internal/atomic", "StorepNoWB", 2636 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2637 s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) 2638 return nil 2639 }, 2640 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS) 2641 2642 addF("runtime/internal/atomic", "Xchg", 2643 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2644 v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) 2645 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2646 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2647 }, 2648 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2649 addF("runtime/internal/atomic", "Xchg64", 2650 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2651 v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) 2652 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2653 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2654 }, 2655 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2656 2657 addF("runtime/internal/atomic", "Xadd", 2658 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2659 v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem()) 2660 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2661 return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) 2662 }, 2663 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2664 addF("runtime/internal/atomic", "Xadd64", 2665 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2666 v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) 2667 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2668 return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) 2669 }, 2670 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2671 2672 addF("runtime/internal/atomic", "Cas", 2673 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2674 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) 2675 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2676 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2677 }, 2678 sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) 2679 addF("runtime/internal/atomic", "Cas64", 2680 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2681 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) 2682 s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) 2683 return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) 2684 }, 2685 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) 2686 2687 addF("runtime/internal/atomic", "And8", 2688 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2689 s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) 2690 return nil 2691 }, 2692 
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2693 addF("runtime/internal/atomic", "Or8", 2694 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2695 s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) 2696 return nil 2697 }, 2698 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64) 2699 2700 alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...) 2701 alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...) 2702 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...) 2703 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...) 2704 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...) 2705 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...) 2706 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...) 2707 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...) 2708 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...) 2709 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...) 2710 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...) 2711 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...) 2712 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...) 2713 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...) 2714 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...) 2715 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...) 
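	// The p4/p8 split above resolves each word-sized entry point to the
	// fixed-size intrinsic matching the target's pointer width. Conceptually
	// (a loose sketch; the real mapping is the alias table built here):
	//
	//	// on 64-bit targets
	//	Loaduintptr(p) behaves like uintptr(Load64((*uint64)(unsafe.Pointer(p))))
	//	// on 32-bit targets
	//	Loaduintptr(p) behaves like uintptr(Load((*uint32)(unsafe.Pointer(p))))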
2716 2717 /******** math ********/ 2718 addF("math", "Sqrt", 2719 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2720 return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0]) 2721 }, 2722 sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) 2723 2724 /******** math/bits ********/ 2725 addF("math/bits", "TrailingZeros64", 2726 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2727 return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) 2728 }, 2729 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2730 addF("math/bits", "TrailingZeros32", 2731 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2732 return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) 2733 }, 2734 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2735 addF("math/bits", "TrailingZeros16", 2736 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2737 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 2738 c := s.constInt32(types.Types[TUINT32], 1<<16) 2739 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 2740 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 2741 }, 2742 sys.ARM, sys.MIPS) 2743 addF("math/bits", "TrailingZeros16", 2744 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2745 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 2746 c := s.constInt64(types.Types[TUINT64], 1<<16) 2747 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 2748 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 2749 }, 2750 sys.AMD64, sys.ARM64, sys.S390X) 2751 addF("math/bits", "TrailingZeros8", 2752 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2753 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 2754 c := s.constInt32(types.Types[TUINT32], 1<<8) 2755 y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c) 2756 return s.newValue1(ssa.OpCtz32, types.Types[TINT], y) 2757 }, 2758 sys.ARM, sys.MIPS) 2759 addF("math/bits", "TrailingZeros8", 2760 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2761 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 2762 c := s.constInt64(types.Types[TUINT64], 1<<8) 2763 y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) 2764 return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) 2765 }, 2766 sys.AMD64, sys.ARM64, sys.S390X) 2767 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...) 2768 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...) 2769 // ReverseBytes inlines correctly, no need to intrinsify it. 2770 // ReverseBytes16 lowers to a rotate, no need for anything special here. 
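	// The 1<<16 and 1<<8 constants OR'd in by the TrailingZeros16/8 builders
	// above act as a sentinel bit just past the input width: for nonzero x it
	// sits above every input bit and cannot change the count, and for x == 0
	// it makes the answer come out right, e.g. TrailingZeros16(0) must be 16
	// and ctz32(1<<16) == 16. In plain Go the trick is:
	//
	//	func trailingZeros16(x uint16) int {
	//		return bits.TrailingZeros32(uint32(x) | 1<<16)
	//	}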
2771 addF("math/bits", "Len64", 2772 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2773 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 2774 }, 2775 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2776 addF("math/bits", "Len32", 2777 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2778 if s.config.PtrSize == 4 { 2779 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 2780 } 2781 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0]) 2782 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 2783 }, 2784 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2785 addF("math/bits", "Len16", 2786 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2787 if s.config.PtrSize == 4 { 2788 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0]) 2789 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 2790 } 2791 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0]) 2792 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 2793 }, 2794 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2795 // Note: disabled on AMD64 because the Go code is faster! 2796 addF("math/bits", "Len8", 2797 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2798 if s.config.PtrSize == 4 { 2799 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) 2800 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x) 2801 } 2802 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0]) 2803 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x) 2804 }, 2805 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2806 2807 addF("math/bits", "Len", 2808 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2809 if s.config.PtrSize == 4 { 2810 return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0]) 2811 } 2812 return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0]) 2813 }, 2814 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) 2815 // LeadingZeros is handled because it trivially calls Len. 
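	// (LeadingZeros needs no builder of its own: for each width N,
	// bits.LeadingZerosN(x) is simply N - bits.LenN(x), so intrinsifying Len
	// is enough and the subtraction inlines.)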
2816 addF("math/bits", "Reverse64", 2817 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2818 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 2819 }, 2820 sys.ARM64) 2821 addF("math/bits", "Reverse32", 2822 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2823 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 2824 }, 2825 sys.ARM64) 2826 addF("math/bits", "Reverse16", 2827 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2828 return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0]) 2829 }, 2830 sys.ARM64) 2831 addF("math/bits", "Reverse8", 2832 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2833 return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0]) 2834 }, 2835 sys.ARM64) 2836 addF("math/bits", "Reverse", 2837 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2838 if s.config.PtrSize == 4 { 2839 return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0]) 2840 } 2841 return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0]) 2842 }, 2843 sys.ARM64) 2844 makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2845 return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2846 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: syslook("support_popcnt").Sym.Linksym()}) 2847 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb) 2848 v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem()) 2849 b := s.endBlock() 2850 b.Kind = ssa.BlockIf 2851 b.SetControl(v) 2852 bTrue := s.f.NewBlock(ssa.BlockPlain) 2853 bFalse := s.f.NewBlock(ssa.BlockPlain) 2854 bEnd := s.f.NewBlock(ssa.BlockPlain) 2855 b.AddEdgeTo(bTrue) 2856 b.AddEdgeTo(bFalse) 2857 b.Likely = ssa.BranchLikely // most machines have popcnt nowadays 2858 2859 // We have the intrinsic - use it directly. 2860 s.startBlock(bTrue) 2861 op := op64 2862 if s.config.PtrSize == 4 { 2863 op = op32 2864 } 2865 s.vars[n] = s.newValue1(op, types.Types[TINT], args[0]) 2866 s.endBlock().AddEdgeTo(bEnd) 2867 2868 // Call the pure Go version. 2869 s.startBlock(bFalse) 2870 a := s.call(n, callNormal) 2871 s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem()) 2872 s.endBlock().AddEdgeTo(bEnd) 2873 2874 // Merge results. 2875 s.startBlock(bEnd) 2876 return s.variable(n, types.Types[TINT]) 2877 } 2878 } 2879 addF("math/bits", "OnesCount64", 2880 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64), 2881 sys.AMD64) 2882 addF("math/bits", "OnesCount64", 2883 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2884 return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0]) 2885 }, 2886 sys.PPC64) 2887 addF("math/bits", "OnesCount32", 2888 makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32), 2889 sys.AMD64) 2890 addF("math/bits", "OnesCount32", 2891 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2892 return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0]) 2893 }, 2894 sys.PPC64) 2895 addF("math/bits", "OnesCount16", 2896 makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16), 2897 sys.AMD64) 2898 // Note: no OnesCount8, the Go implementation is faster - just a table load. 2899 addF("math/bits", "OnesCount", 2900 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32), 2901 sys.AMD64) 2902 2903 /******** sync/atomic ********/ 2904 2905 // Note: these are disabled by flag_race in findIntrinsic below. 2906 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...) 
2907 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...) 2908 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...) 2909 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...) 2910 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...) 2911 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...) 2912 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...) 2913 2914 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...) 2915 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...) 2916 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. 2917 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...) 2918 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...) 2919 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...) 2920 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...) 2921 2922 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...) 2923 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...) 2924 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...) 2925 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...) 2926 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...) 2927 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...) 2928 2929 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...) 2930 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...) 2931 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...) 2932 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...) 2933 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...) 2934 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...) 2935 2936 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...) 2937 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...) 2938 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...) 2939 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...) 2940 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...) 2941 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...) 2942 2943 /******** math/big ********/ 2944 add("math/big", "mulWW", 2945 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2946 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) 2947 }, 2948 sys.ArchAMD64) 2949 add("math/big", "divWW", 2950 func(s *state, n *Node, args []*ssa.Value) *ssa.Value { 2951 return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) 2952 }, 2953 sys.ArchAMD64) 2954 } 2955 2956 // findIntrinsic returns a function which builds the SSA equivalent of the 2957 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. 
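// For example, when compiling for AMD64, a call to math.Sqrt resolves through
// intrinsics[intrinsicKey{sys.ArchAMD64, "math", "Sqrt"}] to the OpSqrt
// builder registered above, so the call becomes a single SSA op (and
// typically a single hardware sqrt instruction) instead of a function call.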
2958 func findIntrinsic(sym *types.Sym) intrinsicBuilder { 2959 if ssa.IntrinsicsDisable { 2960 return nil 2961 } 2962 if sym == nil || sym.Pkg == nil { 2963 return nil 2964 } 2965 pkg := sym.Pkg.Path 2966 if sym.Pkg == localpkg { 2967 pkg = myimportpath 2968 } 2969 if flag_race && pkg == "sync/atomic" { 2970 // The race detector needs to be able to intercept these calls. 2971 // We can't intrinsify them. 2972 return nil 2973 } 2974 fn := sym.Name 2975 return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] 2976 } 2977 2978 func isIntrinsicCall(n *Node) bool { 2979 if n == nil || n.Left == nil { 2980 return false 2981 } 2982 return findIntrinsic(n.Left.Sym) != nil 2983 } 2984 2985 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. 2986 func (s *state) intrinsicCall(n *Node) *ssa.Value { 2987 v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n)) 2988 if ssa.IntrinsicsDebug > 0 { 2989 x := v 2990 if x == nil { 2991 x = s.mem() 2992 } 2993 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { 2994 x = x.Args[0] 2995 } 2996 Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString()) 2997 } 2998 return v 2999 } 3000 3001 type callArg struct { 3002 offset int64 3003 v *ssa.Value 3004 } 3005 type byOffset []callArg 3006 3007 func (x byOffset) Len() int { return len(x) } 3008 func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 3009 func (x byOffset) Less(i, j int) bool { 3010 return x[i].offset < x[j].offset 3011 } 3012 3013 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. 3014 func (s *state) intrinsicArgs(n *Node) []*ssa.Value { 3015 // This code is complicated because of how walk transforms calls. For a call node, 3016 // each entry in n.List is either an assignment to OINDREGSP which actually 3017 // stores an arg, or an assignment to a temporary which computes an arg 3018 // which is later assigned. 3019 // The args can also be out of order. 3020 // TODO: when walk goes away someday, this code can go away also. 3021 var args []callArg 3022 temps := map[*Node]*ssa.Value{} 3023 for _, a := range n.List.Slice() { 3024 if a.Op != OAS { 3025 s.Fatalf("non-assignment as a function argument %s", opnames[a.Op]) 3026 } 3027 l, r := a.Left, a.Right 3028 switch l.Op { 3029 case ONAME: 3030 // Evaluate and store to "temporary". 3031 // Walk ensures these temporaries are dead outside of n. 3032 temps[l] = s.expr(r) 3033 case OINDREGSP: 3034 // Store a value to an argument slot. 3035 var v *ssa.Value 3036 if x, ok := temps[r]; ok { 3037 // This is a previously computed temporary. 3038 v = x 3039 } else { 3040 // This is an explicit value; evaluate it. 3041 v = s.expr(r) 3042 } 3043 args = append(args, callArg{l.Xoffset, v}) 3044 default: 3045 s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op]) 3046 } 3047 } 3048 sort.Sort(byOffset(args)) 3049 res := make([]*ssa.Value, len(args)) 3050 for i, a := range args { 3051 res[i] = a.v 3052 } 3053 return res 3054 } 3055 3056 // Calls the function n using the specified call type. 3057 // Returns the address of the return value (or nil if none). 
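// For callDefer/callGo, the outgoing args area is laid out, relative to
// Ctxt.FixedFrameSize(), roughly as
//
//	+0:          argsize (uint32), the byte count of the copied arguments
//	+Widthptr:   closure, the func value to run
//	+2*Widthptr: the call's own arguments (receiver first, if any)
//
// which is why walk pre-offsets the argument slots by 2*Widthptr and why
// stksize grows by 2*Widthptr below; the layout is the contract with
// runtime.deferproc and runtime.newproc.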
3058 func (s *state) call(n *Node, k callKind) *ssa.Value { 3059 var sym *types.Sym // target symbol (if static) 3060 var closure *ssa.Value // ptr to closure to run (if dynamic) 3061 var codeptr *ssa.Value // ptr to target code (if dynamic) 3062 var rcvr *ssa.Value // receiver to set 3063 fn := n.Left 3064 switch n.Op { 3065 case OCALLFUNC: 3066 if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC { 3067 sym = fn.Sym 3068 break 3069 } 3070 closure = s.expr(fn) 3071 case OCALLMETH: 3072 if fn.Op != ODOTMETH { 3073 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) 3074 } 3075 if k == callNormal { 3076 sym = fn.Sym 3077 break 3078 } 3079 // Make a name n2 for the function. 3080 // fn.Sym might be sync.(*Mutex).Unlock. 3081 // Make a PFUNC node out of that, then evaluate it. 3082 // We get back an SSA value representing &sync.(*Mutex).Unlock·f. 3083 // We can then pass that to defer or go. 3084 n2 := newnamel(fn.Pos, fn.Sym) 3085 n2.Name.Curfn = s.curfn 3086 n2.SetClass(PFUNC) 3087 n2.Pos = fn.Pos 3088 n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. 3089 closure = s.expr(n2) 3090 // Note: receiver is already assigned in n.List, so we don't 3091 // want to set it here. 3092 case OCALLINTER: 3093 if fn.Op != ODOTINTER { 3094 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) 3095 } 3096 i := s.expr(fn.Left) 3097 itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i) 3098 if k != callNormal { 3099 s.nilCheck(itab) 3100 } 3101 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab 3102 itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) 3103 if k == callNormal { 3104 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem()) 3105 } else { 3106 closure = itab 3107 } 3108 rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i) 3109 } 3110 dowidth(fn.Type) 3111 stksize := fn.Type.ArgWidth() // includes receiver 3112 3113 // Run all argument assignments. The arg slots have already 3114 // been offset by the appropriate amount (+2*widthptr for go/defer, 3115 // +widthptr for interface calls). 3116 // For OCALLMETH, the receiver is set in these statements. 3117 s.stmtList(n.List) 3118 3119 // Set receiver (for interface calls) 3120 if rcvr != nil { 3121 argStart := Ctxt.FixedFrameSize() 3122 if k != callNormal { 3123 argStart += int64(2 * Widthptr) 3124 } 3125 addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) 3126 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem()) 3127 } 3128 3129 // Defer/go args 3130 if k != callNormal { 3131 // Write argsize and closure (args to Newproc/Deferproc). 
3132 argStart := Ctxt.FixedFrameSize() 3133 argsize := s.constInt32(types.Types[TUINT32], int32(stksize)) 3134 addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) 3135 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINT32], addr, argsize, s.mem()) 3136 addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr)) 3137 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem()) 3138 stksize += 2 * int64(Widthptr) 3139 } 3140 3141 // call target 3142 var call *ssa.Value 3143 switch { 3144 case k == callDefer: 3145 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Deferproc, s.mem()) 3146 case k == callGo: 3147 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Newproc, s.mem()) 3148 case closure != nil: 3149 codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem()) 3150 call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem()) 3151 case codeptr != nil: 3152 call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem()) 3153 case sym != nil: 3154 call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem()) 3155 default: 3156 Fatalf("bad call type %v %v", n.Op, n) 3157 } 3158 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them 3159 s.vars[&memVar] = call 3160 3161 // Finish block for defers 3162 if k == callDefer { 3163 b := s.endBlock() 3164 b.Kind = ssa.BlockDefer 3165 b.SetControl(call) 3166 bNext := s.f.NewBlock(ssa.BlockPlain) 3167 b.AddEdgeTo(bNext) 3168 // Add recover edge to exit code. 3169 r := s.f.NewBlock(ssa.BlockPlain) 3170 s.startBlock(r) 3171 s.exit() 3172 b.AddEdgeTo(r) 3173 b.Likely = ssa.BranchLikely 3174 s.startBlock(bNext) 3175 } 3176 3177 res := n.Left.Type.Results() 3178 if res.NumFields() == 0 || k != callNormal { 3179 // call has no return value. Continue with the next statement. 3180 return nil 3181 } 3182 fp := res.Field(0) 3183 return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()) 3184 } 3185 3186 // etypesign returns the signed-ness of e, for integer/pointer etypes. 3187 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer. 3188 func etypesign(e types.EType) int8 { 3189 switch e { 3190 case TINT8, TINT16, TINT32, TINT64, TINT: 3191 return -1 3192 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR: 3193 return +1 3194 } 3195 return 0 3196 } 3197 3198 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node. 3199 // This improves the effectiveness of cse by using the same Aux values for the 3200 // same symbols. 3201 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} { 3202 switch sym.(type) { 3203 default: 3204 s.Fatalf("sym %v is of unknown type %T", sym, sym) 3205 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol: 3206 // these are the only valid types 3207 } 3208 3209 if lsym, ok := s.varsyms[n]; ok { 3210 return lsym 3211 } 3212 s.varsyms[n] = sym 3213 return sym 3214 } 3215 3216 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. 3217 // The value that the returned Value represents is guaranteed to be non-nil. 3218 // If bounded is true then this address does not require a nil check for its operand 3219 // even if that would otherwise be implied. 
3220 func (s *state) addr(n *Node, bounded bool) *ssa.Value { 3221 t := types.NewPtr(n.Type) 3222 switch n.Op { 3223 case ONAME: 3224 switch n.Class() { 3225 case PEXTERN: 3226 // global variable 3227 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Sym.Linksym()}) 3228 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb) 3229 // TODO: Make OpAddr use AuxInt as well as Aux. 3230 if n.Xoffset != 0 { 3231 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) 3232 } 3233 return v 3234 case PPARAM: 3235 // parameter slot 3236 v := s.decladdrs[n] 3237 if v != nil { 3238 return v 3239 } 3240 if n == nodfp { 3241 // Special arg that points to the frame pointer (Used by ORECOVER). 3242 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n}) 3243 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp) 3244 } 3245 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) 3246 return nil 3247 case PAUTO: 3248 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Node: n}) 3249 return s.newValue1A(ssa.OpAddr, t, aux, s.sp) 3250 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 3251 // ensure that we reuse symbols for out parameters so 3252 // that cse works on their addresses 3253 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n}) 3254 return s.newValue1A(ssa.OpAddr, t, aux, s.sp) 3255 default: 3256 s.Fatalf("variable address class %v not implemented", classnames[n.Class()]) 3257 return nil 3258 } 3259 case OINDREGSP: 3260 // indirect off REGSP 3261 // used for storing/loading arguments/returns to/from callees 3262 return s.constOffPtrSP(t, n.Xoffset) 3263 case OINDEX: 3264 if n.Left.Type.IsSlice() { 3265 a := s.expr(n.Left) 3266 i := s.expr(n.Right) 3267 i = s.extendIndex(i, panicindex) 3268 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a) 3269 if !n.Bounded() { 3270 s.boundsCheck(i, len) 3271 } 3272 p := s.newValue1(ssa.OpSlicePtr, t, a) 3273 return s.newValue2(ssa.OpPtrIndex, t, p, i) 3274 } else { // array 3275 a := s.addr(n.Left, bounded) 3276 i := s.expr(n.Right) 3277 i = s.extendIndex(i, panicindex) 3278 len := s.constInt(types.Types[TINT], n.Left.Type.NumElem()) 3279 if !n.Bounded() { 3280 s.boundsCheck(i, len) 3281 } 3282 return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i) 3283 } 3284 case OIND: 3285 return s.exprPtr(n.Left, bounded, n.Pos) 3286 case ODOT: 3287 p := s.addr(n.Left, bounded) 3288 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3289 case ODOTPTR: 3290 p := s.exprPtr(n.Left, bounded, n.Pos) 3291 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) 3292 case OCLOSUREVAR: 3293 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, 3294 s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)) 3295 case OCONVNOP: 3296 addr := s.addr(n.Left, bounded) 3297 return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type 3298 case OCALLFUNC, OCALLINTER, OCALLMETH: 3299 return s.call(n, callNormal) 3300 case ODOTTYPE: 3301 v, _ := s.dottype(n, false) 3302 if v.Op != ssa.OpLoad { 3303 s.Fatalf("dottype of non-load") 3304 } 3305 if v.Args[1] != s.mem() { 3306 s.Fatalf("memory no longer live from dottype load") 3307 } 3308 return v.Args[0] 3309 default: 3310 s.Fatalf("unhandled addr %v", n.Op) 3311 return nil 3312 } 3313 } 3314 3315 // canSSA reports whether n is SSA-able. 3316 // n must be an ONAME (or an ODOT sequence with an ONAME base). 
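// For example (editor's note): a small, never-address-taken local struct
// is SSA-able, while globals, address-taken variables, and (in functions
// containing defers) results are not; see the checks below.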
3317 func (s *state) canSSA(n *Node) bool { 3318 if Debug['N'] != 0 { 3319 return false 3320 } 3321 for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) { 3322 n = n.Left 3323 } 3324 if n.Op != ONAME { 3325 return false 3326 } 3327 if n.Addrtaken() { 3328 return false 3329 } 3330 if n.isParamHeapCopy() { 3331 return false 3332 } 3333 if n.Class() == PAUTOHEAP { 3334 Fatalf("canSSA of PAUTOHEAP %v", n) 3335 } 3336 switch n.Class() { 3337 case PEXTERN: 3338 return false 3339 case PPARAMOUT: 3340 if s.hasdefer { 3341 // TODO: handle this case? Named return values must be 3342 // in memory so that the deferred function can see them. 3343 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } 3344 // Or maybe not, see issue 18860. Even unnamed return values 3345 // must be written back so if a defer recovers, the caller can see them. 3346 return false 3347 } 3348 if s.cgoUnsafeArgs { 3349 // Cgo effectively takes the address of all result args, 3350 // but the compiler can't see that. 3351 return false 3352 } 3353 } 3354 if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" { 3355 // wrappers generated by genwrapper need to update 3356 // the .this pointer in place. 3357 // TODO: treat as a PPARAMOUT? 3358 return false 3359 } 3360 return canSSAType(n.Type) 3361 // TODO: try to make more variables SSAable? 3362 } 3363 3364 // canSSAType reports whether variables of type t are SSA-able. 3365 func canSSAType(t *types.Type) bool { 3366 dowidth(t) 3367 if t.Width > int64(4*Widthptr) { 3368 // 4*Widthptr is an arbitrary constant. We want it 3369 // to be at least 3*Widthptr so slices can be registerized. 3370 // Too big and we'll introduce too much register pressure. 3371 return false 3372 } 3373 switch t.Etype { 3374 case TARRAY: 3375 // We can't do larger arrays because dynamic indexing is 3376 // not supported on SSA variables. 3377 // TODO: allow if all indexes are constant. 3378 if t.NumElem() <= 1 { 3379 return canSSAType(t.Elem()) 3380 } 3381 return false 3382 case TSTRUCT: 3383 if t.NumFields() > ssa.MaxStruct { 3384 return false 3385 } 3386 for _, t1 := range t.Fields().Slice() { 3387 if !canSSAType(t1.Type) { 3388 return false 3389 } 3390 } 3391 return true 3392 default: 3393 return true 3394 } 3395 } 3396 3397 // exprPtr evaluates n to a pointer and nil-checks it. 3398 func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value { 3399 p := s.expr(n) 3400 if bounded || n.NonNil() { 3401 if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 { 3402 s.f.Warnl(lineno, "removed nil check") 3403 } 3404 return p 3405 } 3406 s.nilCheck(p) 3407 return p 3408 } 3409 3410 // nilCheck generates nil pointer checking code. 3411 // Used only for automatically inserted nil checks, 3412 // not for user code like 'x != nil'. 3413 func (s *state) nilCheck(ptr *ssa.Value) { 3414 if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() { 3415 return 3416 } 3417 s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem()) 3418 } 3419 3420 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not. 3421 // Starts a new block on return. 3422 // idx is already converted to full int width. 3423 func (s *state) boundsCheck(idx, len *ssa.Value) { 3424 if Debug['B'] != 0 { 3425 return 3426 } 3427 3428 // bounds check 3429 cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len) 3430 s.check(cmp, panicindex) 3431 } 3432 3433 // sliceBoundsCheck generates slice bounds checking code. 
Checks if 0 <= idx <= len, branches to exit if not. 3434 // Starts a new block on return. 3435 // idx and len are already converted to full int width. 3436 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) { 3437 if Debug['B'] != 0 { 3438 return 3439 } 3440 3441 // bounds check 3442 cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len) 3443 s.check(cmp, panicslice) 3444 } 3445 3446 // If cmp (a bool) is false, panic using the given function. 3447 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { 3448 b := s.endBlock() 3449 b.Kind = ssa.BlockIf 3450 b.SetControl(cmp) 3451 b.Likely = ssa.BranchLikely 3452 bNext := s.f.NewBlock(ssa.BlockPlain) 3453 line := s.peekPos() 3454 pos := Ctxt.PosTable.Pos(line) 3455 fl := funcLine{f: fn, file: pos.Filename(), line: pos.Line()} 3456 bPanic := s.panics[fl] 3457 if bPanic == nil { 3458 bPanic = s.f.NewBlock(ssa.BlockPlain) 3459 s.panics[fl] = bPanic 3460 s.startBlock(bPanic) 3461 // The panic call takes/returns memory to ensure that the right 3462 // memory state is observed if the panic happens. 3463 s.rtcall(fn, false, nil) 3464 } 3465 b.AddEdgeTo(bNext) 3466 b.AddEdgeTo(bPanic) 3467 s.startBlock(bNext) 3468 } 3469 3470 func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { 3471 needcheck := true 3472 switch b.Op { 3473 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64: 3474 if b.AuxInt != 0 { 3475 needcheck = false 3476 } 3477 } 3478 if needcheck { 3479 // do a size-appropriate check for zero 3480 cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type)) 3481 s.check(cmp, panicdivide) 3482 } 3483 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) 3484 } 3485 3486 // rtcall issues a call to the given runtime function fn with the listed args. 3487 // Returns a slice of results of the given result types. 3488 // The call is added to the end of the current block. 3489 // If returns is false, the block is marked as an exit block. 3490 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value { 3491 // Write args to the stack 3492 off := Ctxt.FixedFrameSize() 3493 for _, arg := range args { 3494 t := arg.Type 3495 off = Rnd(off, t.Alignment()) 3496 ptr := s.constOffPtrSP(t.PtrTo(), off) 3497 size := t.Size() 3498 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, ptr, arg, s.mem()) 3499 off += size 3500 } 3501 off = Rnd(off, int64(Widthreg)) 3502 3503 // Issue call 3504 call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem()) 3505 s.vars[&memVar] = call 3506 3507 if !returns { 3508 // Finish block 3509 b := s.endBlock() 3510 b.Kind = ssa.BlockExit 3511 b.SetControl(call) 3512 call.AuxInt = off - Ctxt.FixedFrameSize() 3513 if len(results) > 0 { 3514 Fatalf("panic call can't have results") 3515 } 3516 return nil 3517 } 3518 3519 // Load results 3520 res := make([]*ssa.Value, len(results)) 3521 for i, t := range results { 3522 off = Rnd(off, t.Alignment()) 3523 ptr := s.constOffPtrSP(types.NewPtr(t), off) 3524 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) 3525 off += t.Size() 3526 } 3527 off = Rnd(off, int64(Widthptr)) 3528 3529 // Remember how much callee stack space we needed. 3530 call.AuxInt = off 3531 3532 return res 3533 } 3534 3535 // do *left = right for type t. 3536 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) { 3537 if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) { 3538 // Known to not have write barrier. Store the whole type. 
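// Editor's note: writes to stack slots never need a write barrier, since
// the garbage collector scans goroutine stacks directly rather than
// relying on the barrier; hence the ssa.IsStackAddr test above.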
3539 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem()) 3540 return 3541 } 3542 3543 // store scalar fields first, so write barrier stores for 3544 // pointer fields can be grouped together, and scalar values 3545 // don't need to be live across the write barrier call. 3546 // TODO: if the writebarrier pass knows how to reorder stores, 3547 // we can do a single store here as long as skip==0. 3548 s.storeTypeScalars(t, left, right, skip) 3549 if skip&skipPtr == 0 && types.Haspointers(t) { 3550 s.storeTypePtrs(t, left, right) 3551 } 3552 } 3553 3554 // do *left = right for all scalar (non-pointer) parts of t. 3555 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) { 3556 switch { 3557 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): 3558 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem()) 3559 case t.IsPtrShaped(): 3560 // no scalar fields. 3561 case t.IsString(): 3562 if skip&skipLen != 0 { 3563 return 3564 } 3565 len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right) 3566 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 3567 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem()) 3568 case t.IsSlice(): 3569 if skip&skipLen == 0 { 3570 len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right) 3571 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left) 3572 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem()) 3573 } 3574 if skip&skipCap == 0 { 3575 cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right) 3576 capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left) 3577 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capAddr, cap, s.mem()) 3578 } 3579 case t.IsInterface(): 3580 // itab field doesn't need a write barrier (even though it is a pointer). 3581 itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right) 3582 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], left, itab, s.mem()) 3583 case t.IsStruct(): 3584 n := t.NumFields() 3585 for i := 0; i < n; i++ { 3586 ft := t.FieldType(i) 3587 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3588 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3589 s.storeTypeScalars(ft, addr, val, 0) 3590 } 3591 case t.IsArray() && t.NumElem() == 0: 3592 // nothing 3593 case t.IsArray() && t.NumElem() == 1: 3594 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0) 3595 default: 3596 s.Fatalf("bad write barrier type %v", t) 3597 } 3598 } 3599 3600 // do *left = right for all pointer parts of t. 3601 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { 3602 switch { 3603 case t.IsPtrShaped(): 3604 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem()) 3605 case t.IsString(): 3606 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right) 3607 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem()) 3608 case t.IsSlice(): 3609 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right) 3610 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem()) 3611 case t.IsInterface(): 3612 // itab field is treated as a scalar. 
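// Editor's note: the itab word was already written by storeTypeScalars,
// so only the data word below gets the pointer-store treatment.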
3613 idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right) 3614 idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left) 3615 s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem()) 3616 case t.IsStruct(): 3617 n := t.NumFields() 3618 for i := 0; i < n; i++ { 3619 ft := t.FieldType(i) 3620 if !types.Haspointers(ft) { 3621 continue 3622 } 3623 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) 3624 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right) 3625 s.storeTypePtrs(ft, addr, val) 3626 } 3627 case t.IsArray() && t.NumElem() == 0: 3628 // nothing 3629 case t.IsArray() && t.NumElem() == 1: 3630 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right)) 3631 default: 3632 s.Fatalf("bad write barrier type %v", t) 3633 } 3634 } 3635 3636 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result. 3637 // i,j,k may be nil, in which case they are set to their default value. 3638 // t is a slice, ptr to array, or string type. 3639 func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) { 3640 var elemtype *types.Type 3641 var ptrtype *types.Type 3642 var ptr *ssa.Value 3643 var len *ssa.Value 3644 var cap *ssa.Value 3645 zero := s.constInt(types.Types[TINT], 0) 3646 switch { 3647 case t.IsSlice(): 3648 elemtype = t.Elem() 3649 ptrtype = types.NewPtr(elemtype) 3650 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v) 3651 len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v) 3652 cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v) 3653 case t.IsString(): 3654 elemtype = types.Types[TUINT8] 3655 ptrtype = types.NewPtr(elemtype) 3656 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v) 3657 len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v) 3658 cap = len 3659 case t.IsPtr(): 3660 if !t.Elem().IsArray() { 3661 s.Fatalf("bad ptr to array in slice %v\n", t) 3662 } 3663 elemtype = t.Elem().Elem() 3664 ptrtype = types.NewPtr(elemtype) 3665 s.nilCheck(v) 3666 ptr = v 3667 len = s.constInt(types.Types[TINT], t.Elem().NumElem()) 3668 cap = len 3669 default: 3670 s.Fatalf("bad type in slice %v\n", t) 3671 } 3672 3673 // Set default values 3674 if i == nil { 3675 i = zero 3676 } 3677 if j == nil { 3678 j = len 3679 } 3680 if k == nil { 3681 k = cap 3682 } 3683 3684 // Panic if slice indices are not in bounds. 3685 s.sliceBoundsCheck(i, j) 3686 if j != k { 3687 s.sliceBoundsCheck(j, k) 3688 } 3689 if k != cap { 3690 s.sliceBoundsCheck(k, cap) 3691 } 3692 3693 // Generate the following code assuming that indexes are in bounds. 3694 // The masking is to make sure that we don't generate a slice 3695 // that points to the next object in memory. 3696 // rlen = j - i 3697 // rcap = k - i 3698 // delta = i * elemsize 3699 // rptr = p + delta&mask(rcap) 3700 // result = (SliceMake rptr rlen rcap) 3701 // where mask(x) is 0 if x==0 and -1 if x>0. 3702 subOp := s.ssaOp(OSUB, types.Types[TINT]) 3703 mulOp := s.ssaOp(OMUL, types.Types[TINT]) 3704 andOp := s.ssaOp(OAND, types.Types[TINT]) 3705 rlen := s.newValue2(subOp, types.Types[TINT], j, i) 3706 var rcap *ssa.Value 3707 switch { 3708 case t.IsString(): 3709 // Capacity of the result is unimportant. However, we use 3710 // rcap to test if we've generated a zero-length slice. 3711 // Use length of strings for that. 
3712 rcap = rlen 3713 case j == k: 3714 rcap = rlen 3715 default: 3716 rcap = s.newValue2(subOp, types.Types[TINT], k, i) 3717 } 3718 3719 var rptr *ssa.Value 3720 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 { 3721 // No pointer arithmetic necessary. 3722 rptr = ptr 3723 } else { 3724 // delta = # of bytes to offset pointer by. 3725 delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width)) 3726 // If we're slicing to the point where the capacity is zero, 3727 // zero out the delta. 3728 mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap) 3729 delta = s.newValue2(andOp, types.Types[TINT], delta, mask) 3730 // Compute rptr = ptr + delta 3731 rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta) 3732 } 3733 3734 return rptr, rlen, rcap 3735 } 3736 3737 type u642fcvtTab struct { 3738 geq, cvt2F, and, rsh, or, add ssa.Op 3739 one func(*state, *types.Type, int64) *ssa.Value 3740 } 3741 3742 var u64_f64 u642fcvtTab = u642fcvtTab{ 3743 geq: ssa.OpGeq64, 3744 cvt2F: ssa.OpCvt64to64F, 3745 and: ssa.OpAnd64, 3746 rsh: ssa.OpRsh64Ux64, 3747 or: ssa.OpOr64, 3748 add: ssa.OpAdd64F, 3749 one: (*state).constInt64, 3750 } 3751 3752 var u64_f32 u642fcvtTab = u642fcvtTab{ 3753 geq: ssa.OpGeq64, 3754 cvt2F: ssa.OpCvt64to32F, 3755 and: ssa.OpAnd64, 3756 rsh: ssa.OpRsh64Ux64, 3757 or: ssa.OpOr64, 3758 add: ssa.OpAdd32F, 3759 one: (*state).constInt64, 3760 } 3761 3762 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3763 return s.uint64Tofloat(&u64_f64, n, x, ft, tt) 3764 } 3765 3766 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3767 return s.uint64Tofloat(&u64_f32, n, x, ft, tt) 3768 } 3769 3770 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3771 // if x >= 0 { 3772 // result = (floatY) x 3773 // } else { 3774 // y = uintX(x) ; y = x & 1 3775 // z = uintX(x) ; z = z >> 1 3777 // z = z | y 3778 // result = floatY(z) 3779 // result = result + result 3780 // } 3781 // 3782 // Code borrowed from old code generator. 3783 // What's going on: large 64-bit "unsigned" looks like 3784 // negative number to hardware's integer-to-float 3785 // conversion. However, because the mantissa is only 3786 // 63 bits, we don't need the LSB, so instead we do an 3787 // unsigned right shift (divide by two), convert, and 3788 // double. However, before we do that, we need to be 3789 // sure that we do not lose a "1" if that made the 3790 // difference in the resulting rounding. Therefore, we 3791 // preserve it, and OR (not ADD) it back in. The case 3792 // that matters is when the eleven discarded bits are 3793 // equal to 10000000001; that rounds up, and the 1 cannot 3794 // be lost else it would round down if the LSB of the 3795 // candidate mantissa is 0. 
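// Worked example (editor's sketch): for x = 1<<63, y = x&1 = 0 and
// z = x>>1 = 1<<62, which converts exactly to the float 2^62;
// doubling then yields 2^63, the correct value of x.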
3796 cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft)) 3797 b := s.endBlock() 3798 b.Kind = ssa.BlockIf 3799 b.SetControl(cmp) 3800 b.Likely = ssa.BranchLikely 3801 3802 bThen := s.f.NewBlock(ssa.BlockPlain) 3803 bElse := s.f.NewBlock(ssa.BlockPlain) 3804 bAfter := s.f.NewBlock(ssa.BlockPlain) 3805 3806 b.AddEdgeTo(bThen) 3807 s.startBlock(bThen) 3808 a0 := s.newValue1(cvttab.cvt2F, tt, x) 3809 s.vars[n] = a0 3810 s.endBlock() 3811 bThen.AddEdgeTo(bAfter) 3812 3813 b.AddEdgeTo(bElse) 3814 s.startBlock(bElse) 3815 one := cvttab.one(s, ft, 1) 3816 y := s.newValue2(cvttab.and, ft, x, one) 3817 z := s.newValue2(cvttab.rsh, ft, x, one) 3818 z = s.newValue2(cvttab.or, ft, z, y) 3819 a := s.newValue1(cvttab.cvt2F, tt, z) 3820 a1 := s.newValue2(cvttab.add, tt, a, a) 3821 s.vars[n] = a1 3822 s.endBlock() 3823 bElse.AddEdgeTo(bAfter) 3824 3825 s.startBlock(bAfter) 3826 return s.variable(n, n.Type) 3827 } 3828 3829 type u322fcvtTab struct { 3830 cvtI2F, cvtF2F ssa.Op 3831 } 3832 3833 var u32_f64 u322fcvtTab = u322fcvtTab{ 3834 cvtI2F: ssa.OpCvt32to64F, 3835 cvtF2F: ssa.OpCopy, 3836 } 3837 3838 var u32_f32 u322fcvtTab = u322fcvtTab{ 3839 cvtI2F: ssa.OpCvt32to32F, 3840 cvtF2F: ssa.OpCvt64Fto32F, 3841 } 3842 3843 func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3844 return s.uint32Tofloat(&u32_f64, n, x, ft, tt) 3845 } 3846 3847 func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3848 return s.uint32Tofloat(&u32_f32, n, x, ft, tt) 3849 } 3850 3851 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3852 // if x >= 0 { 3853 // result = floatY(x) 3854 // } else { 3855 // result = floatY(float64(x) + (1<<32)) 3856 // } 3857 cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft)) 3858 b := s.endBlock() 3859 b.Kind = ssa.BlockIf 3860 b.SetControl(cmp) 3861 b.Likely = ssa.BranchLikely 3862 3863 bThen := s.f.NewBlock(ssa.BlockPlain) 3864 bElse := s.f.NewBlock(ssa.BlockPlain) 3865 bAfter := s.f.NewBlock(ssa.BlockPlain) 3866 3867 b.AddEdgeTo(bThen) 3868 s.startBlock(bThen) 3869 a0 := s.newValue1(cvttab.cvtI2F, tt, x) 3870 s.vars[n] = a0 3871 s.endBlock() 3872 bThen.AddEdgeTo(bAfter) 3873 3874 b.AddEdgeTo(bElse) 3875 s.startBlock(bElse) 3876 a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x) 3877 twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32)) 3878 a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32) 3879 a3 := s.newValue1(cvttab.cvtF2F, tt, a2) 3880 3881 s.vars[n] = a3 3882 s.endBlock() 3883 bElse.AddEdgeTo(bAfter) 3884 3885 s.startBlock(bAfter) 3886 return s.variable(n, n.Type) 3887 } 3888 3889 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. 
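// Editor's note: this relies on the runtime layouts of maps and channels,
// whose first word holds the element count; for channels the second word
// holds the capacity. A nil map or channel reports zero.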
3890 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { 3891 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() { 3892 s.Fatalf("node must be a map or a channel") 3893 } 3894 // if n == nil { 3895 // return 0 3896 // } else { 3897 // // len 3898 // return *((*int)n) 3899 // // cap 3900 // return *(((*int)n)+1) 3901 // } 3902 lenType := n.Type 3903 nilValue := s.constNil(types.Types[TUINTPTR]) 3904 cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue) 3905 b := s.endBlock() 3906 b.Kind = ssa.BlockIf 3907 b.SetControl(cmp) 3908 b.Likely = ssa.BranchUnlikely 3909 3910 bThen := s.f.NewBlock(ssa.BlockPlain) 3911 bElse := s.f.NewBlock(ssa.BlockPlain) 3912 bAfter := s.f.NewBlock(ssa.BlockPlain) 3913 3914 // length/capacity of a nil map/chan is zero 3915 b.AddEdgeTo(bThen) 3916 s.startBlock(bThen) 3917 s.vars[n] = s.zeroVal(lenType) 3918 s.endBlock() 3919 bThen.AddEdgeTo(bAfter) 3920 3921 b.AddEdgeTo(bElse) 3922 s.startBlock(bElse) 3923 if n.Op == OLEN { 3924 // length is stored in the first word for map/chan 3925 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem()) 3926 } else if n.Op == OCAP { 3927 // capacity is stored in the second word for chan 3928 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) 3929 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem()) 3930 } else { 3931 s.Fatalf("op must be OLEN or OCAP") 3932 } 3933 s.endBlock() 3934 bElse.AddEdgeTo(bAfter) 3935 3936 s.startBlock(bAfter) 3937 return s.variable(n, lenType) 3938 } 3939 3940 type f2uCvtTab struct { 3941 ltf, cvt2U, subf, or ssa.Op 3942 floatValue func(*state, *types.Type, float64) *ssa.Value 3943 intValue func(*state, *types.Type, int64) *ssa.Value 3944 cutoff uint64 3945 } 3946 3947 var f32_u64 f2uCvtTab = f2uCvtTab{ 3948 ltf: ssa.OpLess32F, 3949 cvt2U: ssa.OpCvt32Fto64, 3950 subf: ssa.OpSub32F, 3951 or: ssa.OpOr64, 3952 floatValue: (*state).constFloat32, 3953 intValue: (*state).constInt64, 3954 cutoff: 9223372036854775808, 3955 } 3956 3957 var f64_u64 f2uCvtTab = f2uCvtTab{ 3958 ltf: ssa.OpLess64F, 3959 cvt2U: ssa.OpCvt64Fto64, 3960 subf: ssa.OpSub64F, 3961 or: ssa.OpOr64, 3962 floatValue: (*state).constFloat64, 3963 intValue: (*state).constInt64, 3964 cutoff: 9223372036854775808, 3965 } 3966 3967 var f32_u32 f2uCvtTab = f2uCvtTab{ 3968 ltf: ssa.OpLess32F, 3969 cvt2U: ssa.OpCvt32Fto32, 3970 subf: ssa.OpSub32F, 3971 or: ssa.OpOr32, 3972 floatValue: (*state).constFloat32, 3973 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 3974 cutoff: 2147483648, 3975 } 3976 3977 var f64_u32 f2uCvtTab = f2uCvtTab{ 3978 ltf: ssa.OpLess64F, 3979 cvt2U: ssa.OpCvt64Fto32, 3980 subf: ssa.OpSub64F, 3981 or: ssa.OpOr32, 3982 floatValue: (*state).constFloat64, 3983 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, 3984 cutoff: 2147483648, 3985 } 3986 3987 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3988 return s.floatToUint(&f32_u64, n, x, ft, tt) 3989 } 3990 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3991 return s.floatToUint(&f64_u64, n, x, ft, tt) 3992 } 3993 3994 func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3995 return s.floatToUint(&f32_u32, n, x, ft, tt) 3996 } 3997 3998 func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 3999 return s.floatToUint(&f64_u32, n, x, ft, tt) 4000 } 4001 4002 func (s *state) floatToUint(cvttab 
*f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { 4003 // cutoff:=1<<(intY_Size-1) 4004 // if x < floatX(cutoff) { 4005 // result = uintY(x) 4006 // } else { 4007 // y = x - floatX(cutoff) 4008 // z = uintY(y) 4009 // result = z | -(cutoff) 4010 // } 4011 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) 4012 cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff) 4013 b := s.endBlock() 4014 b.Kind = ssa.BlockIf 4015 b.SetControl(cmp) 4016 b.Likely = ssa.BranchLikely 4017 4018 bThen := s.f.NewBlock(ssa.BlockPlain) 4019 bElse := s.f.NewBlock(ssa.BlockPlain) 4020 bAfter := s.f.NewBlock(ssa.BlockPlain) 4021 4022 b.AddEdgeTo(bThen) 4023 s.startBlock(bThen) 4024 a0 := s.newValue1(cvttab.cvt2U, tt, x) 4025 s.vars[n] = a0 4026 s.endBlock() 4027 bThen.AddEdgeTo(bAfter) 4028 4029 b.AddEdgeTo(bElse) 4030 s.startBlock(bElse) 4031 y := s.newValue2(cvttab.subf, ft, x, cutoff) 4032 y = s.newValue1(cvttab.cvt2U, tt, y) 4033 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) 4034 a1 := s.newValue2(cvttab.or, tt, y, z) 4035 s.vars[n] = a1 4036 s.endBlock() 4037 bElse.AddEdgeTo(bAfter) 4038 4039 s.startBlock(bAfter) 4040 return s.variable(n, n.Type) 4041 } 4042 4043 // dottype generates SSA for a type assertion node. 4044 // commaok indicates whether to panic or return a bool. 4045 // If commaok is false, resok will be nil. 4046 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) { 4047 iface := s.expr(n.Left) // input interface 4048 target := s.expr(n.Right) // target type 4049 byteptr := s.f.Config.Types.BytePtr 4050 4051 if n.Type.IsInterface() { 4052 if n.Type.IsEmptyInterface() { 4053 // Converting to an empty interface. 4054 // Input could be an empty or nonempty interface. 4055 if Debug_typeassert > 0 { 4056 Warnl(n.Pos, "type assertion inlined") 4057 } 4058 4059 // Get itab/type field from input. 4060 itab := s.newValue1(ssa.OpITab, byteptr, iface) 4061 // Conversion succeeds iff that field is not nil. 4062 cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr)) 4063 4064 if n.Left.Type.IsEmptyInterface() && commaok { 4065 // Converting empty interface to empty interface with ,ok is just a nil check. 4066 return iface, cond 4067 } 4068 4069 // Branch on nilness. 4070 b := s.endBlock() 4071 b.Kind = ssa.BlockIf 4072 b.SetControl(cond) 4073 b.Likely = ssa.BranchLikely 4074 bOk := s.f.NewBlock(ssa.BlockPlain) 4075 bFail := s.f.NewBlock(ssa.BlockPlain) 4076 b.AddEdgeTo(bOk) 4077 b.AddEdgeTo(bFail) 4078 4079 if !commaok { 4080 // On failure, panic by calling panicnildottype. 4081 s.startBlock(bFail) 4082 s.rtcall(panicnildottype, false, nil, target) 4083 4084 // On success, return (perhaps modified) input interface. 4085 s.startBlock(bOk) 4086 if n.Left.Type.IsEmptyInterface() { 4087 res = iface // Use input interface unchanged. 4088 return 4089 } 4090 // Load type out of itab, build interface with existing idata. 4091 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4092 typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4093 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4094 res = s.newValue2(ssa.OpIMake, n.Type, typ, idata) 4095 return 4096 } 4097 4098 s.startBlock(bOk) 4099 // nonempty -> empty 4100 // Need to load type from itab 4101 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab) 4102 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem()) 4103 s.endBlock() 4104 4105 // itab is nil, might as well use that as the nil result. 
4106 s.startBlock(bFail) 4107 s.vars[&typVar] = itab 4108 s.endBlock() 4109 4110 // Merge point. 4111 bEnd := s.f.NewBlock(ssa.BlockPlain) 4112 bOk.AddEdgeTo(bEnd) 4113 bFail.AddEdgeTo(bEnd) 4114 s.startBlock(bEnd) 4115 idata := s.newValue1(ssa.OpIData, n.Type, iface) 4116 res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata) 4117 resok = cond 4118 delete(s.vars, &typVar) 4119 return 4120 } 4121 // converting to a nonempty interface needs a runtime call. 4122 if Debug_typeassert > 0 { 4123 Warnl(n.Pos, "type assertion not inlined") 4124 } 4125 if n.Left.Type.IsEmptyInterface() { 4126 if commaok { 4127 call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) 4128 return call[0], call[1] 4129 } 4130 return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil 4131 } 4132 if commaok { 4133 call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface) 4134 return call[0], call[1] 4135 } 4136 return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil 4137 } 4138 4139 if Debug_typeassert > 0 { 4140 Warnl(n.Pos, "type assertion inlined") 4141 } 4142 4143 // Converting to a concrete type. 4144 direct := isdirectiface(n.Type) 4145 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface 4149 var targetITab *ssa.Value 4150 if n.Left.Type.IsEmptyInterface() { 4151 // Looking for pointer to target type. 4152 targetITab = target 4153 } else { 4154 // Looking for pointer to itab for target type and source interface. 4155 targetITab = s.expr(n.List.First()) 4156 } 4157 4158 var tmp *Node // temporary for use with large types 4159 var addr *ssa.Value // address of tmp 4160 if commaok && !canSSAType(n.Type) { 4161 // unSSAable type, use temporary. 4162 // TODO: get rid of some of these temporaries. 4163 tmp = tempAt(n.Pos, s.curfn, n.Type) 4164 addr = s.addr(tmp, false) 4165 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) 4166 } 4167 4168 cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab) 4169 b := s.endBlock() 4170 b.Kind = ssa.BlockIf 4171 b.SetControl(cond) 4172 b.Likely = ssa.BranchLikely 4173 4174 bOk := s.f.NewBlock(ssa.BlockPlain) 4175 bFail := s.f.NewBlock(ssa.BlockPlain) 4176 b.AddEdgeTo(bOk) 4177 b.AddEdgeTo(bFail) 4178 4179 if !commaok { 4180 // on failure, panic by calling panicdottype 4181 s.startBlock(bFail) 4182 taddr := s.expr(n.Right.Right) 4183 if n.Left.Type.IsEmptyInterface() { 4184 s.rtcall(panicdottypeE, false, nil, itab, target, taddr) 4185 } else { 4186 s.rtcall(panicdottypeI, false, nil, itab, target, taddr) 4187 } 4188 4189 // on success, return data from interface 4190 s.startBlock(bOk) 4191 if direct { 4192 return s.newValue1(ssa.OpIData, n.Type, iface), nil 4193 } 4194 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4195 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil 4196 } 4197 4198 // commaok is the more complicated case because we have 4199 // a control flow merge point. 4200 bEnd := s.f.NewBlock(ssa.BlockPlain) 4201 // Note that we need a new valVar each time (unlike okVar where we can 4202 // reuse the variable) because it might have a different type every time. 
4203 valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}} 4204 4205 // type assertion succeeded 4206 s.startBlock(bOk) 4207 if tmp == nil { 4208 if direct { 4209 s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface) 4210 } else { 4211 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4212 s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) 4213 } 4214 } else { 4215 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface) 4216 store := s.newValue3I(ssa.OpMove, types.TypeMem, n.Type.Size(), addr, p, s.mem()) 4217 store.Aux = n.Type 4218 s.vars[&memVar] = store 4219 } 4220 s.vars[&okVar] = s.constBool(true) 4221 s.endBlock() 4222 bOk.AddEdgeTo(bEnd) 4223 4224 // type assertion failed 4225 s.startBlock(bFail) 4226 if tmp == nil { 4227 s.vars[valVar] = s.zeroVal(n.Type) 4228 } else { 4229 store := s.newValue2I(ssa.OpZero, types.TypeMem, n.Type.Size(), addr, s.mem()) 4230 store.Aux = n.Type 4231 s.vars[&memVar] = store 4232 } 4233 s.vars[&okVar] = s.constBool(false) 4234 s.endBlock() 4235 bFail.AddEdgeTo(bEnd) 4236 4237 // merge point 4238 s.startBlock(bEnd) 4239 if tmp == nil { 4240 res = s.variable(valVar, n.Type) 4241 delete(s.vars, valVar) 4242 } else { 4243 res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) 4244 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem()) 4245 } 4246 resok = s.variable(&okVar, types.Types[TBOOL]) 4247 delete(s.vars, &okVar) 4248 return res, resok 4249 } 4250 4251 // variable returns the value of a variable at the current location. 4252 func (s *state) variable(name *Node, t *types.Type) *ssa.Value { 4253 v := s.vars[name] 4254 if v != nil { 4255 return v 4256 } 4257 v = s.fwdVars[name] 4258 if v != nil { 4259 return v 4260 } 4261 4262 if s.curBlock == s.f.Entry { 4263 // No variable should be live at entry. 4264 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v) 4265 } 4266 // Make a FwdRef, which records a value that's live on block input. 4267 // We'll find the matching definition as part of insertPhis. 4268 v = s.newValue0A(ssa.OpFwdRef, t, name) 4269 s.fwdVars[name] = v 4270 s.addNamedValue(name, v) 4271 return v 4272 } 4273 4274 func (s *state) mem() *ssa.Value { 4275 return s.variable(&memVar, types.TypeMem) 4276 } 4277 4278 func (s *state) addNamedValue(n *Node, v *ssa.Value) { 4279 if n.Class() == Pxxx { 4280 // Don't track our dummy nodes (&memVar etc.). 4281 return 4282 } 4283 if n.IsAutoTmp() { 4284 // Don't track temporary variables. 4285 return 4286 } 4287 if n.Class() == PPARAMOUT { 4288 // Don't track named output values. This prevents return values 4289 // from being assigned too early. See #14591 and #14762. TODO: allow this. 4290 return 4291 } 4292 if n.Class() == PAUTO && n.Xoffset != 0 { 4293 s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset) 4294 } 4295 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0} 4296 values, ok := s.f.NamedValues[loc] 4297 if !ok { 4298 s.f.Names = append(s.f.Names, loc) 4299 } 4300 s.f.NamedValues[loc] = append(values, v) 4301 } 4302 4303 // Branch is an unresolved branch. 4304 type Branch struct { 4305 P *obj.Prog // branch instruction 4306 B *ssa.Block // target 4307 } 4308 4309 // SSAGenState contains state needed during Prog generation. 4310 type SSAGenState struct { 4311 pp *Progs 4312 4313 // Branches remembers all the branch instructions we've seen 4314 // and where they would like to go. 
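// Editor's note: a branch may be emitted before its target block's first
// Prog exists, so targets are recorded here and patched after all blocks
// are laid out (see the "Resolve branches" loop in genssa).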
4315 Branches []Branch 4316 4317 // bstart remembers where each block starts (indexed by block ID) 4318 bstart []*obj.Prog 4319 4320 // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 4321 SSEto387 map[int16]int16 4322 // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. 4323 ScratchFpMem *Node 4324 4325 maxarg int64 // largest frame size for arguments to calls made by the function 4326 4327 // Map from GC safe points to stack map index, generated by 4328 // liveness analysis. 4329 stackMapIndex map[*ssa.Value]int 4330 } 4331 4332 // Prog appends a new Prog. 4333 func (s *SSAGenState) Prog(as obj.As) *obj.Prog { 4334 return s.pp.Prog(as) 4335 } 4336 4337 // Pc returns the current Prog. 4338 func (s *SSAGenState) Pc() *obj.Prog { 4339 return s.pp.next 4340 } 4341 4342 // SetPos sets the current source position. 4343 func (s *SSAGenState) SetPos(pos src.XPos) { 4344 s.pp.pos = pos 4345 } 4346 4347 // DebugFriendlySetPosFrom sets the position subject to heuristics 4348 // that reduce "jumpy" line number churn when debugging. 4349 // Spill/fill/copy instructions from the register allocator, 4350 // phi functions, and instructions with a no-pos position 4351 // are examples of instructions that can cause churn. 4352 func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) { 4353 // The two choices here are either to leave lineno unchanged, 4354 // or to explicitly set it to src.NoXPos. Leaving it unchanged 4355 // (reusing the preceding line number) produces slightly better- 4356 // looking assembly language output from the compiler, and is 4357 // expected by some already-existing tests. 4358 // The debug information appears to be the same in either case. 4359 switch v.Op { 4360 case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg: 4361 // leave the position unchanged from beginning of block 4362 // or previous line number. 4363 default: 4364 if v.Pos != src.NoXPos { 4365 s.SetPos(v.Pos) 4366 } 4367 } 4368 } 4369 4370 // genssa appends entries to pp for each instruction in f. 4371 func genssa(f *ssa.Func, pp *Progs) { 4372 var s SSAGenState 4373 4374 e := f.Frontend().(*ssafn) 4375 4376 // Generate GC bitmaps, except if the stack is too large, 4377 // in which case compilation will fail later anyway (issue 20529). 4378 if e.stksize < maxStackSize { 4379 s.stackMapIndex = liveness(e, f) 4380 } 4381 4382 // Remember where each block starts. 
4383 s.bstart = make([]*obj.Prog, f.NumBlocks()) 4384 s.pp = pp 4385 var valueProgs map[*obj.Prog]*ssa.Value 4386 var blockProgs map[*obj.Prog]*ssa.Block 4387 var logProgs = e.log 4388 if logProgs { 4389 valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues()) 4390 blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) 4391 f.Logf("genssa %s\n", f.Name) 4392 blockProgs[s.pp.next] = f.Blocks[0] 4393 } 4394 4395 if thearch.Use387 { 4396 s.SSEto387 = map[int16]int16{} 4397 } 4398 4399 s.ScratchFpMem = e.scratchFpMem 4400 4401 // Emit basic blocks 4402 for i, b := range f.Blocks { 4403 s.bstart[b.ID] = s.pp.next 4404 // Emit values in block 4405 thearch.SSAMarkMoves(&s, b) 4406 for _, v := range b.Values { 4407 x := s.pp.next 4408 s.DebugFriendlySetPosFrom(v) 4409 switch v.Op { 4410 case ssa.OpInitMem: 4411 // memory arg needs no code 4412 case ssa.OpArg: 4413 // input args need no code 4414 case ssa.OpSP, ssa.OpSB: 4415 // nothing to do 4416 case ssa.OpSelect0, ssa.OpSelect1: 4417 // nothing to do 4418 case ssa.OpGetG: 4419 // nothing to do when there's a g register, 4420 // and checkLower complains if there's not 4421 case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive: 4422 // nothing to do; already used by liveness 4423 case ssa.OpVarKill: 4424 // Zero variable if it is ambiguously live. 4425 // After the VARKILL anything this variable references 4426 // might be collected. If it were to become live again later, 4427 // the GC will see references to already-collected objects. 4428 // See issue 20029. 4429 n := v.Aux.(*Node) 4430 if n.Name.Needzero() { 4431 if n.Class() != PAUTO { 4432 v.Fatalf("zero of variable which isn't PAUTO %v", n) 4433 } 4434 if n.Type.Size()%int64(Widthptr) != 0 { 4435 v.Fatalf("zero of variable not a multiple of ptr size %v", n) 4436 } 4437 thearch.ZeroAuto(s.pp, n) 4438 } 4439 case ssa.OpPhi: 4440 CheckLoweredPhi(v) 4441 4442 default: 4443 // let the backend handle it 4444 thearch.SSAGenValue(&s, v) 4445 } 4446 4447 if logProgs { 4448 for ; x != s.pp.next; x = x.Link { 4449 valueProgs[x] = v 4450 } 4451 } 4452 } 4453 // Emit control flow instructions for block 4454 var next *ssa.Block 4455 if i < len(f.Blocks)-1 && Debug['N'] == 0 { 4456 // If -N, leave next==nil so every block with successors 4457 // ends in a JMP (except call blocks - plive doesn't like 4458 // select{send,recv} followed by a JMP call). Helps keep 4459 // line numbers for otherwise empty blocks. 4460 next = f.Blocks[i+1] 4461 } 4462 x := s.pp.next 4463 s.SetPos(b.Pos) 4464 thearch.SSAGenBlock(&s, b, next) 4465 if logProgs { 4466 for ; x != s.pp.next; x = x.Link { 4467 blockProgs[x] = b 4468 } 4469 } 4470 } 4471 4472 // Resolve branches 4473 for _, br := range s.Branches { 4474 br.P.To.Val = s.bstart[br.B.ID] 4475 } 4476 4477 if logProgs { 4478 for p := pp.Text; p != nil; p = p.Link { 4479 var s string 4480 if v, ok := valueProgs[p]; ok { 4481 s = v.String() 4482 } else if b, ok := blockProgs[p]; ok { 4483 s = b.String() 4484 } else { 4485 s = " " // most value and branch strings are 2-3 characters long 4486 } 4487 f.Logf("%s\t%s\n", s, p) 4488 } 4489 if f.HTMLWriter != nil { 4490 // LineHist is defunct now - this code won't do 4491 // anything. 
4492 // TODO: fix this (ideally without a global variable) 4493 // saved := pp.Text.Ctxt.LineHist.PrintFilenameOnly 4494 // pp.Text.Ctxt.LineHist.PrintFilenameOnly = true 4495 var buf bytes.Buffer 4496 buf.WriteString("<code>") 4497 buf.WriteString("<dl class=\"ssa-gen\">") 4498 for p := pp.Text; p != nil; p = p.Link { 4499 buf.WriteString("<dt class=\"ssa-prog-src\">") 4500 if v, ok := valueProgs[p]; ok { 4501 buf.WriteString(v.HTML()) 4502 } else if b, ok := blockProgs[p]; ok { 4503 buf.WriteString(b.HTML()) 4504 } 4505 buf.WriteString("</dt>") 4506 buf.WriteString("<dd class=\"ssa-prog\">") 4507 buf.WriteString(html.EscapeString(p.String())) 4508 buf.WriteString("</dd>") 4510 } 4511 buf.WriteString("</dl>") 4512 buf.WriteString("</code>") 4513 f.HTMLWriter.WriteColumn("genssa", buf.String()) 4514 // pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved 4515 } 4516 } 4517 4518 defframe(&s, e) 4519 if Debug['f'] != 0 { 4520 frame(0) 4521 } 4522 4523 f.HTMLWriter.Close() 4524 f.HTMLWriter = nil 4525 } 4526 4527 func defframe(s *SSAGenState, e *ssafn) { 4528 pp := s.pp 4529 4530 frame := Rnd(s.maxarg+e.stksize, int64(Widthreg)) 4531 if thearch.PadFrame != nil { 4532 frame = thearch.PadFrame(frame) 4533 } 4534 4535 // Fill in argument and frame size. 4536 pp.Text.To.Type = obj.TYPE_TEXTSIZE 4537 pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg))) 4538 pp.Text.To.Offset = frame 4539 4540 // Insert code to zero ambiguously live variables so that the 4541 // garbage collector only sees initialized values when it 4542 // looks for pointers. 4543 p := pp.Text 4544 var lo, hi int64 4545 4546 // Opaque state for backend to use. Current backends use it to 4547 // keep track of which helper registers have been zeroed. 4548 var state uint32 4549 4550 // Iterate through declarations. They are sorted in decreasing Xoffset order. 4551 for _, n := range e.curfn.Func.Dcl { 4552 if !n.Name.Needzero() { 4553 continue 4554 } 4555 if n.Class() != PAUTO { 4556 Fatalf("needzero class %d", n.Class()) 4557 } 4558 if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 { 4559 Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset) 4560 } 4561 4562 if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) { 4563 // Merge with range we already have. 4564 lo = n.Xoffset 4565 continue 4566 } 4567 4568 // Zero old range 4569 p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) 4570 4571 // Set new range. 4572 lo = n.Xoffset 4573 hi = lo + n.Type.Size() 4574 } 4575 4576 // Zero final range. 
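// Editor's sketch of the merging above: with Widthreg == 8 and two autos
// needing zeroing at offsets 0..8 and 16..24, the lower range begins
// within 2*Widthreg of the higher one, so a single ZeroRange call clears
// all 24 bytes instead of issuing two calls.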
4577 thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state) 4578 } 4579 4580 type FloatingEQNEJump struct { 4581 Jump obj.As 4582 Index int 4583 } 4584 4585 func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) { 4586 p := s.Prog(jumps.Jump) 4587 p.To.Type = obj.TYPE_BRANCH 4588 to := jumps.Index 4589 s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()}) 4590 } 4591 4592 func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) { 4593 switch next { 4594 case b.Succs[0].Block(): 4595 s.oneFPJump(b, &jumps[0][0]) 4596 s.oneFPJump(b, &jumps[0][1]) 4597 case b.Succs[1].Block(): 4598 s.oneFPJump(b, &jumps[1][0]) 4599 s.oneFPJump(b, &jumps[1][1]) 4600 default: 4601 s.oneFPJump(b, &jumps[1][0]) 4602 s.oneFPJump(b, &jumps[1][1]) 4603 q := s.Prog(obj.AJMP) 4604 q.To.Type = obj.TYPE_BRANCH 4605 s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) 4606 } 4607 } 4608 4609 func AuxOffset(v *ssa.Value) (offset int64) { 4610 if v.Aux == nil { 4611 return 0 4612 } 4613 switch sym := v.Aux.(type) { 4614 4615 case *ssa.AutoSymbol: 4616 n := sym.Node.(*Node) 4617 return n.Xoffset 4618 } 4619 return 0 4620 } 4621 4622 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a. 4623 func AddAux(a *obj.Addr, v *ssa.Value) { 4624 AddAux2(a, v, v.AuxInt) 4625 } 4626 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { 4627 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR { 4628 v.Fatalf("bad AddAux addr %v", a) 4629 } 4630 // add integer offset 4631 a.Offset += offset 4632 4633 // If no additional symbol offset, we're done. 4634 if v.Aux == nil { 4635 return 4636 } 4637 // Add symbol's offset from its base register. 4638 switch sym := v.Aux.(type) { 4639 case *ssa.ExternSymbol: 4640 a.Name = obj.NAME_EXTERN 4641 a.Sym = sym.Sym 4642 case *ssa.ArgSymbol: 4643 n := sym.Node.(*Node) 4644 a.Name = obj.NAME_PARAM 4645 a.Sym = n.Orig.Sym.Linksym() 4646 a.Offset += n.Xoffset 4647 case *ssa.AutoSymbol: 4648 n := sym.Node.(*Node) 4649 a.Name = obj.NAME_AUTO 4650 a.Sym = n.Sym.Linksym() 4651 a.Offset += n.Xoffset 4652 default: 4653 v.Fatalf("aux in %s not implemented %#v", v, v.Aux) 4654 } 4655 } 4656 4657 // extendIndex extends v to a full int width. 4658 // panic using the given function if v does not fit in an int (only on 32-bit archs). 4659 func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value { 4660 size := v.Type.Size() 4661 if size == s.config.PtrSize { 4662 return v 4663 } 4664 if size > s.config.PtrSize { 4665 // truncate 64-bit indexes on 32-bit pointer archs. Test the 4666 // high word and branch to out-of-bounds failure if it is not 0. 
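// Editor's note: on a 32-bit arch (PtrSize == 4) a 64-bit index is legal
// only if its high word is zero; the check below panics otherwise. The
// extension switch further down keys on 10*size + PtrSize, so e.g. case
// 14 means a 1-byte index on an arch with 4-byte pointers.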
4667 if Debug['B'] == 0 { 4668 hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v) 4669 cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0)) 4670 s.check(cmp, panicfn) 4671 } 4672 return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v) 4673 } 4674 4675 // Extend value to the required size 4676 var op ssa.Op 4677 if v.Type.IsSigned() { 4678 switch 10*size + s.config.PtrSize { 4679 case 14: 4680 op = ssa.OpSignExt8to32 4681 case 18: 4682 op = ssa.OpSignExt8to64 4683 case 24: 4684 op = ssa.OpSignExt16to32 4685 case 28: 4686 op = ssa.OpSignExt16to64 4687 case 48: 4688 op = ssa.OpSignExt32to64 4689 default: 4690 s.Fatalf("bad signed index extension %s", v.Type) 4691 } 4692 } else { 4693 switch 10*size + s.config.PtrSize { 4694 case 14: 4695 op = ssa.OpZeroExt8to32 4696 case 18: 4697 op = ssa.OpZeroExt8to64 4698 case 24: 4699 op = ssa.OpZeroExt16to32 4700 case 28: 4701 op = ssa.OpZeroExt16to64 4702 case 48: 4703 op = ssa.OpZeroExt32to64 4704 default: 4705 s.Fatalf("bad unsigned index extension %s", v.Type) 4706 } 4707 } 4708 return s.newValue1(op, types.Types[TINT], v) 4709 } 4710 4711 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values. 4712 // Called during ssaGenValue. 4713 func CheckLoweredPhi(v *ssa.Value) { 4714 if v.Op != ssa.OpPhi { 4715 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString()) 4716 } 4717 if v.Type.IsMemory() { 4718 return 4719 } 4720 f := v.Block.Func 4721 loc := f.RegAlloc[v.ID] 4722 for _, a := range v.Args { 4723 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? 4724 v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func) 4725 } 4726 } 4727 } 4728 4729 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block. 4730 // The output of LoweredGetClosurePtr is generally hardwired to the correct register. 4731 // That register contains the closure pointer on closure entry. 4732 func CheckLoweredGetClosurePtr(v *ssa.Value) { 4733 entry := v.Block.Func.Entry 4734 if entry != v.Block || entry.Values[0] != v { 4735 Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) 4736 } 4737 } 4738 4739 // AutoVar returns a *Node and int64 representing the auto variable and offset within it 4740 // where v should be spilled. 
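// For example (editor's note): a value assigned the slot {N: x, Off: 8}
// is addressed by AddrAuto below as x's frame offset plus 8, relative to SP.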
4741 func AutoVar(v *ssa.Value) (*Node, int64) { 4742 loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot) 4743 if v.Type.Size() > loc.Type.Size() { 4744 v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) 4745 } 4746 return loc.N.(*Node), loc.Off 4747 } 4748 4749 func AddrAuto(a *obj.Addr, v *ssa.Value) { 4750 n, off := AutoVar(v) 4751 a.Type = obj.TYPE_MEM 4752 a.Sym = n.Sym.Linksym() 4753 a.Reg = int16(thearch.REGSP) 4754 a.Offset = n.Xoffset + off 4755 if n.Class() == PPARAM || n.Class() == PPARAMOUT { 4756 a.Name = obj.NAME_PARAM 4757 } else { 4758 a.Name = obj.NAME_AUTO 4759 } 4760 } 4761 4762 func (s *SSAGenState) AddrScratch(a *obj.Addr) { 4763 if s.ScratchFpMem == nil { 4764 panic("no scratch memory available; forgot to declare usesScratch for Op?") 4765 } 4766 a.Type = obj.TYPE_MEM 4767 a.Name = obj.NAME_AUTO 4768 a.Sym = s.ScratchFpMem.Sym.Linksym() 4769 a.Reg = int16(thearch.REGSP) 4770 a.Offset = s.ScratchFpMem.Xoffset 4771 } 4772 4773 func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { 4774 idx, ok := s.stackMapIndex[v] 4775 if !ok { 4776 Fatalf("missing stack map index for %v", v.LongString()) 4777 } 4778 p := s.Prog(obj.APCDATA) 4779 Addrconst(&p.From, objabi.PCDATA_StackMapIndex) 4780 Addrconst(&p.To, int64(idx)) 4781 4782 if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn { 4783 // Deferred calls will appear to be returning to 4784 // the CALL deferreturn(SB) that we are about to emit. 4785 // However, the stack trace code will show the line 4786 // of the instruction byte before the return PC. 4787 // To avoid that being an unrelated instruction, 4788 // insert an actual hardware NOP that will have the right line number. 4789 // This is different from obj.ANOP, which is a virtual no-op 4790 // that doesn't make it into the instruction stream. 4791 thearch.Ginsnop(s.pp) 4792 } 4793 4794 p = s.Prog(obj.ACALL) 4795 if sym, ok := v.Aux.(*obj.LSym); ok { 4796 p.To.Type = obj.TYPE_MEM 4797 p.To.Name = obj.NAME_EXTERN 4798 p.To.Sym = sym 4799 } else { 4800 // TODO(mdempsky): Can these differences be eliminated? 4801 switch thearch.LinkArch.Family { 4802 case sys.AMD64, sys.I386, sys.PPC64, sys.S390X: 4803 p.To.Type = obj.TYPE_REG 4804 case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64: 4805 p.To.Type = obj.TYPE_MEM 4806 default: 4807 Fatalf("unknown indirect call family") 4808 } 4809 p.To.Reg = v.Args[0].Reg() 4810 } 4811 if s.maxarg < v.AuxInt { 4812 s.maxarg = v.AuxInt 4813 } 4814 return p 4815 } 4816 4817 // fieldIdx finds the index of the field referred to by the ODOT node n. 4818 func fieldIdx(n *Node) int { 4819 t := n.Left.Type 4820 f := n.Sym 4821 if !t.IsStruct() { 4822 panic("ODOT's LHS is not a struct") 4823 } 4824 4825 var i int 4826 for _, t1 := range t.Fields().Slice() { 4827 if t1.Sym != f { 4828 i++ 4829 continue 4830 } 4831 if t1.Offset != n.Xoffset { 4832 panic("field offset doesn't match") 4833 } 4834 return i 4835 } 4836 panic(fmt.Sprintf("can't find field in expr %v\n", n)) 4837 4838 // TODO: keep the result of this function somewhere in the ODOT Node 4839 // so we don't have to recompute it each time we need it. 4840 } 4841 4842 // ssafn holds frontend information about a function that the backend is processing. 4843 // It also exports a bunch of compiler services for the ssa backend. 
4844 type ssafn struct { 4845 curfn *Node 4846 strings map[string]interface{} // map from constant string to data symbols 4847 scratchFpMem *Node // temp for floating point register / memory moves on some architectures 4848 stksize int64 // stack size for current frame 4849 stkptrsize int64 // prefix of stack containing pointers 4850 log bool 4851 } 4852 4853 // StringData returns a symbol (an *ssa.ExternSymbol wrapped in an interface) which 4854 // is the data component of a global string constant containing s. 4855 func (e *ssafn) StringData(s string) interface{} { 4856 if aux, ok := e.strings[s]; ok { 4857 return aux 4858 } 4859 if e.strings == nil { 4860 e.strings = make(map[string]interface{}) 4861 } 4862 data := stringsym(s) 4863 aux := &ssa.ExternSymbol{Sym: data} 4864 e.strings[s] = aux 4865 return aux 4866 } 4867 4868 func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode { 4869 n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list 4870 return n 4871 } 4872 4873 func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4874 n := name.N.(*Node) 4875 ptrType := types.NewPtr(types.Types[TUINT8]) 4876 lenType := types.Types[TINT] 4877 if n.Class() == PAUTO && !n.Addrtaken() { 4878 // Split this string up into two separate variables. 4879 p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos) 4880 l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos) 4881 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0} 4882 } 4883 // Return the two parts of the larger variable. 4884 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)} 4885 } 4886 4887 func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4888 n := name.N.(*Node) 4889 t := types.NewPtr(types.Types[TUINT8]) 4890 if n.Class() == PAUTO && !n.Addrtaken() { 4891 // Split this interface up into two separate variables. 4892 f := ".itab" 4893 if n.Type.IsEmptyInterface() { 4894 f = ".type" 4895 } 4896 c := e.namedAuto(n.Sym.Name+f, t, n.Pos) 4897 d := e.namedAuto(n.Sym.Name+".data", t, n.Pos) 4898 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4899 } 4900 // Return the two parts of the larger variable. 4901 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)} 4902 } 4903 4904 func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { 4905 n := name.N.(*Node) 4906 ptrType := types.NewPtr(name.Type.ElemType()) 4907 lenType := types.Types[TINT] 4908 if n.Class() == PAUTO && !n.Addrtaken() { 4909 // Split this slice up into three separate variables. 4910 p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos) 4911 l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos) 4912 c := e.namedAuto(n.Sym.Name+".cap", lenType, n.Pos) 4913 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0} 4914 } 4915 // Return the three parts of the larger variable. 
4916 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, 4917 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}, 4918 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)} 4919 } 4920 4921 func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4922 n := name.N.(*Node) 4923 s := name.Type.Size() / 2 4924 var t *types.Type 4925 if s == 8 { 4926 t = types.Types[TFLOAT64] 4927 } else { 4928 t = types.Types[TFLOAT32] 4929 } 4930 if n.Class() == PAUTO && !n.Addrtaken() { 4931 // Split this complex up into two separate variables. 4932 c := e.namedAuto(n.Sym.Name+".real", t, n.Pos) 4933 d := e.namedAuto(n.Sym.Name+".imag", t, n.Pos) 4934 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} 4935 } 4936 // Return the two parts of the larger variable. 4937 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s} 4938 } 4939 4940 func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { 4941 n := name.N.(*Node) 4942 var t *types.Type 4943 if name.Type.IsSigned() { 4944 t = types.Types[TINT32] 4945 } else { 4946 t = types.Types[TUINT32] 4947 } 4948 if n.Class() == PAUTO && !n.Addrtaken() { 4949 // Split this int64 up into two separate variables. 4950 h := e.namedAuto(n.Sym.Name+".hi", t, n.Pos) 4951 l := e.namedAuto(n.Sym.Name+".lo", types.Types[TUINT32], n.Pos) 4952 return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: types.Types[TUINT32], Off: 0} 4953 } 4954 // Return the two parts of the larger variable. 4955 if thearch.LinkArch.ByteOrder == binary.BigEndian { 4956 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4} 4957 } 4958 return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off} 4959 } 4960 4961 func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { 4962 n := name.N.(*Node) 4963 st := name.Type 4964 ft := st.FieldType(i) 4965 if n.Class() == PAUTO && !n.Addrtaken() { 4966 // Note: the _ field may appear several times. But 4967 // have no fear, identically-named but distinct Autos are 4968 // ok, albeit maybe confusing for a debugger. 4969 x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft, n.Pos) 4970 return ssa.LocalSlot{N: x, Type: ft, Off: 0} 4971 } 4972 return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)} 4973 } 4974 4975 func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { 4976 n := name.N.(*Node) 4977 at := name.Type 4978 if at.NumElem() != 1 { 4979 Fatalf("bad array size") 4980 } 4981 et := at.ElemType() 4982 if n.Class() == PAUTO && !n.Addrtaken() { 4983 x := e.namedAuto(n.Sym.Name+"[0]", et, n.Pos) 4984 return ssa.LocalSlot{N: x, Type: et, Off: 0} 4985 } 4986 return ssa.LocalSlot{N: n, Type: et, Off: name.Off} 4987 } 4988 4989 func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { 4990 return itabsym(it, offset) 4991 } 4992 4993 // namedAuto returns a new AUTO variable with the given name and type. 4994 // These are exposed to the debugger. 
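// For example (editor's note): SplitString above uses this helper to turn
// an SSA-able auto s into fresh autos named "s.ptr" and "s.len".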
4995 func (e *ssafn) namedAuto(name string, typ *types.Type, pos src.XPos) ssa.GCNode { 4996 t := typ 4997 s := &types.Sym{Name: name, Pkg: localpkg} 4998 4999 n := new(Node) 5000 n.Name = new(Name) 5001 n.Op = ONAME 5002 n.Pos = pos 5003 n.Orig = n 5004 5005 s.Def = asTypesNode(n) 5006 asNode(s.Def).Name.SetUsed(true) 5007 n.Sym = s 5008 n.Type = t 5009 n.SetClass(PAUTO) 5010 n.SetAddable(true) 5011 n.Esc = EscNever 5012 n.Name.Curfn = e.curfn 5013 e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n) 5014 dowidth(t) 5015 return n 5016 } 5017 5018 func (e *ssafn) CanSSA(t *types.Type) bool { 5019 return canSSAType(t) 5020 } 5021 5022 func (e *ssafn) Line(pos src.XPos) string { 5023 return linestr(pos) 5024 } 5025 5026 // Logf logs a message from the compiler. 5027 func (e *ssafn) Logf(msg string, args ...interface{}) { 5028 if e.log { 5029 fmt.Printf(msg, args...) 5030 } 5031 } 5032 5033 func (e *ssafn) Log() bool { 5034 return e.log 5035 } 5036 5037 // Fatalf reports a compiler error and exits. 5038 func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) { 5039 lineno = pos 5040 Fatalf(msg, args...) 5041 } 5042 5043 // Warnl reports a "warning", which is usually flag-triggered 5044 // logging output for the benefit of tests. 5045 func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) { 5046 Warnl(pos, fmt_, args...) 5047 } 5048 5049 func (e *ssafn) Debug_checknil() bool { 5050 return Debug_checknil != 0 5051 } 5052 5053 func (e *ssafn) Debug_wb() bool { 5054 return Debug_wb != 0 5055 } 5056 5057 func (e *ssafn) UseWriteBarrier() bool { 5058 return use_writebarrier 5059 } 5060 5061 func (e *ssafn) Syslook(name string) *obj.LSym { 5062 switch name { 5063 case "goschedguarded": 5064 return goschedguarded 5065 case "writeBarrier": 5066 return writeBarrier 5067 case "writebarrierptr": 5068 return writebarrierptr 5069 case "typedmemmove": 5070 return typedmemmove 5071 case "typedmemclr": 5072 return typedmemclr 5073 } 5074 Fatalf("unknown Syslook func %v", name) 5075 return nil 5076 } 5077 5078 func (n *Node) Typ() *types.Type { 5079 return n.Type 5080 }