// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package walk

import (
	"fmt"
	"github.com/bir3/gocompiler/src/go/constant"

	"github.com/bir3/gocompiler/src/cmd/compile/internal/base"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ir"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/reflectdata"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ssa"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/staticinit"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/typecheck"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/types"
	"github.com/bir3/gocompiler/src/cmd/internal/objabi"
	"github.com/bir3/gocompiler/src/cmd/internal/src"
)

// Rewrite tree to use separate statements to enforce
// order of evaluation. Makes walk easier, because it
// can (after this runs) reorder at will within an expression.
//
// Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
//
// Introduce temporaries as needed by runtime routines.
// For example, the map runtime routines take the map key
// by reference, so make sure all map keys are addressable
// by copying them to temporaries as needed.
// The same is true for channel operations.
//
// Arrange that map index expressions only appear in direct
// assignments x = m[k] or m[k] = x, never in larger expressions.
//
// Arrange that receive expressions only appear in direct assignments
// x = <-c or as standalone statements <-c, never in larger expressions.
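
// As an illustrative sketch (not actual compiler output), a statement such as
//
//	x = f(m[k], <-c)
//
// is conceptually rewritten into
//
//	tmp1 := k       // key copied so it is addressable for the map runtime call
//	tmp2 := m[tmp1] // map index now a direct assignment
//	tmp3 := <-c     // receive hoisted into a direct assignment
//	x = f(tmp2, tmp3)
//
// so that walk never sees a map index or receive inside a larger expression.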

// orderState holds state during the ordering process.
type orderState struct {
	out  []ir.Node             // list of generated statements
	temp []*ir.Name            // stack of temporary variables
	free map[string][]*ir.Name // free list of unused temporaries, by type.LinkString().
	edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
}

// order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
func order(fn *ir.Func) {
	if base.Flag.W > 1 {
		s := fmt.Sprintf("\nbefore order %v", fn.Sym())
		ir.DumpList(s, fn.Body)
	}
	ir.SetPos(fn) // Set reasonable position for instrumenting code. See issue 53688.
	orderBlock(&fn.Body, map[string][]*ir.Name{})
}

// append typechecks stmt and appends it to out.
func (o *orderState) append(stmt ir.Node) {
	o.out = append(o.out, typecheck.Stmt(stmt))
}

// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
	var v *ir.Name
	key := t.LinkString()
	if a := o.free[key]; len(a) > 0 {
		v = a[len(a)-1]
		if !types.Identical(t, v.Type()) {
			base.Fatalf("expected %L to have type %v", v, t)
		}
		o.free[key] = a[:len(a)-1]
	} else {
		v = typecheck.TempAt(base.Pos, ir.CurFunc, t)
	}
	if clear {
		o.append(ir.NewAssignStmt(base.Pos, v, nil))
	}

	o.temp = append(o.temp, v)
	return v
}

// copyExpr behaves like newTemp but also emits
// code to initialize the temporary to the value n.
func (o *orderState) copyExpr(n ir.Node) *ir.Name {
	return o.copyExpr1(n, false)
}

// copyExprClear is like copyExpr but clears the temp before assignment.
// It is provided for use when the evaluation of tmp = n turns into
// a function call that is passed a pointer to the temporary as the output space.
// If the call blocks before tmp has been written,
// the garbage collector will still treat the temporary as live,
// so we must zero it before entering that call.
// Today, this only happens for channel receive operations.
// (The other candidate would be map access, but map access
// returns a pointer to the result data instead of taking a pointer
// to be filled in.)
func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
	return o.copyExpr1(n, true)
}

func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
	t := n.Type()
	v := o.newTemp(t, clear)
	o.append(ir.NewAssignStmt(base.Pos, v, n))
	return v
}

// cheapExpr returns a cheap version of n.
// The definition of cheap is that n is a variable or constant.
// If not, cheapExpr allocates a new tmp, emits tmp = n,
// and then returns tmp.
func (o *orderState) cheapExpr(n ir.Node) ir.Node {
	if n == nil {
		return nil
	}

	switch n.Op() {
	case ir.ONAME, ir.OLITERAL, ir.ONIL:
		return n
	case ir.OLEN, ir.OCAP:
		n := n.(*ir.UnaryExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.UnaryExpr)
		a.X = l
		return typecheck.Expr(a)
	}

	return o.copyExpr(n)
}

// safeExpr returns a safe version of n.
// The definition of safe is that n can appear multiple times
// without violating the semantics of the original program,
// and that assigning to the safe version has the same effect
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
func (o *orderState) safeExpr(n ir.Node) ir.Node {
	switch n.Op() {
	case ir.ONAME, ir.OLITERAL, ir.ONIL:
		return n

	case ir.OLEN, ir.OCAP:
		n := n.(*ir.UnaryExpr)
		l := o.safeExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.UnaryExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODOT:
		n := n.(*ir.SelectorExpr)
		l := o.safeExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.SelectorExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODOTPTR:
		n := n.(*ir.SelectorExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.SelectorExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODEREF:
		n := n.(*ir.StarExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.StarExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.OINDEX, ir.OINDEXMAP:
		n := n.(*ir.IndexExpr)
		var l ir.Node
		if n.X.Type().IsArray() {
			l = o.safeExpr(n.X)
		} else {
			l = o.cheapExpr(n.X)
		}
		r := o.cheapExpr(n.Index)
		if l == n.X && r == n.Index {
			return n
		}
		a := ir.Copy(n).(*ir.IndexExpr)
		a.X = l
		a.Index = r
		return typecheck.Expr(a)

	default:
		base.Fatalf("order.safeExpr %v", n.Op())
		return nil // not reached
	}
}
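
// To make the safeExpr contract concrete: rewriting a[i()] += y into
// a[i()] = a[i()] + y must not call i twice. safeExpr(a[i()]) hoists i()
// into a temporary (via cheapExpr), so both occurrences index the same
// element. An illustrative sketch of the effect, not compiler output:
//
//	tmp := i()
//	a[tmp] = a[tmp] + y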

// addrTemp ensures that n is okay to pass by address to runtime routines.
// If the original argument n is not okay, addrTemp creates a tmp, emits
// tmp = n, and then returns tmp.
// The result of addrTemp MUST be assigned back to n, e.g.
//
//	n.Left = o.addrTemp(n.Left)
func (o *orderState) addrTemp(n ir.Node) ir.Node {
	if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
		// TODO: expand this to all static composite literal nodes?
		n = typecheck.DefaultLit(n, nil)
		types.CalcSize(n.Type())
		vstat := readonlystaticname(n.Type())
		var s staticinit.Schedule
		s.StaticAssign(vstat, 0, n, n.Type())
		if s.Out != nil {
			base.Fatalf("staticassign of const generated code: %+v", n)
		}
		vstat = typecheck.Expr(vstat).(*ir.Name)
		return vstat
	}

	// Prevent taking the address of an SSA-able local variable (#63332).
	//
	// TODO(mdempsky): Note that OuterValue unwraps OCONVNOPs, but
	// IsAddressable does not. It should be possible to skip copying for
	// at least some of these OCONVNOPs (e.g., reinsert them after the
	// OADDR operation), but at least walkCompare needs to be fixed to
	// support that (see trybot failures on go.dev/cl/541715, PS1).
	if ir.IsAddressable(n) {
		if name, ok := ir.OuterValue(n).(*ir.Name); ok && name.Op() == ir.ONAME {
			if name.Class == ir.PAUTO && !name.Addrtaken() && ssa.CanSSA(name.Type()) {
				goto Copy
			}
		}

		return n
	}

Copy:
	return o.copyExpr(n)
}
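
// For example (an illustrative sketch, not compiler output): the runtime
// routine behind ch <- v takes the address of the value being sent, so if v
// is not addressable (or is an SSA-able local that must not be marked
// addrtaken), ordering emits
//
//	tmp := v
//	// the runtime send is then passed &tmp
//
// leaving v itself untouched.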

// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// The first parameter is the position of n's containing node, for use in case
// that n's position is not unique (e.g., if n is an ONAME).
func (o *orderState) mapKeyTemp(outerPos src.XPos, t *types.Type, n ir.Node) ir.Node {
	pos := outerPos
	if ir.HasUniquePos(n) {
		pos = n.Pos()
	}
	// Most map calls need to take the address of the key.
	// Exception: map*_fast* calls. See golang.org/issue/19015.
	alg := mapfast(t)
	if alg == mapslow {
		return o.addrTemp(n)
	}
	var kt *types.Type
	switch alg {
	case mapfast32:
		kt = types.Types[types.TUINT32]
	case mapfast64:
		kt = types.Types[types.TUINT64]
	case mapfast32ptr, mapfast64ptr:
		kt = types.Types[types.TUNSAFEPTR]
	case mapfaststr:
		kt = types.Types[types.TSTRING]
	}
	nt := n.Type()
	switch {
	case nt == kt:
		return n
	case nt.Kind() == kt.Kind(), nt.IsPtrShaped() && kt.IsPtrShaped():
		// can directly convert (e.g. named type to underlying type, or one pointer to another)
		return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, kt, n))
	case nt.IsInteger() && kt.IsInteger():
		// can directly convert (e.g. int32 to uint32)
		if n.Op() == ir.OLITERAL && nt.IsSigned() {
			// avoid constant overflow error
			n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n)
			n.SetType(kt)
			return n
		}
		return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, kt, n))
	default:
		// Unsafe cast through memory.
		// We'll need to do a load with type kt. Create a temporary of type kt to
		// ensure sufficient alignment. nt may be under-aligned.
		if uint8(kt.Alignment()) < uint8(nt.Alignment()) {
			base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt)
		}
		tmp := o.newTemp(kt, true)
		// *(*nt)(&tmp) = n
		var e ir.Node = typecheck.NodAddr(tmp)
		e = ir.NewConvExpr(pos, ir.OCONVNOP, nt.PtrTo(), e)
		e = ir.NewStarExpr(pos, e)
		o.append(ir.NewAssignStmt(pos, e, n))
		return tmp
	}
}

// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
// in n to avoid string allocations for keys in map lookups.
// Returns a bool that signals if a modification was made.
//
// For:
//
//	x = m[string(k)]
//	x = m[T1{... Tn{..., string(k), ...}}]
//
// where k is []byte, T1 to Tn is a nesting of struct and array literals,
// the allocation of backing bytes for the string can be avoided
// by reusing the []byte backing array. These are special cases
// for avoiding allocations when converting byte slices to strings.
// It would be nice to handle these generally, but because
// []byte keys are not allowed in maps, the use of string(k)
// comes up in important cases in practice. See issue 3512.
func mapKeyReplaceStrConv(n ir.Node) bool {
	var replaced bool
	switch n.Op() {
	case ir.OBYTES2STR:
		n := n.(*ir.ConvExpr)
		n.SetOp(ir.OBYTES2STRTMP)
		replaced = true
	case ir.OSTRUCTLIT:
		n := n.(*ir.CompLitExpr)
		for _, elem := range n.List {
			elem := elem.(*ir.StructKeyExpr)
			if mapKeyReplaceStrConv(elem.Value) {
				replaced = true
			}
		}
	case ir.OARRAYLIT:
		n := n.(*ir.CompLitExpr)
		for _, elem := range n.List {
			if elem.Op() == ir.OKEY {
				elem = elem.(*ir.KeyExpr).Value
			}
			if mapKeyReplaceStrConv(elem) {
				replaced = true
			}
		}
	}
	return replaced
}

type ordermarker int

// markTemp returns the top of the temporary variable stack.
func (o *orderState) markTemp() ordermarker {
	return ordermarker(len(o.temp))
}

// popTemp pops temporaries off the stack until reaching the mark,
// which must have been returned by markTemp.
func (o *orderState) popTemp(mark ordermarker) {
	for _, n := range o.temp[mark:] {
		key := n.Type().LinkString()
		o.free[key] = append(o.free[key], n)
	}
	o.temp = o.temp[:mark]
}
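
// markTemp and popTemp bracket the processing of a statement so that the
// temporaries it allocates can be recycled by later statements needing the
// same types. The usage pattern throughout this file is (sketch):
//
//	t := o.markTemp()
//	// ... order the statement, allocating temps via newTemp/copyExpr ...
//	o.popTemp(t) // returns those temps to o.free for reuse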

// stmtList orders each of the statements in the list.
func (o *orderState) stmtList(l ir.Nodes) {
	s := l
	for i := range s {
		orderMakeSliceCopy(s[i:])
		o.stmt(s[i])
	}
}

// orderMakeSliceCopy matches the pattern:
//
//	m = OMAKESLICE([]T, x); OCOPY(m, s)
//
// and rewrites it to:
//
//	m = OMAKESLICECOPY([]T, x, s); nil
func orderMakeSliceCopy(s []ir.Node) {
	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
		return
	}
	if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
		return
	}

	as := s[0].(*ir.AssignStmt)
	cp := s[1].(*ir.BinaryExpr)
	if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
		as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
		as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
		// The line above this one is correct with the differing equality operators:
		// we want as.X and cp.X to be the same name,
		// but we want the initial data to be coming from a different name.
		return
	}

	mk := as.Y.(*ir.MakeExpr)
	if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
		return
	}
	mk.SetOp(ir.OMAKESLICECOPY)
	mk.Cap = cp.Y
	// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
	mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
	as.Y = typecheck.Expr(mk)
	s[1] = nil // remove separate copy call
}
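
// In source terms, the pattern above corresponds to code like (illustrative):
//
//	m := make([]T, n)
//	copy(m, s)
//
// which is fused into a single allocate-and-copy operation; this can avoid
// separately zeroing the portion of m that the copy immediately overwrites.
// The bounded flag records the special case where the make length is exactly
// len(s), so the copy is known not to need clamping.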

// edge inserts coverage instrumentation for libfuzzer.
func (o *orderState) edge() {
	if base.Debug.Libfuzzer == 0 {
		return
	}

	// Create a new uint8 counter to be allocated in section __sancov_cntrs
	counter := staticinit.StaticName(types.Types[types.TUINT8])
	counter.SetLibfuzzer8BitCounter(true)
	// As well as setting SetLibfuzzer8BitCounter, we preemptively set the
	// symbol type to SLIBFUZZER_8BIT_COUNTER so that the race detector
	// instrumentation pass (which does not have access to the flags set by
	// SetLibfuzzer8BitCounter) knows to ignore them. This information is
	// lost by the time it reaches the compile step, so SetLibfuzzer8BitCounter
	// is still necessary.
	counter.Linksym().Type = objabi.SLIBFUZZER_8BIT_COUNTER

	// We guarantee that the counter never becomes zero again once it has been
	// incremented once. This implementation follows the NeverZero optimization
	// presented by the paper:
	// "AFL++: Combining Incremental Steps of Fuzzing Research"
	// The NeverZero policy avoids the overflow to 0 by setting the counter to one
	// after it reaches 255 and so, if an edge is executed at least one time, the entry is
	// never 0.
	// Another policy presented in the paper is the Saturated Counters policy which
	// freezes the counter when it reaches the value of 255. However, a range
	// of experiments showed that it decreases overall performance.
	o.append(ir.NewIfStmt(base.Pos,
		ir.NewBinaryExpr(base.Pos, ir.OEQ, counter, ir.NewInt(base.Pos, 0xff)),
		[]ir.Node{ir.NewAssignStmt(base.Pos, counter, ir.NewInt(base.Pos, 1))},
		[]ir.Node{ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(base.Pos, 1))}))
}

// orderBlock orders the block of statements in n into a new slice,
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
	if len(*n) != 0 {
		// Set reasonable position for instrumenting code. See issue 53688.
		// It would be nice if ir.Nodes had a position (the opening {, probably),
		// but it doesn't. So we use the first statement's position instead.
		ir.SetPos((*n)[0])
	}
	var order orderState
	order.free = free
	mark := order.markTemp()
	order.edge()
	order.stmtList(*n)
	order.popTemp(mark)
	*n = order.out
}

// exprInPlace orders the side effects in *np and
// leaves them as the init list of the final *np.
// The result of exprInPlace MUST be assigned back to n, e.g.
//
//	n.Left = o.exprInPlace(n.Left)
func (o *orderState) exprInPlace(n ir.Node) ir.Node {
	var order orderState
	order.free = o.free
	n = order.expr(n, nil)
	n = ir.InitExpr(order.out, n)

	// insert new temporaries from order
	// at head of outer list.
	o.temp = append(o.temp, order.temp...)
	return n
}

// orderStmtInPlace orders the side effects of the single statement *np
// and replaces it with the resulting statement list.
// The result of orderStmtInPlace MUST be assigned back to n, e.g.
//
//	n.Left = orderStmtInPlace(n.Left)
//
// free is a map that can be used to obtain temporary variables by type.
func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
	var order orderState
	order.free = free
	mark := order.markTemp()
	order.stmt(n)
	order.popTemp(mark)
	return ir.NewBlockStmt(src.NoXPos, order.out)
}

// init moves n's init list to o.out.
func (o *orderState) init(n ir.Node) {
	if ir.MayBeShared(n) {
		// For concurrency safety, don't mutate potentially shared nodes.
		// First, ensure that no work is required here.
		if len(n.Init()) > 0 {
			base.Fatalf("order.init shared node with ninit")
		}
		return
	}
	o.stmtList(ir.TakeInit(n))
}

// call orders the call expression n.
// n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
func (o *orderState) call(nn ir.Node) {
	if len(nn.Init()) > 0 {
		// Caller should have already called o.init(nn).
		base.Fatalf("%v with unexpected ninit", nn.Op())
	}
	if nn.Op() == ir.OCALLMETH {
		base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
	}

	// Builtin functions.
	if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
		switch n := nn.(type) {
		default:
			base.Fatalf("unexpected call: %+v", n)
		case *ir.UnaryExpr:
			n.X = o.expr(n.X, nil)
		case *ir.ConvExpr:
			n.X = o.expr(n.X, nil)
		case *ir.BinaryExpr:
			n.X = o.expr(n.X, nil)
			n.Y = o.expr(n.Y, nil)
		case *ir.MakeExpr:
			n.Len = o.expr(n.Len, nil)
			n.Cap = o.expr(n.Cap, nil)
		case *ir.CallExpr:
			o.exprList(n.Args)
		}
		return
	}

	n := nn.(*ir.CallExpr)
	typecheck.AssertFixedCall(n)

	if ir.IsFuncPCIntrinsic(n) && ir.IsIfaceOfFunc(n.Args[0]) != nil {
		// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
		// do not introduce temporaries here, so it is easier to rewrite it
		// to symbol address reference later in walk.
		return
	}

	n.Fun = o.expr(n.Fun, nil)
	o.exprList(n.Args)
}

// mapAssign appends n to o.out.
func (o *orderState) mapAssign(n ir.Node) {
	switch n.Op() {
	default:
		base.Fatalf("order.mapAssign %v", n.Op())

	case ir.OAS:
		n := n.(*ir.AssignStmt)
		if n.X.Op() == ir.OINDEXMAP {
			n.Y = o.safeMapRHS(n.Y)
		}
		o.out = append(o.out, n)
	case ir.OASOP:
		n := n.(*ir.AssignOpStmt)
		if n.X.Op() == ir.OINDEXMAP {
			n.Y = o.safeMapRHS(n.Y)
		}
		o.out = append(o.out, n)
	}
}

func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
	// Make sure we evaluate the RHS before starting the map insert.
	// We need to make sure the RHS won't panic. See issue 22881.
	if r.Op() == ir.OAPPEND {
		r := r.(*ir.CallExpr)
		s := r.Args[1:]
		for i, n := range s {
			s[i] = o.cheapExpr(n)
		}
		return r
	}
	return o.cheapExpr(r)
}
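
// To see what safeMapRHS protects against (issue 22881), consider the
// illustrative case
//
//	m[k] = append(m[k], f())
//
// If f panics, the map must not be left mid-insert for key k, so f() is
// hoisted into a temporary that is evaluated before the map assignment
// begins. The append call itself is kept intact (only its later arguments
// are hoisted) so that m[k] = append(m[k], ...) can still be handled with a
// single mapassign call by walkAssign, per the OAS case in stmt below.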

// stmt orders the statement n, appending to o.out.
func (o *orderState) stmt(n ir.Node) {
	if n == nil {
		return
	}

	lno := ir.SetPos(n)
	o.init(n)

	switch n.Op() {
	default:
		base.Fatalf("order.stmt %v", n.Op())

	case ir.OINLMARK:
		o.out = append(o.out, n)

	case ir.OAS:
		n := n.(*ir.AssignStmt)
		t := o.markTemp()

		// There's a delicate interaction here between two OINDEXMAP
		// optimizations.
		//
		// First, we want to handle m[k] = append(m[k], ...) with a single
		// runtime call to mapassign. This requires the m[k] expressions to
		// satisfy ir.SameSafeExpr in walkAssign.
		//
		// But if k is a slow map key type that's passed by reference (e.g.,
		// byte), then we want to avoid marking user variables as addrtaken,
		// if that might prevent the compiler from keeping k in a register.
		//
		// TODO(mdempsky): It would be better if walk was responsible for
		// inserting temporaries as needed.
		mapAppend := n.X.Op() == ir.OINDEXMAP && n.Y.Op() == ir.OAPPEND &&
			ir.SameSafeExpr(n.X, n.Y.(*ir.CallExpr).Args[0])

		n.X = o.expr(n.X, nil)
		if mapAppend {
			indexLHS := n.X.(*ir.IndexExpr)
			indexLHS.X = o.cheapExpr(indexLHS.X)
			indexLHS.Index = o.cheapExpr(indexLHS.Index)

			call := n.Y.(*ir.CallExpr)
			indexRHS := call.Args[0].(*ir.IndexExpr)
			indexRHS.X = indexLHS.X
			indexRHS.Index = indexLHS.Index

			o.exprList(call.Args[1:])
		} else {
			n.Y = o.expr(n.Y, n.X)
		}
		o.mapAssign(n)
		o.popTemp(t)

	case ir.OASOP:
		n := n.(*ir.AssignOpStmt)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)

		if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
			// Rewrite m[k] op= r into m[k] = m[k] op r so
			// that we can ensure that if op panics
			// because r is zero, the panic happens before
			// the map assignment.
			// DeepCopy is a big hammer here, but safeExpr
			// makes sure there is nothing too deep being copied.
			l1 := o.safeExpr(n.X)
			l2 := ir.DeepCopy(src.NoXPos, l1)
			if l2.Op() == ir.OINDEXMAP {
				l2 := l2.(*ir.IndexExpr)
				l2.Assigned = false
			}
			l2 = o.copyExpr(l2)
			r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
			as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
			o.mapAssign(as)
			o.popTemp(t)
			return
		}

		o.mapAssign(n)
		o.popTemp(t)
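
	// In source terms, the OASOP rewrite above turns (illustrative sketch,
	// with op = /)
	//
	//	m[k] /= r
	//
	// into
	//
	//	tmp := m[k] / r // a division panic from r == 0 happens here,
	//	m[k] = tmp      // before the map assignment starts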

	case ir.OAS2:
		n := n.(*ir.AssignListStmt)
		t := o.markTemp()
		o.exprList(n.Lhs)
		o.exprList(n.Rhs)
		o.out = append(o.out, n)
		o.popTemp(t)

	// Special: avoid copy of func call n.Right
	case ir.OAS2FUNC:
		n := n.(*ir.AssignListStmt)
		t := o.markTemp()
		o.exprList(n.Lhs)
		call := n.Rhs[0]
		o.init(call)
		if ic, ok := call.(*ir.InlinedCallExpr); ok {
			o.stmtList(ic.Body)

			n.SetOp(ir.OAS2)
			n.Rhs = ic.ReturnVars

			o.exprList(n.Rhs)
			o.out = append(o.out, n)
		} else {
			o.call(call)
			o.as2func(n)
		}
		o.popTemp(t)

	// Special: use temporary variables to hold result,
	// so that runtime can take address of temporary.
	// No temporary for blank assignment.
	//
	// OAS2MAPR: make sure key is addressable if needed,
	// and make sure OINDEXMAP is not copied out.
	case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
		n := n.(*ir.AssignListStmt)
		t := o.markTemp()
		o.exprList(n.Lhs)

		switch r := n.Rhs[0]; r.Op() {
		case ir.ODOTTYPE2:
			r := r.(*ir.TypeAssertExpr)
			r.X = o.expr(r.X, nil)
		case ir.ODYNAMICDOTTYPE2:
			r := r.(*ir.DynamicTypeAssertExpr)
			r.X = o.expr(r.X, nil)
			r.RType = o.expr(r.RType, nil)
			r.ITab = o.expr(r.ITab, nil)
		case ir.ORECV:
			r := r.(*ir.UnaryExpr)
			r.X = o.expr(r.X, nil)
		case ir.OINDEXMAP:
			r := r.(*ir.IndexExpr)
			r.X = o.expr(r.X, nil)
			r.Index = o.expr(r.Index, nil)
			// See similar conversion for OINDEXMAP below.
			_ = mapKeyReplaceStrConv(r.Index)
			r.Index = o.mapKeyTemp(r.Pos(), r.X.Type(), r.Index)
		default:
			base.Fatalf("order.stmt: %v", r.Op())
		}

		o.as2ok(n)
		o.popTemp(t)

	// Special: does not save n onto out.
	case ir.OBLOCK:
		n := n.(*ir.BlockStmt)
		o.stmtList(n.List)

	// Special: n->left is not an expression; save as is.
	case ir.OBREAK,
		ir.OCONTINUE,
		ir.ODCL,
		ir.OFALL,
		ir.OGOTO,
		ir.OLABEL,
		ir.OTAILCALL:
		o.out = append(o.out, n)

	// Special: handle call arguments.
	case ir.OCALLFUNC, ir.OCALLINTER:
		n := n.(*ir.CallExpr)
		t := o.markTemp()
		o.call(n)
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.OINLCALL:
		n := n.(*ir.InlinedCallExpr)
		o.stmtList(n.Body)

		// discard results; double-check for no side effects
		for _, result := range n.ReturnVars {
			if staticinit.AnySideEffects(result) {
				base.FatalfAt(result.Pos(), "inlined call result has side effects: %v", result)
			}
		}

	case ir.OCHECKNIL, ir.OCLEAR, ir.OCLOSE, ir.OPANIC, ir.ORECV:
		n := n.(*ir.UnaryExpr)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.OCOPY:
		n := n.(*ir.BinaryExpr)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
		n := n.(*ir.CallExpr)
		t := o.markTemp()
		o.call(n)
		o.out = append(o.out, n)
		o.popTemp(t)

	// Special: order arguments to inner call but not call itself.
	case ir.ODEFER, ir.OGO:
		n := n.(*ir.GoDeferStmt)
		t := o.markTemp()
		o.init(n.Call)
		o.call(n.Call)
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.ODELETE:
		n := n.(*ir.CallExpr)
		t := o.markTemp()
		n.Args[0] = o.expr(n.Args[0], nil)
		n.Args[1] = o.expr(n.Args[1], nil)
		n.Args[1] = o.mapKeyTemp(n.Pos(), n.Args[0].Type(), n.Args[1])
		o.out = append(o.out, n)
		o.popTemp(t)

	// Clean temporaries from condition evaluation at
	// beginning of loop body and after for statement.
	case ir.OFOR:
		n := n.(*ir.ForStmt)
		t := o.markTemp()
		n.Cond = o.exprInPlace(n.Cond)
		orderBlock(&n.Body, o.free)
		n.Post = orderStmtInPlace(n.Post, o.free)
		o.out = append(o.out, n)
		o.popTemp(t)

	// Clean temporaries from condition at
	// beginning of both branches.
	case ir.OIF:
		n := n.(*ir.IfStmt)
		t := o.markTemp()
		n.Cond = o.exprInPlace(n.Cond)
		o.popTemp(t)
		orderBlock(&n.Body, o.free)
		orderBlock(&n.Else, o.free)
		o.out = append(o.out, n)

	case ir.ORANGE:
		// n.Right is the expression being ranged over.
		// order it, and then make a copy if we need one.
		// We almost always do, to ensure that we don't
		// see any value changes made during the loop.
		// Usually the copy is cheap (e.g., array pointer,
		// chan, slice, string are all tiny).
		// The exception is ranging over an array value
		// (not a slice, not a pointer to array),
		// which must make a copy to avoid seeing updates made during
		// the range body. Ranging over an array value is uncommon though.

		// Mark []byte(str) range expression to reuse string backing storage.
		// It is safe because the storage cannot be mutated.
		n := n.(*ir.RangeStmt)
		if x, ok := n.X.(*ir.ConvExpr); ok {
			switch x.Op() {
			case ir.OSTR2BYTES:
				x.SetOp(ir.OSTR2BYTESTMP)
				fallthrough
			case ir.OSTR2BYTESTMP:
				x.MarkNonNil() // "range []byte(nil)" is fine
			}
		}

		t := o.markTemp()
		n.X = o.expr(n.X, nil)

		orderBody := true
		xt := typecheck.RangeExprType(n.X.Type())
		switch k := xt.Kind(); {
		default:
			base.Fatalf("order.stmt range %v", n.Type())

		case types.IsInt[k]:
			// Used only once, no need to copy.

		case k == types.TARRAY, k == types.TSLICE:
			if n.Value == nil || ir.IsBlank(n.Value) {
				// for i := range x will only use x once, to compute len(x).
				// No need to copy it.
				break
			}
			fallthrough

		case k == types.TCHAN, k == types.TSTRING:
			// chan, string, slice, array ranges use value multiple times.
			// make copy.
			r := n.X

			if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
				r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
				r.SetType(types.Types[types.TSTRING])
				r = typecheck.Expr(r)
			}

			n.X = o.copyExpr(r)

		case k == types.TMAP:
			if isMapClear(n) {
				// Preserve the body of the map clear pattern so it can
				// be detected during walk. The loop body will not be used
				// when optimizing away the range loop to a runtime call.
				orderBody = false
				break
			}

			// copy the map value in case it is a map literal.
			// TODO(rsc): Make tmp = literal expressions reuse tmp.
			// For maps tmp is just one word so it hardly matters.
			r := n.X
			n.X = o.copyExpr(r)

			// n.Prealloc is the temp for the iterator.
			// MapIterType contains pointers and needs to be zeroed.
			n.Prealloc = o.newTemp(reflectdata.MapIterType(), true)
		}
		n.Key = o.exprInPlace(n.Key)
		n.Value = o.exprInPlace(n.Value)
		if orderBody {
			orderBlock(&n.Body, o.free)
		}
		o.out = append(o.out, n)
		o.popTemp(t)
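
	// For example (illustrative), in
	//
	//	for i, v := range s {
	//		s = nil
	//	}
	//
	// the loop keeps iterating over the original s, because the range
	// expression was copied into a temporary before the loop began.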

	case ir.ORETURN:
		n := n.(*ir.ReturnStmt)
		o.exprList(n.Results)
		o.out = append(o.out, n)

	// Special: clean case temporaries in each block entry.
	// Select must enter one of its blocks, so there is no
	// need for a cleaning at the end.
	// Doubly special: evaluation order for select is stricter
	// than ordinary expressions. Even something like p.c
	// has to be hoisted into a temporary, so that it cannot be
	// reordered after the channel evaluation for a different
	// case (if p were nil, then the timing of the fault would
	// give this away).
	case ir.OSELECT:
		n := n.(*ir.SelectStmt)
		t := o.markTemp()
		for _, ncas := range n.Cases {
			r := ncas.Comm
			ir.SetPos(ncas)

			// Append any new body prologue to ninit.
			// The next loop will insert ninit into nbody.
			if len(ncas.Init()) != 0 {
				base.Fatalf("order select ninit")
			}
			if r == nil {
				continue
			}
			switch r.Op() {
			default:
				ir.Dump("select case", r)
				base.Fatalf("unknown op in select %v", r.Op())

			case ir.OSELRECV2:
				// case x, ok = <-c
				r := r.(*ir.AssignListStmt)
				recv := r.Rhs[0].(*ir.UnaryExpr)
				recv.X = o.expr(recv.X, nil)
				if !ir.IsAutoTmp(recv.X) {
					recv.X = o.copyExpr(recv.X)
				}
				init := ir.TakeInit(r)

				colas := r.Def
				do := func(i int, t *types.Type) {
					n := r.Lhs[i]
					if ir.IsBlank(n) {
						return
					}
					// If this is case x := <-ch or case x, y := <-ch, the case has
					// the ODCL nodes to declare x and y. We want to delay that
					// declaration (and possible allocation) until inside the case body.
					// Delete the ODCL nodes here and recreate them inside the body below.
					if colas {
						if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
							init = init[1:]

							// iimport may have added a default initialization assignment,
							// due to how it handles ODCL statements.
							if len(init) > 0 && init[0].Op() == ir.OAS && init[0].(*ir.AssignStmt).X == n {
								init = init[1:]
							}
						}
						dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
						ncas.PtrInit().Append(dcl)
					}
					tmp := o.newTemp(t, t.HasPointers())
					as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
					ncas.PtrInit().Append(as)
					r.Lhs[i] = tmp
				}
				do(0, recv.X.Type().Elem())
				do(1, types.Types[types.TBOOL])
				if len(init) != 0 {
					ir.DumpList("ninit", init)
					base.Fatalf("ninit on select recv")
				}
				orderBlock(ncas.PtrInit(), o.free)

			case ir.OSEND:
				r := r.(*ir.SendStmt)
				if len(r.Init()) != 0 {
					ir.DumpList("ninit", r.Init())
					base.Fatalf("ninit on select send")
				}

				// case c <- x
				// r->left is c, r->right is x, both are always evaluated.
				r.Chan = o.expr(r.Chan, nil)

				if !ir.IsAutoTmp(r.Chan) {
					r.Chan = o.copyExpr(r.Chan)
				}
				r.Value = o.expr(r.Value, nil)
				if !ir.IsAutoTmp(r.Value) {
					r.Value = o.copyExpr(r.Value)
				}
			}
		}
		// Now that we have accumulated all the temporaries, clean them.
		// Also insert any ninit queued during the previous loop.
		// (The temporary cleaning must follow that ninit work.)
		for _, cas := range n.Cases {
			orderBlock(&cas.Body, o.free)

			// TODO(mdempsky): Is this actually necessary?
			// walkSelect appears to walk Ninit.
			cas.Body.Prepend(ir.TakeInit(cas)...)
		}

		o.out = append(o.out, n)
		o.popTemp(t)
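
	// To make the strictness above concrete (illustrative sketch): in
	//
	//	select {
	//	case <-p.c:
	//	case <-q.c:
	//	}
	//
	// both p.c and q.c are hoisted into temporaries before the select runs,
	// so a nil p faults during channel evaluation, in source order, rather
	// than at some reordered point inside the select.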

	// Special: value being sent is passed as a pointer; make it addressable.
	case ir.OSEND:
		n := n.(*ir.SendStmt)
		t := o.markTemp()
		n.Chan = o.expr(n.Chan, nil)
		n.Value = o.expr(n.Value, nil)
		if base.Flag.Cfg.Instrumenting {
			// Force copying to the stack so that (chan T)(nil) <- x
			// is still instrumented as a read of x.
			n.Value = o.copyExpr(n.Value)
		} else {
			n.Value = o.addrTemp(n.Value)
		}
		o.out = append(o.out, n)
		o.popTemp(t)

	// TODO(rsc): Clean temporaries more aggressively.
	// Note that because walkSwitch will rewrite some of the
	// switch into a binary search, this is not as easy as it looks.
	// (If we ran that code here we could invoke order.stmt on
	// the if-else chain instead.)
	// For now just clean all the temporaries at the end.
	// In practice that's fine.
	case ir.OSWITCH:
		n := n.(*ir.SwitchStmt)
		if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
			// Add empty "default:" case for instrumentation.
			n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
		}

		t := o.markTemp()
		n.Tag = o.expr(n.Tag, nil)
		for _, ncas := range n.Cases {
			o.exprListInPlace(ncas.List)
			orderBlock(&ncas.Body, o.free)
		}

		o.out = append(o.out, n)
		o.popTemp(t)
	}

	base.Pos = lno
}

func hasDefaultCase(n *ir.SwitchStmt) bool {
	for _, ncas := range n.Cases {
		if len(ncas.List) == 0 {
			return true
		}
	}
	return false
}

// exprList orders the expression list l into o.
func (o *orderState) exprList(l ir.Nodes) {
	s := l
	for i := range s {
		s[i] = o.expr(s[i], nil)
	}
}

// exprListInPlace orders the expression list l but saves
// the side effects on the individual expression ninit lists.
func (o *orderState) exprListInPlace(l ir.Nodes) {
	s := l
	for i := range s {
		s[i] = o.exprInPlace(s[i])
	}
}

func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
	return o.expr(n, nil)
}

// expr orders a single expression, appending side
// effects to o.out as needed.
// If this is part of an assignment lhs = *np, lhs is given.
// Otherwise lhs == nil. (When lhs != nil it may be possible
// to avoid copying the result of the expression to a temporary.)
// The result of expr MUST be assigned back to n, e.g.
//
//	n.Left = o.expr(n.Left, lhs)
func (o *orderState) expr(n, lhs ir.Node) ir.Node {
	if n == nil {
		return n
	}
	lno := ir.SetPos(n)
	n = o.expr1(n, lhs)
	base.Pos = lno
	return n
}
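
// As an illustrative example of the lhs parameter: for x = f() where x is a
// plain local variable (ONAME), expr can leave the call in place and let walk
// assign the result directly to x; with no lhs, a more complex lhs, or under
// instrumentation, the result is first copied into a temporary.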

func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
	o.init(n)

	switch n.Op() {
	default:
		if o.edit == nil {
			o.edit = o.exprNoLHS // create closure once
		}
		ir.EditChildren(n, o.edit)
		return n

	// Addition of strings turns into a function call.
	// Allocate a temporary to hold the strings.
	// Fewer than 5 strings use direct runtime helpers.
	case ir.OADDSTR:
		n := n.(*ir.AddStringExpr)
		o.exprList(n.List)

		if len(n.List) > 5 {
			t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
			n.Prealloc = o.newTemp(t, false)
		}

		// Mark string(byteSlice) arguments to reuse byteSlice backing
		// buffer during conversion. String concatenation does not
		// memorize the strings for later use, so it is safe.
		// However, we can do it only if there is at least one non-empty string literal.
		// Otherwise if all other arguments are empty strings,
		// concatstrings will return the reference to the temp string
		// to the caller.
		hasbyte := false

		haslit := false
		for _, n1 := range n.List {
			hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
			haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
		}

		if haslit && hasbyte {
			for _, n2 := range n.List {
				if n2.Op() == ir.OBYTES2STR {
					n2 := n2.(*ir.ConvExpr)
					n2.SetOp(ir.OBYTES2STRTMP)
				}
			}
		}
		return n

	case ir.OINDEXMAP:
		n := n.(*ir.IndexExpr)
		n.X = o.expr(n.X, nil)
		n.Index = o.expr(n.Index, nil)
		needCopy := false

		if !n.Assigned {
			// Enforce that any []byte slices we are not copying
			// can not be changed before the map index by forcing
			// the map index to happen immediately following the
			// conversions. See copyExpr a few lines below.
			needCopy = mapKeyReplaceStrConv(n.Index)

			if base.Flag.Cfg.Instrumenting {
				// Race detector needs the copy.
				needCopy = true
			}
		}

		// key may need to be addressable
		n.Index = o.mapKeyTemp(n.Pos(), n.X.Type(), n.Index)
		if needCopy {
			return o.copyExpr(n)
		}
		return n

	// concrete type (not interface) argument might need an addressable
	// temporary to pass to the runtime conversion routine.
	case ir.OCONVIFACE:
		n := n.(*ir.ConvExpr)
		n.X = o.expr(n.X, nil)
		if n.X.Type().IsInterface() {
			return n
		}
		if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
			// Need a temp if we need to pass the address to the conversion function.
			// We also process static composite literal node here, making a named static global
			// whose address we can put directly in an interface (see OCONVIFACE case in walk).
			n.X = o.addrTemp(n.X)
		}
		return n

	case ir.OCONVNOP:
		n := n.(*ir.ConvExpr)
		if n.X.Op() == ir.OCALLMETH {
			base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
		}
		if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) {
			call := n.X.(*ir.CallExpr)
			// When reordering unsafe.Pointer(f()) into a separate
			// statement, the conversion and function call must stay
			// together. See golang.org/issue/15329.
			o.init(call)
			o.call(call)
			if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
				return o.copyExpr(n)
			}
		} else {
			n.X = o.expr(n.X, nil)
		}
		return n
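
	// The OCONVNOP special case above matters for code like (illustrative)
	//
	//	p := unsafe.Pointer(f())
	//
	// where f returns a uintptr: the call and the conversion must stay in one
	// statement, because hoisting f() alone into a uintptr temporary would
	// hide the pointer from the garbage collector. See golang.org/issue/15329.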

	case ir.OANDAND, ir.OOROR:
		// ... = LHS && RHS
		//
		// var r bool
		// r = LHS
		// if r { // or !r, for OROR
		//	r = RHS
		// }
		// ... = r

		n := n.(*ir.LogicalExpr)
		r := o.newTemp(n.Type(), false)

		// Evaluate left-hand side.
		lhs := o.expr(n.X, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))

		// Evaluate right-hand side, save generated code.
		saveout := o.out
		o.out = nil
		t := o.markTemp()
		o.edge()
		rhs := o.expr(n.Y, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
		o.popTemp(t)
		gen := o.out
		o.out = saveout

		// If left-hand side doesn't cause a short-circuit, issue right-hand side.
		nif := ir.NewIfStmt(base.Pos, r, nil, nil)
		if n.Op() == ir.OANDAND {
			nif.Body = gen
		} else {
			nif.Else = gen
		}
		o.out = append(o.out, nif)
		return r

	case ir.OCALLMETH:
		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
		panic("unreachable")

	case ir.OCALLFUNC,
		ir.OCALLINTER,
		ir.OCAP,
		ir.OCOMPLEX,
		ir.OCOPY,
		ir.OIMAG,
		ir.OLEN,
		ir.OMAKECHAN,
		ir.OMAKEMAP,
		ir.OMAKESLICE,
		ir.OMAKESLICECOPY,
		ir.OMAX,
		ir.OMIN,
		ir.ONEW,
		ir.OREAL,
		ir.ORECOVERFP,
		ir.OSTR2BYTES,
		ir.OSTR2BYTESTMP,
		ir.OSTR2RUNES:

		if isRuneCount(n) {
			// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
			conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
			conv.X = o.expr(conv.X, nil)
		} else {
			o.call(n)
		}

		if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
			return o.copyExpr(n)
		}
		return n

	case ir.OINLCALL:
		n := n.(*ir.InlinedCallExpr)
		o.stmtList(n.Body)
		return n.SingleResult()

	case ir.OAPPEND:
		// Check for append(x, make([]T, y)...) .
		n := n.(*ir.CallExpr)
		if isAppendOfMake(n) {
			n.Args[0] = o.expr(n.Args[0], nil) // order x
			mk := n.Args[1].(*ir.MakeExpr)
			mk.Len = o.expr(mk.Len, nil) // order y
		} else {
			o.exprList(n.Args)
		}

		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
			return o.copyExpr(n)
		}
		return n

	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
		n := n.(*ir.SliceExpr)
		n.X = o.expr(n.X, nil)
		n.Low = o.cheapExpr(o.expr(n.Low, nil))
		n.High = o.cheapExpr(o.expr(n.High, nil))
		n.Max = o.cheapExpr(o.expr(n.Max, nil))
		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
			return o.copyExpr(n)
		}
		return n

	case ir.OCLOSURE:
		n := n.(*ir.ClosureExpr)
		if n.Transient() && len(n.Func.ClosureVars) > 0 {
			n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
		}
		return n

	case ir.OMETHVALUE:
		n := n.(*ir.SelectorExpr)
		n.X = o.expr(n.X, nil)
		if n.Transient() {
			t := typecheck.MethodValueType(n)
			n.Prealloc = o.newTemp(t, false)
		}
		return n

	case ir.OSLICELIT:
		n := n.(*ir.CompLitExpr)
		o.exprList(n.List)
		if n.Transient() {
			t := types.NewArray(n.Type().Elem(), n.Len)
			n.Prealloc = o.newTemp(t, false)
		}
		return n

	case ir.ODOTTYPE, ir.ODOTTYPE2:
		n := n.(*ir.TypeAssertExpr)
		n.X = o.expr(n.X, nil)
		if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
			return o.copyExprClear(n)
		}
		return n

	case ir.ORECV:
		n := n.(*ir.UnaryExpr)
		n.X = o.expr(n.X, nil)
		return o.copyExprClear(n)
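
	// Note that ORECV (and non-direct ODOTTYPE above) uses copyExprClear
	// rather than copyExpr: the operation is lowered to a runtime call that
	// writes through a pointer to the temporary, so the temporary must be
	// zeroed first, keeping the garbage collector from observing a stale
	// value while the call blocks (see the comment on copyExprClear).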

	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
		n := n.(*ir.BinaryExpr)
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)

		t := n.X.Type()
		switch {
		case t.IsString():
			// Mark string(byteSlice) arguments to reuse byteSlice backing
			// buffer during conversion. String comparison does not
			// memorize the strings for later use, so it is safe.
			if n.X.Op() == ir.OBYTES2STR {
				n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
			}
			if n.Y.Op() == ir.OBYTES2STR {
				n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
			}

		case t.IsStruct() || t.IsArray():
			// for complex comparisons, we need both args to be
			// addressable so we can pass them to the runtime.
			n.X = o.addrTemp(n.X)
			n.Y = o.addrTemp(n.Y)
		}
		return n

	case ir.OMAPLIT:
		// Order map by converting:
		//	map[int]int{
		//		a(): b(),
		//		c(): d(),
		//		e(): f(),
		//	}
		// to
		//	m := map[int]int{}
		//	m[a()] = b()
		//	m[c()] = d()
		//	m[e()] = f()
		// Then order the result.
		// Without this special case, order would otherwise compute all
		// the keys and values before storing any of them to the map.
		// See issue 26552.
		n := n.(*ir.CompLitExpr)
		entries := n.List
		statics := entries[:0]
		var dynamics []*ir.KeyExpr
		for _, r := range entries {
			r := r.(*ir.KeyExpr)

			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)
				continue
			}

			// Recursively ordering some static entries can change them to dynamic;
			// e.g., OCONVIFACE nodes. See #31777.
			r = o.expr(r, nil).(*ir.KeyExpr)
			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)
				continue
			}

			statics = append(statics, r)
		}
		n.List = statics

		if len(dynamics) == 0 {
			return n
		}

		// Emit the creation of the map (with all its static entries).
		m := o.newTemp(n.Type(), false)
		as := ir.NewAssignStmt(base.Pos, m, n)
		typecheck.Stmt(as)
		o.stmt(as)

		// Emit eval+insert of dynamic entries, one at a time.
		for _, r := range dynamics {
			lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, r.Key)).(*ir.IndexExpr)
			base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs)
			lhs.RType = n.RType

			as := ir.NewAssignStmt(base.Pos, lhs, r.Value)
			typecheck.Stmt(as)
			o.stmt(as)
		}

		// Remember that we issued these assignments so we can include that count
		// in the map alloc hint.
		// We're assuming here that all the keys in the map literal are distinct.
		// If any are equal, this will be an overcount. Probably not worth accounting
		// for that, as equal keys in map literals are rare, and at worst we waste
		// a bit of space.
		n.Len += int64(len(dynamics))

		return m
	}

	// No return - type-assertions above. Each case must return for itself.
}

// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
// The caller should order the right-hand side of the assignment before calling order.as2func.
// It rewrites,
//
//	a, b, a = ...
//
// as
//
//	tmp1, tmp2, tmp3 = ...
//	a, b, a = tmp1, tmp2, tmp3
//
// This is necessary to ensure left to right assignment order.
func (o *orderState) as2func(n *ir.AssignListStmt) {
	results := n.Rhs[0].Type()
	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
	for i, nl := range n.Lhs {
		if !ir.IsBlank(nl) {
			typ := results.Field(i).Type
			tmp := o.newTemp(typ, typ.HasPointers())
			n.Lhs[i] = tmp
			as.Lhs = append(as.Lhs, nl)
			as.Rhs = append(as.Rhs, tmp)
		}
	}

	o.out = append(o.out, n)
	o.stmt(typecheck.Stmt(as))
}

// as2ok orders OAS2XXX with ok.
// Just like as2func, this also adds temporaries to ensure left-to-right assignment.
func (o *orderState) as2ok(n *ir.AssignListStmt) {
	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)

	do := func(i int, typ *types.Type) {
		if nl := n.Lhs[i]; !ir.IsBlank(nl) {
			var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
			n.Lhs[i] = tmp
			as.Lhs = append(as.Lhs, nl)
			if i == 1 {
				// The "ok" result is an untyped boolean according to the Go
				// spec. We need to explicitly convert it to the LHS type in
				// case the latter is a defined boolean type (#8475).
				tmp = typecheck.Conv(tmp, nl.Type())
			}
			as.Rhs = append(as.Rhs, tmp)
		}
	}

	do(0, n.Rhs[0].Type())
	do(1, types.Types[types.TBOOL])

	o.out = append(o.out, n)
	o.stmt(typecheck.Stmt(as))
}
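
// The defined-boolean subtlety in as2ok (#8475) looks like this in source
// terms (illustrative sketch):
//
//	type myBool bool
//	var ok myBool
//	v, ok = m[k] // the raw "ok" temporary has type bool; it is converted
//	             // to myBool when copied into ok, matching the spec's
//	             // untyped-boolean semantics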