github.com/bir3/gocompiler@v0.3.205/src/cmd/compile/internal/walk/order.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package walk

import (
	"fmt"
	"github.com/bir3/gocompiler/src/go/constant"

	"github.com/bir3/gocompiler/src/cmd/compile/internal/base"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ir"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/reflectdata"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/staticinit"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/typecheck"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/types"
	"github.com/bir3/gocompiler/src/cmd/internal/objabi"
	"github.com/bir3/gocompiler/src/cmd/internal/src"
)

// Rewrite tree to use separate statements to enforce
// order of evaluation. Makes walk easier, because it
// can (after this runs) reorder at will within an expression.
//
// Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
//
// Introduce temporaries as needed by runtime routines.
// For example, the map runtime routines take the map key
// by reference, so make sure all map keys are addressable
// by copying them to temporaries as needed.
// The same is true for channel operations.
//
// Arrange that map index expressions only appear in direct
// assignments x = m[k] or m[k] = x, never in larger expressions.
//
// Arrange that receive expressions only appear in direct assignments
// x = <-c or as standalone statements <-c, never in larger expressions.

// orderState holds state during the ordering process.
type orderState struct {
	out  []ir.Node             // list of generated statements
	temp []*ir.Name            // stack of temporary variables
	free map[string][]*ir.Name // free list of unused temporaries, by type.LinkString().
	edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
}

// order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
func order(fn *ir.Func) {
	if base.Flag.W > 1 {
		s := fmt.Sprintf("\nbefore order %v", fn.Sym())
		ir.DumpList(s, fn.Body)
	}
	ir.SetPos(fn) // Set reasonable position for instrumenting code. See issue 53688.
	orderBlock(&fn.Body, map[string][]*ir.Name{})
}

// append typechecks stmt and appends it to out.
func (o *orderState) append(stmt ir.Node) {
	o.out = append(o.out, typecheck.Stmt(stmt))
}

// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
	var v *ir.Name
	key := t.LinkString()
	if a := o.free[key]; len(a) > 0 {
		v = a[len(a)-1]
		if !types.Identical(t, v.Type()) {
			base.Fatalf("expected %L to have type %v", v, t)
		}
		o.free[key] = a[:len(a)-1]
	} else {
		v = typecheck.Temp(t)
	}
	if clear {
		o.append(ir.NewAssignStmt(base.Pos, v, nil))
	}

	o.temp = append(o.temp, v)
	return v
}

// copyExpr behaves like newTemp but also emits
// code to initialize the temporary to the value n.
func (o *orderState) copyExpr(n ir.Node) *ir.Name {
	return o.copyExpr1(n, false)
}
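// Illustrative sketch (temp names invented for exposition; real temps come
// from typecheck.Temp): ordering the expression f() + g() via copyExpr
// conceptually appends
//
//	autotmp_1 := f()
//	autotmp_2 := g()
//
// to o.out and rewrites the expression to autotmp_1 + autotmp_2, so walk can
// later reorder freely without re-evaluating either call.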
// copyExprClear is like copyExpr but clears the temp before assignment.
// It is provided for use when the evaluation of tmp = n turns into
// a function call that is passed a pointer to the temporary as the output space.
// If the call blocks before tmp has been written,
// the garbage collector will still treat the temporary as live,
// so we must zero it before entering that call.
// Today, this only happens for channel receive operations.
// (The other candidate would be map access, but map access
// returns a pointer to the result data instead of taking a pointer
// to be filled in.)
func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
	return o.copyExpr1(n, true)
}

func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
	t := n.Type()
	v := o.newTemp(t, clear)
	o.append(ir.NewAssignStmt(base.Pos, v, n))
	return v
}

// cheapExpr returns a cheap version of n.
// The definition of cheap is that n is a variable or constant.
// If not, cheapExpr allocates a new tmp, emits tmp = n,
// and then returns tmp.
func (o *orderState) cheapExpr(n ir.Node) ir.Node {
	if n == nil {
		return nil
	}

	switch n.Op() {
	case ir.ONAME, ir.OLITERAL, ir.ONIL:
		return n
	case ir.OLEN, ir.OCAP:
		n := n.(*ir.UnaryExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.SepCopy(n).(*ir.UnaryExpr)
		a.X = l
		return typecheck.Expr(a)
	}

	return o.copyExpr(n)
}

// safeExpr returns a safe version of n.
// The definition of safe is that n can appear multiple times
// without violating the semantics of the original program,
// and that assigning to the safe version has the same effect
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
func (o *orderState) safeExpr(n ir.Node) ir.Node {
	switch n.Op() {
	case ir.ONAME, ir.OLITERAL, ir.ONIL:
		return n

	case ir.OLEN, ir.OCAP:
		n := n.(*ir.UnaryExpr)
		l := o.safeExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.SepCopy(n).(*ir.UnaryExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODOT:
		n := n.(*ir.SelectorExpr)
		l := o.safeExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.SepCopy(n).(*ir.SelectorExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODOTPTR:
		n := n.(*ir.SelectorExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.SepCopy(n).(*ir.SelectorExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODEREF:
		n := n.(*ir.StarExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.SepCopy(n).(*ir.StarExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.OINDEX, ir.OINDEXMAP:
		n := n.(*ir.IndexExpr)
		var l ir.Node
		if n.X.Type().IsArray() {
			l = o.safeExpr(n.X)
		} else {
			l = o.cheapExpr(n.X)
		}
		r := o.cheapExpr(n.Index)
		if l == n.X && r == n.Index {
			return n
		}
		a := ir.SepCopy(n).(*ir.IndexExpr)
		a.X = l
		a.Index = r
		return typecheck.Expr(a)

	default:
		base.Fatalf("order.safeExpr %v", n.Op())
		return nil // not reached
	}
}
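// Illustrative sketch: when x += y is rewritten to x = x + y and x is the
// indexing expression a[f()], safeExpr hoists the index into a temp,
//
//	tmp := f()
//	a[tmp] = a[tmp] + y
//
// so that f() is evaluated exactly once even though a[tmp] now appears twice.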
// addrTemp ensures that n is okay to pass by address to runtime routines.
// If the original argument n is not okay, addrTemp creates a tmp, emits
// tmp = n, and then returns tmp.
// The result of addrTemp MUST be assigned back to n, e.g.
//
//	n.Left = o.addrTemp(n.Left)
func (o *orderState) addrTemp(n ir.Node) ir.Node {
	if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
		// TODO: expand this to all static composite literal nodes?
		n = typecheck.DefaultLit(n, nil)
		types.CalcSize(n.Type())
		vstat := readonlystaticname(n.Type())
		var s staticinit.Schedule
		s.StaticAssign(vstat, 0, n, n.Type())
		if s.Out != nil {
			base.Fatalf("staticassign of const generated code: %+v", n)
		}
		vstat = typecheck.Expr(vstat).(*ir.Name)
		return vstat
	}
	if ir.IsAddressable(n) {
		return n
	}
	return o.copyExpr(n)
}

// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// It should only be used for map runtime calls which have *_fast* versions.
// The first parameter is the position of n's containing node, for use in case
// that n's position is not unique (e.g., if n is an ONAME).
func (o *orderState) mapKeyTemp(outerPos src.XPos, t *types.Type, n ir.Node) ir.Node {
	pos := outerPos
	if ir.HasUniquePos(n) {
		pos = n.Pos()
	}
	// Most map calls need to take the address of the key.
	// Exception: map*_fast* calls. See golang.org/issue/19015.
	alg := mapfast(t)
	if alg == mapslow {
		return o.addrTemp(n)
	}
	var kt *types.Type
	switch alg {
	case mapfast32:
		kt = types.Types[types.TUINT32]
	case mapfast64:
		kt = types.Types[types.TUINT64]
	case mapfast32ptr, mapfast64ptr:
		kt = types.Types[types.TUNSAFEPTR]
	case mapfaststr:
		kt = types.Types[types.TSTRING]
	}
	nt := n.Type()
	switch {
	case nt == kt:
		return n
	case nt.Kind() == kt.Kind(), nt.IsPtrShaped() && kt.IsPtrShaped():
		// can directly convert (e.g. named type to underlying type, or one pointer to another)
		return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, kt, n))
	case nt.IsInteger() && kt.IsInteger():
		// can directly convert (e.g. int32 to uint32)
		if n.Op() == ir.OLITERAL && nt.IsSigned() {
			// avoid constant overflow error
			n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n)
			n.SetType(kt)
			return n
		}
		return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, kt, n))
	default:
		// Unsafe cast through memory.
		// We'll need to do a load with type kt. Create a temporary of type kt to
		// ensure sufficient alignment. nt may be under-aligned.
		if uint8(kt.Alignment()) < uint8(nt.Alignment()) {
			base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt)
		}
		tmp := o.newTemp(kt, true)
		// *(*nt)(&tmp) = n
		var e ir.Node = typecheck.NodAddr(tmp)
		e = ir.NewConvExpr(pos, ir.OCONVNOP, nt.PtrTo(), e)
		e = ir.NewStarExpr(pos, e)
		o.append(ir.NewAssignStmt(pos, e, n))
		return tmp
	}
}
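// Illustrative sketch: for a map[int32]T indexed by a value of a defined type
// K (underlying int32), mapfast selects the *_fast32 helpers, which take the
// key by value as a uint32; mapKeyTemp therefore wraps the key in an integer
// conversion instead of spilling it to an addressable temp.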
// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
// in n to avoid string allocations for keys in map lookups.
// Returns a bool that signals if a modification was made.
//
// For:
//
//	x = m[string(k)]
//	x = m[T1{... Tn{..., string(k), ...}]
//
// where k is []byte, T1 to Tn is a nesting of struct and array literals,
// the allocation of backing bytes for the string can be avoided
// by reusing the []byte backing array. These are special cases
// for avoiding allocations when converting byte slices to strings.
// It would be nice to handle these generally, but because
// []byte keys are not allowed in maps, the use of string(k)
// comes up in important cases in practice. See issue 3512.
func mapKeyReplaceStrConv(n ir.Node) bool {
	var replaced bool
	switch n.Op() {
	case ir.OBYTES2STR:
		n := n.(*ir.ConvExpr)
		n.SetOp(ir.OBYTES2STRTMP)
		replaced = true
	case ir.OSTRUCTLIT:
		n := n.(*ir.CompLitExpr)
		for _, elem := range n.List {
			elem := elem.(*ir.StructKeyExpr)
			if mapKeyReplaceStrConv(elem.Value) {
				replaced = true
			}
		}
	case ir.OARRAYLIT:
		n := n.(*ir.CompLitExpr)
		for _, elem := range n.List {
			if elem.Op() == ir.OKEY {
				elem = elem.(*ir.KeyExpr).Value
			}
			if mapKeyReplaceStrConv(elem) {
				replaced = true
			}
		}
	}
	return replaced
}

type ordermarker int

// markTemp returns the top of the temporary variable stack.
func (o *orderState) markTemp() ordermarker {
	return ordermarker(len(o.temp))
}

// popTemp pops temporaries off the stack until reaching the mark,
// which must have been returned by markTemp.
func (o *orderState) popTemp(mark ordermarker) {
	for _, n := range o.temp[mark:] {
		key := n.Type().LinkString()
		o.free[key] = append(o.free[key], n)
	}
	o.temp = o.temp[:mark]
}

// stmtList orders each of the statements in the list.
func (o *orderState) stmtList(l ir.Nodes) {
	s := l
	for i := range s {
		orderMakeSliceCopy(s[i:])
		o.stmt(s[i])
	}
}

// orderMakeSliceCopy matches the pattern:
//
//	m = OMAKESLICE([]T, x); OCOPY(m, s)
//
// and rewrites it to:
//
//	m = OMAKESLICECOPY([]T, x, s); nil
func orderMakeSliceCopy(s []ir.Node) {
	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
		return
	}
	if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
		return
	}

	as := s[0].(*ir.AssignStmt)
	cp := s[1].(*ir.BinaryExpr)
	if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
		as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
		as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
		// The line above this one is correct with the differing equality operators:
		// we want as.X and cp.X to be the same name,
		// but we want the initial data to be coming from a different name.
		return
	}

	mk := as.Y.(*ir.MakeExpr)
	if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
		return
	}
	mk.SetOp(ir.OMAKESLICECOPY)
	mk.Cap = cp.Y
	// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
	mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
	as.Y = typecheck.Expr(mk)
	s[1] = nil // remove separate copy call
}
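// Illustrative sketch: the statement pair
//
//	m = make([]T, len(s))
//	copy(m, s)
//
// is fused into a single OMAKESLICECOPY node (with the bounded flag set when
// the length is exactly len(s)), so allocation and copy happen in one step.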
// edge inserts coverage instrumentation for libfuzzer.
func (o *orderState) edge() {
	if base.Debug.Libfuzzer == 0 {
		return
	}

	// Create a new uint8 counter to be allocated in section __sancov_cntrs
	counter := staticinit.StaticName(types.Types[types.TUINT8])
	counter.SetLibfuzzer8BitCounter(true)
	// As well as setting SetLibfuzzer8BitCounter, we preemptively set the
	// symbol type to SLIBFUZZER_8BIT_COUNTER so that the race detector
	// instrumentation pass (which does not have access to the flags set by
	// SetLibfuzzer8BitCounter) knows to ignore them. This information is
	// lost by the time it reaches the compile step, so SetLibfuzzer8BitCounter
	// is still necessary.
	counter.Linksym().Type = objabi.SLIBFUZZER_8BIT_COUNTER

	// We guarantee that the counter never becomes zero again once it has been
	// incremented once. This implementation follows the NeverZero optimization
	// presented by the paper:
	// "AFL++: Combining Incremental Steps of Fuzzing Research"
	// The NeverZero policy avoids the overflow to 0 by setting the counter to one
	// after it reaches 255 and so, if an edge is executed at least one time, the entry is
	// never 0.
	// Another policy presented in the paper is the Saturated Counters policy which
	// freezes the counter when it reaches the value of 255. However, a range
	// of experiments showed that this decreases overall performance.
	o.append(ir.NewIfStmt(base.Pos,
		ir.NewBinaryExpr(base.Pos, ir.OEQ, counter, ir.NewInt(0xff)),
		[]ir.Node{ir.NewAssignStmt(base.Pos, counter, ir.NewInt(1))},
		[]ir.Node{ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1))}))
}

// orderBlock orders the block of statements in n into a new slice,
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
	if len(*n) != 0 {
		// Set reasonable position for instrumenting code. See issue 53688.
		// It would be nice if ir.Nodes had a position (the opening {, probably),
		// but it doesn't. So we use the first statement's position instead.
		ir.SetPos((*n)[0])
	}
	var order orderState
	order.free = free
	mark := order.markTemp()
	order.edge()
	order.stmtList(*n)
	order.popTemp(mark)
	*n = order.out
}

// exprInPlace orders the side effects in *np and
// leaves them as the init list of the final *np.
// The result of exprInPlace MUST be assigned back to n, e.g.
//
//	n.Left = o.exprInPlace(n.Left)
func (o *orderState) exprInPlace(n ir.Node) ir.Node {
	var order orderState
	order.free = o.free
	n = order.expr(n, nil)
	n = ir.InitExpr(order.out, n)

	// insert new temporaries from order
	// at head of outer list.
	o.temp = append(o.temp, order.temp...)
	return n
}

// orderStmtInPlace orders the side effects of the single statement *np
// and replaces it with the resulting statement list.
// The result of orderStmtInPlace MUST be assigned back to n, e.g.
//
//	n.Left = orderStmtInPlace(n.Left)
//
// free is a map that can be used to obtain temporary variables by type.
func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
	var order orderState
	order.free = free
	mark := order.markTemp()
	order.stmt(n)
	order.popTemp(mark)
	return ir.NewBlockStmt(src.NoXPos, order.out)
}

// init moves n's init list to o.out.
func (o *orderState) init(n ir.Node) {
	if ir.MayBeShared(n) {
		// For concurrency safety, don't mutate potentially shared nodes.
		// First, ensure that no work is required here.
		if len(n.Init()) > 0 {
			base.Fatalf("order.init shared node with ninit")
		}
		return
	}
	o.stmtList(ir.TakeInit(n))
}
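// Illustrative sketch: for a loop such as for i < f() { ... }, the condition
// is ordered with exprInPlace, so the temp assignment for f() lands in the
// condition's own init list and is re-evaluated on every iteration, rather
// than being hoisted once into the enclosing block.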
// call orders the call expression n.
// n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
func (o *orderState) call(nn ir.Node) {
	if len(nn.Init()) > 0 {
		// Caller should have already called o.init(nn).
		base.Fatalf("%v with unexpected ninit", nn.Op())
	}
	if nn.Op() == ir.OCALLMETH {
		base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
	}

	// Builtin functions.
	if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
		switch n := nn.(type) {
		default:
			base.Fatalf("unexpected call: %+v", n)
		case *ir.UnaryExpr:
			n.X = o.expr(n.X, nil)
		case *ir.ConvExpr:
			n.X = o.expr(n.X, nil)
		case *ir.BinaryExpr:
			n.X = o.expr(n.X, nil)
			n.Y = o.expr(n.Y, nil)
		case *ir.MakeExpr:
			n.Len = o.expr(n.Len, nil)
			n.Cap = o.expr(n.Cap, nil)
		case *ir.CallExpr:
			o.exprList(n.Args)
		}
		return
	}

	n := nn.(*ir.CallExpr)
	typecheck.FixVariadicCall(n)

	if isFuncPCIntrinsic(n) && isIfaceOfFunc(n.Args[0]) {
		// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
		// do not introduce temporaries here, so it is easier to rewrite it
		// to symbol address reference later in walk.
		return
	}

	n.X = o.expr(n.X, nil)
	o.exprList(n.Args)
}

// mapAssign appends n to o.out.
func (o *orderState) mapAssign(n ir.Node) {
	switch n.Op() {
	default:
		base.Fatalf("order.mapAssign %v", n.Op())

	case ir.OAS:
		n := n.(*ir.AssignStmt)
		if n.X.Op() == ir.OINDEXMAP {
			n.Y = o.safeMapRHS(n.Y)
		}
		o.out = append(o.out, n)
	case ir.OASOP:
		n := n.(*ir.AssignOpStmt)
		if n.X.Op() == ir.OINDEXMAP {
			n.Y = o.safeMapRHS(n.Y)
		}
		o.out = append(o.out, n)
	}
}

func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
	// Make sure we evaluate the RHS before starting the map insert.
	// We need to make sure the RHS won't panic. See issue 22881.
	if r.Op() == ir.OAPPEND {
		r := r.(*ir.CallExpr)
		s := r.Args[1:]
		for i, n := range s {
			s[i] = o.cheapExpr(n)
		}
		return r
	}
	return o.cheapExpr(r)
}

// stmt orders the statement n, appending to o.out.
func (o *orderState) stmt(n ir.Node) {
	if n == nil {
		return
	}

	lno := ir.SetPos(n)
	o.init(n)

	switch n.Op() {
	default:
		base.Fatalf("order.stmt %v", n.Op())

	case ir.OINLMARK:
		o.out = append(o.out, n)

	case ir.OAS:
		n := n.(*ir.AssignStmt)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, n.X)
		o.mapAssign(n)
		o.popTemp(t)

	case ir.OASOP:
		n := n.(*ir.AssignOpStmt)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)

		if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
			// Rewrite m[k] op= r into m[k] = m[k] op r so
			// that we can ensure that if op panics
			// because r is zero, the panic happens before
			// the map assignment.
			// DeepCopy is a big hammer here, but safeExpr
			// makes sure there is nothing too deep being copied.
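			// Illustrative sketch of the rewrite (temp names invented
			// for exposition): m[k] /= r becomes
			//
			//	t1 := m[k]
			//	t2 := t1 / r // a division-by-zero panic happens here,
			//	m[k] = t2    // before the map write below.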
			l1 := o.safeExpr(n.X)
			l2 := ir.DeepCopy(src.NoXPos, l1)
			if l2.Op() == ir.OINDEXMAP {
				l2 := l2.(*ir.IndexExpr)
				l2.Assigned = false
			}
			l2 = o.copyExpr(l2)
			r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
			as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
			o.mapAssign(as)
			o.popTemp(t)
			return
		}

		o.mapAssign(n)
		o.popTemp(t)

	case ir.OAS2:
		n := n.(*ir.AssignListStmt)
		t := o.markTemp()
		o.exprList(n.Lhs)
		o.exprList(n.Rhs)
		o.out = append(o.out, n)
		o.popTemp(t)

	// Special: avoid copy of func call n.Right
	case ir.OAS2FUNC:
		n := n.(*ir.AssignListStmt)
		t := o.markTemp()
		o.exprList(n.Lhs)
		call := n.Rhs[0]
		o.init(call)
		if ic, ok := call.(*ir.InlinedCallExpr); ok {
			o.stmtList(ic.Body)

			n.SetOp(ir.OAS2)
			n.Rhs = ic.ReturnVars

			o.exprList(n.Rhs)
			o.out = append(o.out, n)
		} else {
			o.call(call)
			o.as2func(n)
		}
		o.popTemp(t)

	// Special: use temporary variables to hold result,
	// so that runtime can take address of temporary.
	// No temporary for blank assignment.
	//
	// OAS2MAPR: make sure key is addressable if needed,
	// and make sure OINDEXMAP is not copied out.
	case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
		n := n.(*ir.AssignListStmt)
		t := o.markTemp()
		o.exprList(n.Lhs)

		switch r := n.Rhs[0]; r.Op() {
		case ir.ODOTTYPE2:
			r := r.(*ir.TypeAssertExpr)
			r.X = o.expr(r.X, nil)
		case ir.ODYNAMICDOTTYPE2:
			r := r.(*ir.DynamicTypeAssertExpr)
			r.X = o.expr(r.X, nil)
			r.RType = o.expr(r.RType, nil)
			r.ITab = o.expr(r.ITab, nil)
		case ir.ORECV:
			r := r.(*ir.UnaryExpr)
			r.X = o.expr(r.X, nil)
		case ir.OINDEXMAP:
			r := r.(*ir.IndexExpr)
			r.X = o.expr(r.X, nil)
			r.Index = o.expr(r.Index, nil)
			// See similar conversion for OINDEXMAP below.
			_ = mapKeyReplaceStrConv(r.Index)
			r.Index = o.mapKeyTemp(r.Pos(), r.X.Type(), r.Index)
		default:
			base.Fatalf("order.stmt: %v", r.Op())
		}

		o.as2ok(n)
		o.popTemp(t)

	// Special: does not save n onto out.
	case ir.OBLOCK:
		n := n.(*ir.BlockStmt)
		o.stmtList(n.List)

	// Special: n->left is not an expression; save as is.
	case ir.OBREAK,
		ir.OCONTINUE,
		ir.ODCL,
		ir.ODCLCONST,
		ir.ODCLTYPE,
		ir.OFALL,
		ir.OGOTO,
		ir.OLABEL,
		ir.OTAILCALL:
		o.out = append(o.out, n)
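	// Illustrative sketch for the call cases below: a statement like
	// f(g(), h()) first orders its arguments (hoisting g() and h() into
	// temps as needed), then appends the call itself, and finally releases
	// those temps for reuse.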
	// Special: handle call arguments.
	case ir.OCALLFUNC, ir.OCALLINTER:
		n := n.(*ir.CallExpr)
		t := o.markTemp()
		o.call(n)
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.OINLCALL:
		n := n.(*ir.InlinedCallExpr)
		o.stmtList(n.Body)

		// discard results; double-check for no side effects
		for _, result := range n.ReturnVars {
			if staticinit.AnySideEffects(result) {
				base.FatalfAt(result.Pos(), "inlined call result has side effects: %v", result)
			}
		}

	case ir.OCHECKNIL, ir.OCLOSE, ir.OPANIC, ir.ORECV:
		n := n.(*ir.UnaryExpr)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.OCOPY:
		n := n.(*ir.BinaryExpr)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
		n := n.(*ir.CallExpr)
		t := o.markTemp()
		o.call(n)
		o.out = append(o.out, n)
		o.popTemp(t)

	// Special: order arguments to inner call but not call itself.
	case ir.ODEFER, ir.OGO:
		n := n.(*ir.GoDeferStmt)
		t := o.markTemp()
		o.init(n.Call)
		o.call(n.Call)
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.ODELETE:
		n := n.(*ir.CallExpr)
		t := o.markTemp()
		n.Args[0] = o.expr(n.Args[0], nil)
		n.Args[1] = o.expr(n.Args[1], nil)
		n.Args[1] = o.mapKeyTemp(n.Pos(), n.Args[0].Type(), n.Args[1])
		o.out = append(o.out, n)
		o.popTemp(t)

	// Clean temporaries from condition evaluation at
	// beginning of loop body and after for statement.
	case ir.OFOR:
		n := n.(*ir.ForStmt)
		t := o.markTemp()
		n.Cond = o.exprInPlace(n.Cond)
		orderBlock(&n.Body, o.free)
		n.Post = orderStmtInPlace(n.Post, o.free)
		o.out = append(o.out, n)
		o.popTemp(t)

	// Clean temporaries from condition at
	// beginning of both branches.
	case ir.OIF:
		n := n.(*ir.IfStmt)
		t := o.markTemp()
		n.Cond = o.exprInPlace(n.Cond)
		o.popTemp(t)
		orderBlock(&n.Body, o.free)
		orderBlock(&n.Else, o.free)
		o.out = append(o.out, n)

	case ir.ORANGE:
		// n.Right is the expression being ranged over.
		// order it, and then make a copy if we need one.
		// We almost always do, to ensure that we don't
		// see any value changes made during the loop.
		// Usually the copy is cheap (e.g., array pointer,
		// chan, slice, string are all tiny).
		// The exception is ranging over an array value
		// (not a slice, not a pointer to array),
		// which must make a copy to avoid seeing updates made during
		// the range body. Ranging over an array value is uncommon though.

		// Mark []byte(str) range expression to reuse string backing storage.
		// It is safe because the storage cannot be mutated.
		n := n.(*ir.RangeStmt)
		if n.X.Op() == ir.OSTR2BYTES {
			n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
		}

		t := o.markTemp()
		n.X = o.expr(n.X, nil)

		orderBody := true
		xt := typecheck.RangeExprType(n.X.Type())
		switch xt.Kind() {
		default:
			base.Fatalf("order.stmt range %v", n.Type())

		case types.TARRAY, types.TSLICE:
			if n.Value == nil || ir.IsBlank(n.Value) {
				// for i := range x will only use x once, to compute len(x).
				// No need to copy it.
				break
			}
			fallthrough

		case types.TCHAN, types.TSTRING:
			// chan, string, slice, array ranges use value multiple times.
			// make copy.
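			// Illustrative sketch: in
			//
			//	for _, v := range s { s = nil }
			//
			// the loop keeps iterating the original s, because the
			// ranged expression is copied to a temp before the loop
			// begins.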
			r := n.X

			if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
				r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
				r.SetType(types.Types[types.TSTRING])
				r = typecheck.Expr(r)
			}

			n.X = o.copyExpr(r)

		case types.TMAP:
			if isMapClear(n) {
				// Preserve the body of the map clear pattern so it can
				// be detected during walk. The loop body will not be used
				// when optimizing away the range loop to a runtime call.
				orderBody = false
				break
			}

			// copy the map value in case it is a map literal.
			// TODO(rsc): Make tmp = literal expressions reuse tmp.
			// For maps tmp is just one word so it hardly matters.
			r := n.X
			n.X = o.copyExpr(r)

			// n.Prealloc is the temp for the iterator.
			// MapIterType contains pointers and needs to be zeroed.
			n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
		}
		n.Key = o.exprInPlace(n.Key)
		n.Value = o.exprInPlace(n.Value)
		if orderBody {
			orderBlock(&n.Body, o.free)
		}
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.ORETURN:
		n := n.(*ir.ReturnStmt)
		o.exprList(n.Results)
		o.out = append(o.out, n)

	// Special: clean case temporaries in each block entry.
	// Select must enter one of its blocks, so there is no
	// need for a cleaning at the end.
	// Doubly special: evaluation order for select is stricter
	// than ordinary expressions. Even something like p.c
	// has to be hoisted into a temporary, so that it cannot be
	// reordered after the channel evaluation for a different
	// case (if p were nil, then the timing of the fault would
	// give this away).
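	// Illustrative sketch: in
	//
	//	select {
	//	case <-p.c:
	//	case <-q.c:
	//	}
	//
	// p.c and q.c are both hoisted into temps before the select runs, so a
	// nil p faults while the cases are being set up, never at some
	// channel-dependent later point.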
	case ir.OSELECT:
		n := n.(*ir.SelectStmt)
		t := o.markTemp()
		for _, ncas := range n.Cases {
			r := ncas.Comm
			ir.SetPos(ncas)

			// Append any new body prologue to ninit.
			// The next loop will insert ninit into nbody.
			if len(ncas.Init()) != 0 {
				base.Fatalf("order select ninit")
			}
			if r == nil {
				continue
			}
			switch r.Op() {
			default:
				ir.Dump("select case", r)
				base.Fatalf("unknown op in select %v", r.Op())

			case ir.OSELRECV2:
				// case x, ok = <-c
				r := r.(*ir.AssignListStmt)
				recv := r.Rhs[0].(*ir.UnaryExpr)
				recv.X = o.expr(recv.X, nil)
				if !ir.IsAutoTmp(recv.X) {
					recv.X = o.copyExpr(recv.X)
				}
				init := ir.TakeInit(r)

				colas := r.Def
				do := func(i int, t *types.Type) {
					n := r.Lhs[i]
					if ir.IsBlank(n) {
						return
					}
					// If this is case x := <-ch or case x, y := <-ch, the case has
					// the ODCL nodes to declare x and y. We want to delay that
					// declaration (and possible allocation) until inside the case body.
					// Delete the ODCL nodes here and recreate them inside the body below.
					if colas {
						if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
							init = init[1:]

							// iimport may have added a default initialization assignment,
							// due to how it handles ODCL statements.
							if len(init) > 0 && init[0].Op() == ir.OAS && init[0].(*ir.AssignStmt).X == n {
								init = init[1:]
							}
						}
						dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
						ncas.PtrInit().Append(dcl)
					}
					tmp := o.newTemp(t, t.HasPointers())
					as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
					ncas.PtrInit().Append(as)
					r.Lhs[i] = tmp
				}
				do(0, recv.X.Type().Elem())
				do(1, types.Types[types.TBOOL])
				if len(init) != 0 {
					ir.DumpList("ninit", init)
					base.Fatalf("ninit on select recv")
				}
				orderBlock(ncas.PtrInit(), o.free)

			case ir.OSEND:
				r := r.(*ir.SendStmt)
				if len(r.Init()) != 0 {
					ir.DumpList("ninit", r.Init())
					base.Fatalf("ninit on select send")
				}

				// case c <- x
				// r->left is c, r->right is x, both are always evaluated.
				r.Chan = o.expr(r.Chan, nil)

				if !ir.IsAutoTmp(r.Chan) {
					r.Chan = o.copyExpr(r.Chan)
				}
				r.Value = o.expr(r.Value, nil)
				if !ir.IsAutoTmp(r.Value) {
					r.Value = o.copyExpr(r.Value)
				}
			}
		}
		// Now that we have accumulated all the temporaries, clean them.
		// Also insert any ninit queued during the previous loop.
		// (The temporary cleaning must follow that ninit work.)
		for _, cas := range n.Cases {
			orderBlock(&cas.Body, o.free)

			// TODO(mdempsky): Is this actually necessary?
			// walkSelect appears to walk Ninit.
			cas.Body.Prepend(ir.TakeInit(cas)...)
		}

		o.out = append(o.out, n)
		o.popTemp(t)

	// Special: value being sent is passed as a pointer; make it addressable.
	case ir.OSEND:
		n := n.(*ir.SendStmt)
		t := o.markTemp()
		n.Chan = o.expr(n.Chan, nil)
		n.Value = o.expr(n.Value, nil)
		if base.Flag.Cfg.Instrumenting {
			// Force copying to the stack so that (chan T)(nil) <- x
			// is still instrumented as a read of x.
			n.Value = o.copyExpr(n.Value)
		} else {
			n.Value = o.addrTemp(n.Value)
		}
		o.out = append(o.out, n)
		o.popTemp(t)

	// TODO(rsc): Clean temporaries more aggressively.
	// Note that because walkSwitch will rewrite some of the
	// switch into a binary search, this is not as easy as it looks.
	// (If we ran that code here we could invoke order.stmt on
	// the if-else chain instead.)
	// For now just clean all the temporaries at the end.
	// In practice that's fine.
	case ir.OSWITCH:
		n := n.(*ir.SwitchStmt)
		if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
			// Add empty "default:" case for instrumentation.
			n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
		}

		t := o.markTemp()
		n.Tag = o.expr(n.Tag, nil)
		for _, ncas := range n.Cases {
			o.exprListInPlace(ncas.List)
			orderBlock(&ncas.Body, o.free)
		}

		o.out = append(o.out, n)
		o.popTemp(t)
	}

	base.Pos = lno
}

func hasDefaultCase(n *ir.SwitchStmt) bool {
	for _, ncas := range n.Cases {
		if len(ncas.List) == 0 {
			return true
		}
	}
	return false
}

// exprList orders the expression list l into o.
func (o *orderState) exprList(l ir.Nodes) {
	s := l
	for i := range s {
		s[i] = o.expr(s[i], nil)
	}
}
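// Illustrative sketch of the distinction below: for a switch case list such
// as case f(), g():, exprListInPlace keeps each call's setup in that
// expression's own init list, since only the case actually being tested
// should evaluate its expression; exprList, by contrast, hoists side effects
// straight into o.out.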
// exprListInPlace orders the expression list l but saves
// the side effects on the individual expression ninit lists.
func (o *orderState) exprListInPlace(l ir.Nodes) {
	s := l
	for i := range s {
		s[i] = o.exprInPlace(s[i])
	}
}

func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
	return o.expr(n, nil)
}

// expr orders a single expression, appending side
// effects to o.out as needed.
// If this is part of an assignment lhs = *np, lhs is given.
// Otherwise lhs == nil. (When lhs != nil it may be possible
// to avoid copying the result of the expression to a temporary.)
// The result of expr MUST be assigned back to n, e.g.
//
//	n.Left = o.expr(n.Left, lhs)
func (o *orderState) expr(n, lhs ir.Node) ir.Node {
	if n == nil {
		return n
	}
	lno := ir.SetPos(n)
	n = o.expr1(n, lhs)
	base.Pos = lno
	return n
}

func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
	o.init(n)

	switch n.Op() {
	default:
		if o.edit == nil {
			o.edit = o.exprNoLHS // create closure once
		}
		ir.EditChildren(n, o.edit)
		return n

	// Addition of strings turns into a function call.
	// Allocate a temporary to hold the strings.
	// Fewer than 5 strings use direct runtime helpers.
	case ir.OADDSTR:
		n := n.(*ir.AddStringExpr)
		o.exprList(n.List)

		if len(n.List) > 5 {
			t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
			n.Prealloc = o.newTemp(t, false)
		}

		// Mark string(byteSlice) arguments to reuse byteSlice backing
		// buffer during conversion. String concatenation does not
		// memorize the strings for later use, so it is safe.
		// However, we can do it only if there is at least one non-empty string literal.
		// Otherwise if all other arguments are empty strings,
		// concatstrings will return the reference to the temp string
		// to the caller.
		hasbyte := false

		haslit := false
		for _, n1 := range n.List {
			hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
			haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
		}

		if haslit && hasbyte {
			for _, n2 := range n.List {
				if n2.Op() == ir.OBYTES2STR {
					n2 := n2.(*ir.ConvExpr)
					n2.SetOp(ir.OBYTES2STRTMP)
				}
			}
		}
		return n

	case ir.OINDEXMAP:
		n := n.(*ir.IndexExpr)
		n.X = o.expr(n.X, nil)
		n.Index = o.expr(n.Index, nil)
		needCopy := false

		if !n.Assigned {
			// Enforce that any []byte slices we are not copying
			// can not be changed before the map index by forcing
			// the map index to happen immediately following the
			// conversions. See copyExpr a few lines below.
			needCopy = mapKeyReplaceStrConv(n.Index)

			if base.Flag.Cfg.Instrumenting {
				// Race detector needs the copy.
				needCopy = true
			}
		}

		// key must be addressable
		n.Index = o.mapKeyTemp(n.Pos(), n.X.Type(), n.Index)
		if needCopy {
			return o.copyExpr(n)
		}
		return n
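	// Illustrative sketch for the case above: in x = m[string(b)] with b a
	// []byte, the conversion is retagged OBYTES2STRTMP and the lookup result
	// is copied out immediately, so the temporary string never outlives b's
	// current contents.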
	// concrete type (not interface) argument might need an addressable
	// temporary to pass to the runtime conversion routine.
	case ir.OCONVIFACE, ir.OCONVIDATA:
		n := n.(*ir.ConvExpr)
		n.X = o.expr(n.X, nil)
		if n.X.Type().IsInterface() {
			return n
		}
		if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
			// Need a temp if we need to pass the address to the conversion function.
			// We also process static composite literal node here, making a named static global
			// whose address we can put directly in an interface (see OCONVIFACE/OCONVIDATA case in walk).
			n.X = o.addrTemp(n.X)
		}
		return n

	case ir.OCONVNOP:
		n := n.(*ir.ConvExpr)
		if n.X.Op() == ir.OCALLMETH {
			base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
		}
		if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) {
			call := n.X.(*ir.CallExpr)
			// When reordering unsafe.Pointer(f()) into a separate
			// statement, the conversion and function call must stay
			// together. See golang.org/issue/15329.
			o.init(call)
			o.call(call)
			if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
				return o.copyExpr(n)
			}
		} else {
			n.X = o.expr(n.X, nil)
		}
		return n

	case ir.OANDAND, ir.OOROR:
		// ... = LHS && RHS
		//
		// var r bool
		// r = LHS
		// if r { // or !r, for OROR
		//	r = RHS
		// }
		// ... = r

		n := n.(*ir.LogicalExpr)
		r := o.newTemp(n.Type(), false)

		// Evaluate left-hand side.
		lhs := o.expr(n.X, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))

		// Evaluate right-hand side, save generated code.
		saveout := o.out
		o.out = nil
		t := o.markTemp()
		o.edge()
		rhs := o.expr(n.Y, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
		o.popTemp(t)
		gen := o.out
		o.out = saveout

		// If left-hand side doesn't cause a short-circuit, issue right-hand side.
		nif := ir.NewIfStmt(base.Pos, r, nil, nil)
		if n.Op() == ir.OANDAND {
			nif.Body = gen
		} else {
			nif.Else = gen
		}
		o.out = append(o.out, nif)
		return r

	case ir.OCALLMETH:
		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
		panic("unreachable")

	case ir.OCALLFUNC,
		ir.OCALLINTER,
		ir.OCAP,
		ir.OCOMPLEX,
		ir.OCOPY,
		ir.OIMAG,
		ir.OLEN,
		ir.OMAKECHAN,
		ir.OMAKEMAP,
		ir.OMAKESLICE,
		ir.OMAKESLICECOPY,
		ir.ONEW,
		ir.OREAL,
		ir.ORECOVERFP,
		ir.OSTR2BYTES,
		ir.OSTR2BYTESTMP,
		ir.OSTR2RUNES:

		if isRuneCount(n) {
			// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
			conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
			conv.X = o.expr(conv.X, nil)
		} else {
			o.call(n)
		}

		if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
			return o.copyExpr(n)
		}
		return n

	case ir.OINLCALL:
		n := n.(*ir.InlinedCallExpr)
		o.stmtList(n.Body)
		return n.SingleResult()
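	// Illustrative sketch for the append case below: in
	// s = append(s, make([]T, n)...), only s and n are ordered; the make is
	// left in place so walk can lower the whole statement to an in-place
	// extension of s rather than allocating an intermediate slice.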
	case ir.OAPPEND:
		// Check for append(x, make([]T, y)...) .
		n := n.(*ir.CallExpr)
		if isAppendOfMake(n) {
			n.Args[0] = o.expr(n.Args[0], nil) // order x
			mk := n.Args[1].(*ir.MakeExpr)
			mk.Len = o.expr(mk.Len, nil) // order y
		} else {
			o.exprList(n.Args)
		}

		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
			return o.copyExpr(n)
		}
		return n

	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
		n := n.(*ir.SliceExpr)
		n.X = o.expr(n.X, nil)
		n.Low = o.cheapExpr(o.expr(n.Low, nil))
		n.High = o.cheapExpr(o.expr(n.High, nil))
		n.Max = o.cheapExpr(o.expr(n.Max, nil))
		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
			return o.copyExpr(n)
		}
		return n

	case ir.OCLOSURE:
		n := n.(*ir.ClosureExpr)
		if n.Transient() && len(n.Func.ClosureVars) > 0 {
			n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
		}
		return n

	case ir.OMETHVALUE:
		n := n.(*ir.SelectorExpr)
		n.X = o.expr(n.X, nil)
		if n.Transient() {
			t := typecheck.MethodValueType(n)
			n.Prealloc = o.newTemp(t, false)
		}
		return n

	case ir.OSLICELIT:
		n := n.(*ir.CompLitExpr)
		o.exprList(n.List)
		if n.Transient() {
			t := types.NewArray(n.Type().Elem(), n.Len)
			n.Prealloc = o.newTemp(t, false)
		}
		return n

	case ir.ODOTTYPE, ir.ODOTTYPE2:
		n := n.(*ir.TypeAssertExpr)
		n.X = o.expr(n.X, nil)
		if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
			return o.copyExprClear(n)
		}
		return n

	case ir.ORECV:
		n := n.(*ir.UnaryExpr)
		n.X = o.expr(n.X, nil)
		return o.copyExprClear(n)

	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
		n := n.(*ir.BinaryExpr)
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)

		t := n.X.Type()
		switch {
		case t.IsString():
			// Mark string(byteSlice) arguments to reuse byteSlice backing
			// buffer during conversion. String comparison does not
			// memorize the strings for later use, so it is safe.
			if n.X.Op() == ir.OBYTES2STR {
				n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
			}
			if n.Y.Op() == ir.OBYTES2STR {
				n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
			}

		case t.IsStruct() || t.IsArray():
			// for complex comparisons, we need both args to be
			// addressable so we can pass them to the runtime.
			n.X = o.addrTemp(n.X)
			n.Y = o.addrTemp(n.Y)
		}
		return n
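	// Illustrative sketch for the comparison case above: a == b on struct
	// values lowers to a runtime equality helper that takes its operands by
	// address, so both sides are spilled to addressable temps when they are
	// not already addressable.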
	case ir.OMAPLIT:
		// Order map by converting:
		//
		//	map[int]int{
		//		a(): b(),
		//		c(): d(),
		//		e(): f(),
		//	}
		//
		// to
		//
		//	m := map[int]int{}
		//	m[a()] = b()
		//	m[c()] = d()
		//	m[e()] = f()
		//
		// Then order the result.
		// Without this special case, order would otherwise compute all
		// the keys and values before storing any of them to the map.
		// See issue 26552.
		n := n.(*ir.CompLitExpr)
		entries := n.List
		statics := entries[:0]
		var dynamics []*ir.KeyExpr
		for _, r := range entries {
			r := r.(*ir.KeyExpr)

			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)
				continue
			}

			// Recursively ordering some static entries can change them to dynamic;
			// e.g., OCONVIFACE nodes. See #31777.
			r = o.expr(r, nil).(*ir.KeyExpr)
			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)
				continue
			}

			statics = append(statics, r)
		}
		n.List = statics

		if len(dynamics) == 0 {
			return n
		}

		// Emit the creation of the map (with all its static entries).
		m := o.newTemp(n.Type(), false)
		as := ir.NewAssignStmt(base.Pos, m, n)
		typecheck.Stmt(as)
		o.stmt(as)

		// Emit eval+insert of dynamic entries, one at a time.
		for _, r := range dynamics {
			lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, r.Key)).(*ir.IndexExpr)
			base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs)
			lhs.RType = n.RType

			as := ir.NewAssignStmt(base.Pos, lhs, r.Value)
			typecheck.Stmt(as)
			o.stmt(as)
		}

		// Remember that we issued these assignments so we can include that count
		// in the map alloc hint.
		// We're assuming here that all the keys in the map literal are distinct.
		// If any are equal, this will be an overcount. Probably not worth accounting
		// for that, as equal keys in map literals are rare, and at worst we waste
		// a bit of space.
		n.Len += int64(len(dynamics))

		return m
	}

	// No return - type-assertions above. Each case must return for itself.
}

// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
// The caller should order the right-hand side of the assignment before calling order.as2func.
// It rewrites,
//
//	a, b, a = ...
//
// as
//
//	tmp1, tmp2, tmp3 = ...
//	a, b, a = tmp1, tmp2, tmp3
//
// This is necessary to ensure left to right assignment order.
func (o *orderState) as2func(n *ir.AssignListStmt) {
	results := n.Rhs[0].Type()
	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
	for i, nl := range n.Lhs {
		if !ir.IsBlank(nl) {
			typ := results.Field(i).Type
			tmp := o.newTemp(typ, typ.HasPointers())
			n.Lhs[i] = tmp
			as.Lhs = append(as.Lhs, nl)
			as.Rhs = append(as.Rhs, tmp)
		}
	}

	o.out = append(o.out, n)
	o.stmt(typecheck.Stmt(as))
}

// as2ok orders OAS2XXX with ok.
// Just like as2func, this also adds temporaries to ensure left-to-right assignment.
func (o *orderState) as2ok(n *ir.AssignListStmt) {
	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)

	do := func(i int, typ *types.Type) {
		if nl := n.Lhs[i]; !ir.IsBlank(nl) {
			var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
			n.Lhs[i] = tmp
			as.Lhs = append(as.Lhs, nl)
			if i == 1 {
				// The "ok" result is an untyped boolean according to the Go
				// spec. We need to explicitly convert it to the LHS type in
				// case the latter is a defined boolean type (#8475).
				tmp = typecheck.Conv(tmp, nl.Type())
			}
			as.Rhs = append(as.Rhs, tmp)
		}
	}

	do(0, n.Rhs[0].Type())
	do(1, types.Types[types.TBOOL])

	o.out = append(o.out, n)
	o.stmt(typecheck.Stmt(as))
}
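// Illustrative sketch (temp names invented for exposition): for a, b = f()
// where f returns (T1, T2), as2func produces
//
//	t1, t2 = f()
//	a = t1
//	b = t2
//
// so that a and b are written strictly left to right after the call returns.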
// isFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
func isFuncPCIntrinsic(n *ir.CallExpr) bool {
	if n.Op() != ir.OCALLFUNC || n.X.Op() != ir.ONAME {
		return false
	}
	fn := n.X.(*ir.Name).Sym()
	return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") &&
		(fn.Pkg.Path == "internal/abi" || fn.Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "internal/abi")
}

// isIfaceOfFunc returns whether n is an interface conversion from a direct reference of a func.
func isIfaceOfFunc(n ir.Node) bool {
	return n.Op() == ir.OCONVIFACE && n.(*ir.ConvExpr).X.Op() == ir.ONAME && n.(*ir.ConvExpr).X.(*ir.Name).Class == ir.PFUNC
}