github.com/tidwall/go@v0.0.0-20170415222209-6694a6888b7d/src/cmd/compile/internal/gc/walk.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package gc 6 7 import ( 8 "cmd/compile/internal/types" 9 "cmd/internal/obj" 10 "cmd/internal/sys" 11 "fmt" 12 "strings" 13 ) 14 15 // The constant is known to runtime. 16 const ( 17 tmpstringbufsize = 32 18 ) 19 20 func walk(fn *Node) { 21 Curfn = fn 22 23 if Debug['W'] != 0 { 24 s := fmt.Sprintf("\nbefore %v", Curfn.Func.Nname.Sym) 25 dumplist(s, Curfn.Nbody) 26 } 27 28 lno := lineno 29 30 // Final typecheck for any unused variables. 31 for i, ln := range fn.Func.Dcl { 32 if ln.Op == ONAME && (ln.Class == PAUTO || ln.Class == PAUTOHEAP) { 33 ln = typecheck(ln, Erv|Easgn) 34 fn.Func.Dcl[i] = ln 35 } 36 } 37 38 // Propagate the used flag for typeswitch variables up to the NONAME in it's definition. 39 for _, ln := range fn.Func.Dcl { 40 if ln.Op == ONAME && (ln.Class == PAUTO || ln.Class == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Used() { 41 ln.Name.Defn.Left.SetUsed(true) 42 } 43 } 44 45 for _, ln := range fn.Func.Dcl { 46 if ln.Op != ONAME || (ln.Class != PAUTO && ln.Class != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Used() { 47 continue 48 } 49 if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW { 50 if defn.Left.Used() { 51 continue 52 } 53 yyerrorl(defn.Left.Pos, "%v declared and not used", ln.Sym) 54 defn.Left.SetUsed(true) // suppress repeats 55 } else { 56 yyerrorl(ln.Pos, "%v declared and not used", ln.Sym) 57 } 58 } 59 60 lineno = lno 61 if nerrors != 0 { 62 return 63 } 64 walkstmtlist(Curfn.Nbody.Slice()) 65 if Debug['W'] != 0 { 66 s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym) 67 dumplist(s, Curfn.Nbody) 68 } 69 70 zeroResults() 71 heapmoves() 72 if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 { 73 s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym) 74 dumplist(s, Curfn.Func.Enter) 75 } 76 } 77 78 func walkstmtlist(s []*Node) { 79 for i := range s { 80 s[i] = walkstmt(s[i]) 81 } 82 } 83 84 func samelist(a, b []*Node) bool { 85 if len(a) != len(b) { 86 return false 87 } 88 for i, n := range a { 89 if n != b[i] { 90 return false 91 } 92 } 93 return true 94 } 95 96 func paramoutheap(fn *Node) bool { 97 for _, ln := range fn.Func.Dcl { 98 switch ln.Class { 99 case PPARAMOUT: 100 if ln.isParamStackCopy() || ln.Addrtaken() { 101 return true 102 } 103 104 case PAUTO: 105 // stop early - parameters are over 106 return false 107 } 108 } 109 110 return false 111 } 112 113 // adds "adjust" to all the argument locations for the call n. 114 // n must be a defer or go node that has already been walked. 115 func adjustargs(n *Node, adjust int) { 116 var arg *Node 117 var lhs *Node 118 119 callfunc := n.Left 120 for _, arg = range callfunc.List.Slice() { 121 if arg.Op != OAS { 122 Fatalf("call arg not assignment") 123 } 124 lhs = arg.Left 125 if lhs.Op == ONAME { 126 // This is a temporary introduced by reorder1. 127 // The real store to the stack appears later in the arg list. 128 continue 129 } 130 131 if lhs.Op != OINDREGSP { 132 Fatalf("call argument store does not use OINDREGSP") 133 } 134 135 // can't really check this in machine-indep code. 136 //if(lhs->val.u.reg != D_SP) 137 // Fatalf("call arg assign not indreg(SP)") 138 lhs.Xoffset += int64(adjust) 139 } 140 } 141 142 // The result of walkstmt MUST be assigned back to n, e.g. 
143 // n.Left = walkstmt(n.Left) 144 func walkstmt(n *Node) *Node { 145 if n == nil { 146 return n 147 } 148 149 setlineno(n) 150 151 walkstmtlist(n.Ninit.Slice()) 152 153 switch n.Op { 154 default: 155 if n.Op == ONAME { 156 yyerror("%v is not a top level statement", n.Sym) 157 } else { 158 yyerror("%v is not a top level statement", n.Op) 159 } 160 Dump("nottop", n) 161 162 case OAS, 163 OASOP, 164 OAS2, 165 OAS2DOTTYPE, 166 OAS2RECV, 167 OAS2FUNC, 168 OAS2MAPR, 169 OCLOSE, 170 OCOPY, 171 OCALLMETH, 172 OCALLINTER, 173 OCALL, 174 OCALLFUNC, 175 ODELETE, 176 OSEND, 177 OPRINT, 178 OPRINTN, 179 OPANIC, 180 OEMPTY, 181 ORECOVER, 182 OGETG: 183 if n.Typecheck == 0 { 184 Fatalf("missing typecheck: %+v", n) 185 } 186 wascopy := n.Op == OCOPY 187 init := n.Ninit 188 n.Ninit.Set(nil) 189 n = walkexpr(n, &init) 190 n = addinit(n, init.Slice()) 191 if wascopy && n.Op == OCONVNOP { 192 n.Op = OEMPTY // don't leave plain values as statements. 193 } 194 195 // special case for a receive where we throw away 196 // the value received. 197 case ORECV: 198 if n.Typecheck == 0 { 199 Fatalf("missing typecheck: %+v", n) 200 } 201 init := n.Ninit 202 n.Ninit.Set(nil) 203 204 n.Left = walkexpr(n.Left, &init) 205 n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil()) 206 n = walkexpr(n, &init) 207 208 n = addinit(n, init.Slice()) 209 210 case OBREAK, 211 OCONTINUE, 212 OFALL, 213 OGOTO, 214 OLABEL, 215 ODCLCONST, 216 ODCLTYPE, 217 OCHECKNIL, 218 OVARKILL, 219 OVARLIVE: 220 break 221 222 case ODCL: 223 v := n.Left 224 if v.Class == PAUTOHEAP { 225 if compiling_runtime { 226 yyerror("%v escapes to heap, not allowed in runtime.", v) 227 } 228 if prealloc[v] == nil { 229 prealloc[v] = callnew(v.Type) 230 } 231 nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v]) 232 nn.SetColas(true) 233 nn = typecheck(nn, Etop) 234 return walkstmt(nn) 235 } 236 237 case OBLOCK: 238 walkstmtlist(n.List.Slice()) 239 240 case OXCASE: 241 yyerror("case statement out of place") 242 n.Op = OCASE 243 fallthrough 244 245 case OCASE: 246 n.Right = walkstmt(n.Right) 247 248 case ODEFER: 249 Curfn.Func.SetHasDefer(true) 250 switch n.Left.Op { 251 case OPRINT, OPRINTN: 252 n.Left = walkprintfunc(n.Left, &n.Ninit) 253 254 case OCOPY: 255 n.Left = copyany(n.Left, &n.Ninit, true) 256 257 default: 258 n.Left = walkexpr(n.Left, &n.Ninit) 259 } 260 261 // make room for size & fn arguments. 262 adjustargs(n, 2*Widthptr) 263 264 case OFOR, OFORUNTIL: 265 if n.Left != nil { 266 walkstmtlist(n.Left.Ninit.Slice()) 267 init := n.Left.Ninit 268 n.Left.Ninit.Set(nil) 269 n.Left = walkexpr(n.Left, &init) 270 n.Left = addinit(n.Left, init.Slice()) 271 } 272 273 n.Right = walkstmt(n.Right) 274 walkstmtlist(n.Nbody.Slice()) 275 276 case OIF: 277 n.Left = walkexpr(n.Left, &n.Ninit) 278 walkstmtlist(n.Nbody.Slice()) 279 walkstmtlist(n.Rlist.Slice()) 280 281 case OPROC: 282 switch n.Left.Op { 283 case OPRINT, OPRINTN: 284 n.Left = walkprintfunc(n.Left, &n.Ninit) 285 286 case OCOPY: 287 n.Left = copyany(n.Left, &n.Ninit, true) 288 289 default: 290 n.Left = walkexpr(n.Left, &n.Ninit) 291 } 292 293 // make room for size & fn arguments. 
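		// Why 2*Widthptr: the walked call's argument stores are OINDREGSP
		// writes relative to SP, and the go statement becomes a runtime call
		// (newproc, like deferproc above) whose own leading arguments are the
		// argument size and the function value. A rough sketch, assuming that
		// runtime calling convention:
		//
		//	go f(a, b)   ==>   newproc(siz, f, a, b)
		//
		// so each argument store is shifted up by two pointer-sized words.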
294 adjustargs(n, 2*Widthptr) 295 296 case ORETURN: 297 walkexprlist(n.List.Slice(), &n.Ninit) 298 if n.List.Len() == 0 { 299 break 300 } 301 if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) { 302 // assign to the function out parameters, 303 // so that reorder3 can fix up conflicts 304 var rl []*Node 305 306 var cl Class 307 for _, ln := range Curfn.Func.Dcl { 308 cl = ln.Class 309 if cl == PAUTO || cl == PAUTOHEAP { 310 break 311 } 312 if cl == PPARAMOUT { 313 if ln.isParamStackCopy() { 314 ln = walkexpr(typecheck(nod(OIND, ln.Name.Param.Heapaddr, nil), Erv), nil) 315 } 316 rl = append(rl, ln) 317 } 318 } 319 320 if got, want := n.List.Len(), len(rl); got != want { 321 // order should have rewritten multi-value function calls 322 // with explicit OAS2FUNC nodes. 323 Fatalf("expected %v return arguments, have %v", want, got) 324 } 325 326 if samelist(rl, n.List.Slice()) { 327 // special return in disguise 328 n.List.Set(nil) 329 330 break 331 } 332 333 // move function calls out, to make reorder3's job easier. 334 walkexprlistsafe(n.List.Slice(), &n.Ninit) 335 336 ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit) 337 n.List.Set(reorder3(ll)) 338 break 339 } 340 341 ll := ascompatte(nil, false, Curfn.Type.Results(), n.List.Slice(), 1, &n.Ninit) 342 n.List.Set(ll) 343 344 case ORETJMP: 345 break 346 347 case OSELECT: 348 walkselect(n) 349 350 case OSWITCH: 351 walkswitch(n) 352 353 case ORANGE: 354 n = walkrange(n) 355 356 case OXFALL: 357 yyerror("fallthrough statement out of place") 358 n.Op = OFALL 359 } 360 361 if n.Op == ONAME { 362 Fatalf("walkstmt ended up with name: %+v", n) 363 } 364 return n 365 } 366 367 func isSmallMakeSlice(n *Node) bool { 368 if n.Op != OMAKESLICE { 369 return false 370 } 371 l := n.Left 372 r := n.Right 373 if r == nil { 374 r = l 375 } 376 t := n.Type 377 378 return smallintconst(l) && smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < (1<<16)/t.Elem().Width) 379 } 380 381 // walk the whole tree of the body of an 382 // expression or simple statement. 383 // the types expressions are calculated. 384 // compile-time constants are evaluated. 385 // complex side effects like statements are appended to init 386 func walkexprlist(s []*Node, init *Nodes) { 387 for i := range s { 388 s[i] = walkexpr(s[i], init) 389 } 390 } 391 392 func walkexprlistsafe(s []*Node, init *Nodes) { 393 for i, n := range s { 394 s[i] = safeexpr(n, init) 395 s[i] = walkexpr(s[i], init) 396 } 397 } 398 399 func walkexprlistcheap(s []*Node, init *Nodes) { 400 for i, n := range s { 401 s[i] = cheapexpr(n, init) 402 s[i] = walkexpr(s[i], init) 403 } 404 } 405 406 // Build name of function for interface conversion. 407 // Not all names are possible 408 // (e.g., we'll never generate convE2E or convE2I or convI2E). 
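// For example (a sketch of the cases below): string to empty interface uses
// convT2Estring, an 8-byte, suitably aligned, pointer-free type to a non-empty
// interface uses convT2I64, and the generic fallbacks are convT2E and convT2I.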
409 func convFuncName(from, to *types.Type) string { 410 tkind := to.Tie() 411 switch from.Tie() { 412 case 'I': 413 switch tkind { 414 case 'I': 415 return "convI2I" 416 } 417 case 'T': 418 switch tkind { 419 case 'E': 420 switch { 421 case from.Size() == 2 && from.Align == 2: 422 return "convT2E16" 423 case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from): 424 return "convT2E32" 425 case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from): 426 return "convT2E64" 427 case from.IsString(): 428 return "convT2Estring" 429 case from.IsSlice(): 430 return "convT2Eslice" 431 case !types.Haspointers(from): 432 return "convT2Enoptr" 433 } 434 return "convT2E" 435 case 'I': 436 switch { 437 case from.Size() == 2 && from.Align == 2: 438 return "convT2I16" 439 case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from): 440 return "convT2I32" 441 case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from): 442 return "convT2I64" 443 case from.IsString(): 444 return "convT2Istring" 445 case from.IsSlice(): 446 return "convT2Islice" 447 case !types.Haspointers(from): 448 return "convT2Inoptr" 449 } 450 return "convT2I" 451 } 452 } 453 Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie()) 454 panic("unreachable") 455 } 456 457 // The result of walkexpr MUST be assigned back to n, e.g. 458 // n.Left = walkexpr(n.Left, init) 459 func walkexpr(n *Node, init *Nodes) *Node { 460 if n == nil { 461 return n 462 } 463 464 if init == &n.Ninit { 465 // not okay to use n->ninit when walking n, 466 // because we might replace n with some other node 467 // and would lose the init list. 468 Fatalf("walkexpr init == &n->ninit") 469 } 470 471 if n.Ninit.Len() != 0 { 472 walkstmtlist(n.Ninit.Slice()) 473 init.AppendNodes(&n.Ninit) 474 } 475 476 lno := setlineno(n) 477 478 if Debug['w'] > 1 { 479 Dump("walk-before", n) 480 } 481 482 if n.Typecheck != 1 { 483 Fatalf("missed typecheck: %+v", n) 484 } 485 486 if n.Op == ONAME && n.Class == PAUTOHEAP { 487 nn := nod(OIND, n.Name.Param.Heapaddr, nil) 488 nn = typecheck(nn, Erv) 489 nn = walkexpr(nn, init) 490 nn.Left.SetNonNil(true) 491 return nn 492 } 493 494 opswitch: 495 switch n.Op { 496 default: 497 Dump("walk", n) 498 Fatalf("walkexpr: switch 1 unknown op %+S", n) 499 500 case ONONAME, OINDREGSP, OEMPTY, OGETG: 501 502 case OTYPE, ONAME, OLITERAL: 503 // TODO(mdempsky): Just return n; see discussion on CL 38655. 504 // Perhaps refactor to use Node.mayBeShared for these instead. 505 // If these return early, make sure to still call 506 // stringsym for constant strings. 507 508 case ONOT, OMINUS, OPLUS, OCOM, OREAL, OIMAG, ODOTMETH, ODOTINTER, 509 OIND, OSPTR, OITAB, OIDATA, OADDR: 510 n.Left = walkexpr(n.Left, init) 511 512 case OEFACE, OAND, OSUB, OMUL, OLT, OLE, OGE, OGT, OADD, OOR, OXOR: 513 n.Left = walkexpr(n.Left, init) 514 n.Right = walkexpr(n.Right, init) 515 516 case ODOT: 517 usefield(n) 518 n.Left = walkexpr(n.Left, init) 519 520 case ODOTTYPE, ODOTTYPE2: 521 n.Left = walkexpr(n.Left, init) 522 // Set up interface type addresses for back end. 523 n.Right = typename(n.Type) 524 if n.Op == ODOTTYPE { 525 n.Right.Right = typename(n.Left.Type) 526 } 527 if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() { 528 n.List.Set1(itabname(n.Type, n.Left.Type)) 529 } 530 531 case ODOTPTR: 532 usefield(n) 533 if n.Op == ODOTPTR && n.Left.Type.Elem().Width == 0 { 534 // No actual copy will be generated, so emit an explicit nil check. 
535 n.Left = cheapexpr(n.Left, init) 536 537 checknil(n.Left, init) 538 } 539 540 n.Left = walkexpr(n.Left, init) 541 542 case OLEN, OCAP: 543 n.Left = walkexpr(n.Left, init) 544 545 // replace len(*[10]int) with 10. 546 // delayed until now to preserve side effects. 547 t := n.Left.Type 548 549 if t.IsPtr() { 550 t = t.Elem() 551 } 552 if t.IsArray() { 553 safeexpr(n.Left, init) 554 nodconst(n, n.Type, t.NumElem()) 555 n.Typecheck = 1 556 } 557 558 case OLSH, ORSH: 559 n.Left = walkexpr(n.Left, init) 560 n.Right = walkexpr(n.Right, init) 561 t := n.Left.Type 562 n.SetBounded(bounded(n.Right, 8*t.Width)) 563 if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) { 564 Warn("shift bounds check elided") 565 } 566 567 case OCOMPLEX: 568 // Use results from call expression as arguments for complex. 569 if n.Left == nil && n.Right == nil { 570 n.Left = n.List.First() 571 n.Right = n.List.Second() 572 } 573 n.Left = walkexpr(n.Left, init) 574 n.Right = walkexpr(n.Right, init) 575 576 case OEQ, ONE: 577 n.Left = walkexpr(n.Left, init) 578 n.Right = walkexpr(n.Right, init) 579 580 // Disable safemode while compiling this code: the code we 581 // generate internally can refer to unsafe.Pointer. 582 // In this case it can happen if we need to generate an == 583 // for a struct containing a reflect.Value, which itself has 584 // an unexported field of type unsafe.Pointer. 585 old_safemode := safemode 586 safemode = false 587 n = walkcompare(n, init) 588 safemode = old_safemode 589 590 case OANDAND, OOROR: 591 n.Left = walkexpr(n.Left, init) 592 593 // cannot put side effects from n.Right on init, 594 // because they cannot run before n.Left is checked. 595 // save elsewhere and store on the eventual n.Right. 596 var ll Nodes 597 598 n.Right = walkexpr(n.Right, &ll) 599 n.Right = addinit(n.Right, ll.Slice()) 600 n = walkinrange(n, init) 601 602 case OPRINT, OPRINTN: 603 walkexprlist(n.List.Slice(), init) 604 n = walkprint(n, init) 605 606 case OPANIC: 607 n = mkcall("gopanic", nil, init, n.Left) 608 609 case ORECOVER: 610 n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil)) 611 612 case OCLOSUREVAR, OCFUNC: 613 n.SetAddable(true) 614 615 case OCALLINTER: 616 usemethod(n) 617 t := n.Left.Type 618 if n.List.Len() != 0 && n.List.First().Op == OAS { 619 break 620 } 621 n.Left = walkexpr(n.Left, init) 622 walkexprlist(n.List.Slice(), init) 623 ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init) 624 n.List.Set(reorder1(ll)) 625 626 case OCALLFUNC: 627 if n.Left.Op == OCLOSURE { 628 // Transform direct call of a closure to call of a normal function. 629 // transformclosure already did all preparation work. 630 631 // Prepend captured variables to argument list. 632 n.List.Prepend(n.Left.Func.Enter.Slice()...) 633 634 n.Left.Func.Enter.Set(nil) 635 636 // Replace OCLOSURE with ONAME/PFUNC. 637 n.Left = n.Left.Func.Closure.Func.Nname 638 639 // Update type of OCALLFUNC node. 640 // Output arguments had not changed, but their offsets could. 
641 if n.Left.Type.Results().NumFields() == 1 { 642 n.Type = n.Left.Type.Results().Field(0).Type 643 } else { 644 n.Type = n.Left.Type.Results() 645 } 646 } 647 648 t := n.Left.Type 649 if n.List.Len() != 0 && n.List.First().Op == OAS { 650 break 651 } 652 653 n.Left = walkexpr(n.Left, init) 654 walkexprlist(n.List.Slice(), init) 655 656 ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init) 657 n.List.Set(reorder1(ll)) 658 659 case OCALLMETH: 660 t := n.Left.Type 661 if n.List.Len() != 0 && n.List.First().Op == OAS { 662 break 663 } 664 n.Left = walkexpr(n.Left, init) 665 walkexprlist(n.List.Slice(), init) 666 ll := ascompatte(n, false, t.Recvs(), []*Node{n.Left.Left}, 0, init) 667 lr := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init) 668 ll = append(ll, lr...) 669 n.Left.Left = nil 670 updateHasCall(n.Left) 671 n.List.Set(reorder1(ll)) 672 673 case OAS: 674 init.AppendNodes(&n.Ninit) 675 676 n.Left = walkexpr(n.Left, init) 677 n.Left = safeexpr(n.Left, init) 678 679 if oaslit(n, init) { 680 break 681 } 682 683 if n.Right == nil { 684 // TODO(austin): Check all "implicit zeroing" 685 break 686 } 687 688 if !instrumenting && iszero(n.Right) { 689 break 690 } 691 692 switch n.Right.Op { 693 default: 694 n.Right = walkexpr(n.Right, init) 695 696 case ORECV: 697 // x = <-c; n.Left is x, n.Right.Left is c. 698 // orderstmt made sure x is addressable. 699 n.Right.Left = walkexpr(n.Right.Left, init) 700 701 n1 := nod(OADDR, n.Left, nil) 702 r := n.Right.Left // the channel 703 n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1) 704 n = walkexpr(n, init) 705 break opswitch 706 707 case OAPPEND: 708 // x = append(...) 709 r := n.Right 710 if r.Type.Elem().NotInHeap() { 711 yyerror("%v is go:notinheap; heap allocation disallowed", r.Type.Elem()) 712 } 713 if r.Isddd() { 714 r = appendslice(r, init) // also works for append(slice, string). 715 } else { 716 r = walkappend(r, init, n) 717 } 718 n.Right = r 719 if r.Op == OAPPEND { 720 // Left in place for back end. 721 // Do not add a new write barrier. 722 // Set up address of type for back end. 723 r.Left = typename(r.Type.Elem()) 724 break opswitch 725 } 726 // Otherwise, lowered for race detector. 727 // Treat as ordinary assignment. 728 } 729 730 if n.Left != nil && n.Right != nil { 731 n = convas(n, init) 732 } 733 734 case OAS2: 735 init.AppendNodes(&n.Ninit) 736 walkexprlistsafe(n.List.Slice(), init) 737 walkexprlistsafe(n.Rlist.Slice(), init) 738 ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init) 739 ll = reorder3(ll) 740 n = liststmt(ll) 741 742 // a,b,... = fn() 743 case OAS2FUNC: 744 init.AppendNodes(&n.Ninit) 745 746 r := n.Rlist.First() 747 walkexprlistsafe(n.List.Slice(), init) 748 r = walkexpr(r, init) 749 750 if isIntrinsicCall(r) { 751 n.Rlist.Set1(r) 752 break 753 } 754 init.Append(r) 755 756 ll := ascompatet(n.Op, n.List, r.Type) 757 n = liststmt(ll) 758 759 // x, y = <-c 760 // orderstmt made sure x is addressable. 
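	// A sketch of the lowering performed below, using the chanrecv2 helper
	// that the code selects (x's address is replaced by nil when x is blank):
	//	x, ok = <-c   ==>   ok = chanrecv2(c, &x)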
761 case OAS2RECV: 762 init.AppendNodes(&n.Ninit) 763 764 r := n.Rlist.First() 765 walkexprlistsafe(n.List.Slice(), init) 766 r.Left = walkexpr(r.Left, init) 767 var n1 *Node 768 if isblank(n.List.First()) { 769 n1 = nodnil() 770 } else { 771 n1 = nod(OADDR, n.List.First(), nil) 772 } 773 n1.Etype = 1 // addr does not escape 774 fn := chanfn("chanrecv2", 2, r.Left.Type) 775 ok := n.List.Second() 776 call := mkcall1(fn, ok.Type, init, r.Left, n1) 777 n = nod(OAS, ok, call) 778 n = typecheck(n, Etop) 779 780 // a,b = m[i] 781 case OAS2MAPR: 782 init.AppendNodes(&n.Ninit) 783 784 r := n.Rlist.First() 785 walkexprlistsafe(n.List.Slice(), init) 786 r.Left = walkexpr(r.Left, init) 787 r.Right = walkexpr(r.Right, init) 788 t := r.Left.Type 789 790 fast := mapfast(t) 791 var key *Node 792 if fast != mapslow { 793 // fast versions take key by value 794 key = r.Right 795 } else { 796 // standard version takes key by reference 797 // orderexpr made sure key is addressable. 798 key = nod(OADDR, r.Right, nil) 799 } 800 801 // from: 802 // a,b = m[i] 803 // to: 804 // var,b = mapaccess2*(t, m, i) 805 // a = *var 806 a := n.List.First() 807 808 if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero 809 fn := mapfn(mapaccess2[fast], t) 810 r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key) 811 } else { 812 fn := mapfn("mapaccess2_fat", t) 813 z := zeroaddr(w) 814 r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z) 815 } 816 817 // mapaccess2* returns a typed bool, but due to spec changes, 818 // the boolean result of i.(T) is now untyped so we make it the 819 // same type as the variable on the lhs. 820 if ok := n.List.Second(); !isblank(ok) && ok.Type.IsBoolean() { 821 r.Type.Field(1).Type = ok.Type 822 } 823 n.Rlist.Set1(r) 824 n.Op = OAS2FUNC 825 826 // don't generate a = *var if a is _ 827 if !isblank(a) { 828 var_ := temp(types.NewPtr(t.Val())) 829 var_.Typecheck = 1 830 var_.SetNonNil(true) // mapaccess always returns a non-nil pointer 831 n.List.SetFirst(var_) 832 n = walkexpr(n, init) 833 init.Append(n) 834 n = nod(OAS, a, nod(OIND, var_, nil)) 835 } 836 837 n = typecheck(n, Etop) 838 n = walkexpr(n, init) 839 840 case ODELETE: 841 init.AppendNodes(&n.Ninit) 842 map_ := n.List.First() 843 key := n.List.Second() 844 map_ = walkexpr(map_, init) 845 key = walkexpr(key, init) 846 847 t := map_.Type 848 fast := mapfast(t) 849 if fast == mapslow { 850 // orderstmt made sure key is addressable. 851 key = nod(OADDR, key, nil) 852 } 853 n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) 854 855 case OAS2DOTTYPE: 856 walkexprlistsafe(n.List.Slice(), init) 857 n.Rlist.SetFirst(walkexpr(n.Rlist.First(), init)) 858 859 case OCONVIFACE: 860 n.Left = walkexpr(n.Left, init) 861 862 // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. 
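		// Sketch of the pointer-shaped fast path taken just below: the
		// interface value is assembled directly as an OEFACE pair,
		//
		//	iface{itabname(T, I), v}   (typename(T) instead when I is interface{})
		//
		// so no runtime convT2E/convT2I call is needed.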
863 if isdirectiface(n.Left.Type) { 864 var t *Node 865 if n.Type.IsEmptyInterface() { 866 t = typename(n.Left.Type) 867 } else { 868 t = itabname(n.Left.Type, n.Type) 869 } 870 l := nod(OEFACE, t, n.Left) 871 l.Type = n.Type 872 l.Typecheck = n.Typecheck 873 n = l 874 break 875 } 876 877 if staticbytes == nil { 878 staticbytes = newname(Runtimepkg.Lookup("staticbytes")) 879 staticbytes.Class = PEXTERN 880 staticbytes.Type = types.NewArray(types.Types[TUINT8], 256) 881 zerobase = newname(Runtimepkg.Lookup("zerobase")) 882 zerobase.Class = PEXTERN 883 zerobase.Type = types.Types[TUINTPTR] 884 } 885 886 // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, 887 // by using an existing addressable value identical to n.Left 888 // or creating one on the stack. 889 var value *Node 890 switch { 891 case n.Left.Type.Size() == 0: 892 // n.Left is zero-sized. Use zerobase. 893 cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246. 894 value = zerobase 895 case n.Left.Type.IsBoolean() || (n.Left.Type.Size() == 1 && n.Left.Type.IsInteger()): 896 // n.Left is a bool/byte. Use staticbytes[n.Left]. 897 n.Left = cheapexpr(n.Left, init) 898 value = nod(OINDEX, staticbytes, byteindex(n.Left)) 899 value.SetBounded(true) 900 case n.Left.Class == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly(): 901 // n.Left is a readonly global; use it directly. 902 value = n.Left 903 case !n.Left.Type.IsInterface() && n.Esc == EscNone && n.Left.Type.Width <= 1024: 904 // n.Left does not escape. Use a stack temporary initialized to n.Left. 905 value = temp(n.Left.Type) 906 init.Append(typecheck(nod(OAS, value, n.Left), Etop)) 907 } 908 909 if value != nil { 910 // Value is identical to n.Left. 911 // Construct the interface directly: {type/itab, &value}. 912 var t *Node 913 if n.Type.IsEmptyInterface() { 914 t = typename(n.Left.Type) 915 } else { 916 t = itabname(n.Left.Type, n.Type) 917 } 918 l := nod(OEFACE, t, typecheck(nod(OADDR, value, nil), Erv)) 919 l.Type = n.Type 920 l.Typecheck = n.Typecheck 921 n = l 922 break 923 } 924 925 // Implement interface to empty interface conversion. 926 // tmp = i.itab 927 // if tmp != nil { 928 // tmp = tmp.type 929 // } 930 // e = iface{tmp, i.data} 931 if n.Type.IsEmptyInterface() && n.Left.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() { 932 // Evaluate the input interface. 933 c := temp(n.Left.Type) 934 init.Append(nod(OAS, c, n.Left)) 935 936 // Get the itab out of the interface. 937 tmp := temp(types.NewPtr(types.Types[TUINT8])) 938 init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), Erv))) 939 940 // Get the type out of the itab. 941 nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), Erv), nil) 942 nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp))) 943 init.Append(nif) 944 945 // Build the result. 946 e := nod(OEFACE, tmp, ifaceData(c, types.NewPtr(types.Types[TUINT8]))) 947 e.Type = n.Type // assign type manually, typecheck doesn't understand OEFACE. 
948 e.Typecheck = 1 949 n = e 950 break 951 } 952 953 var ll []*Node 954 if n.Type.IsEmptyInterface() { 955 if !n.Left.Type.IsInterface() { 956 ll = append(ll, typename(n.Left.Type)) 957 } 958 } else { 959 if n.Left.Type.IsInterface() { 960 ll = append(ll, typename(n.Type)) 961 } else { 962 ll = append(ll, itabname(n.Left.Type, n.Type)) 963 } 964 } 965 966 if n.Left.Type.IsInterface() { 967 ll = append(ll, n.Left) 968 } else { 969 // regular types are passed by reference to avoid C vararg calls 970 // orderexpr arranged for n.Left to be a temporary for all 971 // the conversions it could see. comparison of an interface 972 // with a non-interface, especially in a switch on interface value 973 // with non-interface cases, is not visible to orderstmt, so we 974 // have to fall back on allocating a temp here. 975 if islvalue(n.Left) { 976 ll = append(ll, nod(OADDR, n.Left, nil)) 977 } else { 978 ll = append(ll, nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil)) 979 } 980 dowidth(n.Left.Type) 981 } 982 983 fn := syslook(convFuncName(n.Left.Type, n.Type)) 984 fn = substArgTypes(fn, n.Left.Type, n.Type) 985 dowidth(fn.Type) 986 n = nod(OCALL, fn, nil) 987 n.List.Set(ll) 988 n = typecheck(n, Erv) 989 n = walkexpr(n, init) 990 991 case OCONV, OCONVNOP: 992 if thearch.LinkArch.Family == sys.ARM || thearch.LinkArch.Family == sys.MIPS { 993 if n.Left.Type.IsFloat() { 994 if n.Type.Etype == TINT64 { 995 n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 996 break 997 } 998 999 if n.Type.Etype == TUINT64 { 1000 n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1001 break 1002 } 1003 } 1004 1005 if n.Type.IsFloat() { 1006 if n.Left.Type.Etype == TINT64 { 1007 n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type) 1008 break 1009 } 1010 1011 if n.Left.Type.Etype == TUINT64 { 1012 n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type) 1013 break 1014 } 1015 } 1016 } 1017 1018 if thearch.LinkArch.Family == sys.I386 { 1019 if n.Left.Type.IsFloat() { 1020 if n.Type.Etype == TINT64 { 1021 n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1022 break 1023 } 1024 1025 if n.Type.Etype == TUINT64 { 1026 n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1027 break 1028 } 1029 if n.Type.Etype == TUINT32 || n.Type.Etype == TUINT || n.Type.Etype == TUINTPTR { 1030 n = mkcall("float64touint32", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1031 break 1032 } 1033 } 1034 if n.Type.IsFloat() { 1035 if n.Left.Type.Etype == TINT64 { 1036 n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type) 1037 break 1038 } 1039 1040 if n.Left.Type.Etype == TUINT64 { 1041 n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type) 1042 break 1043 } 1044 if n.Left.Type.Etype == TUINT32 || n.Left.Type.Etype == TUINT || n.Left.Type.Etype == TUINTPTR { 1045 n = conv(mkcall("uint32tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT32])), n.Type) 1046 break 1047 } 1048 } 1049 } 1050 1051 n.Left = walkexpr(n.Left, init) 1052 1053 case OANDNOT: 1054 n.Left = walkexpr(n.Left, init) 1055 n.Op = OAND 1056 n.Right = nod(OCOM, n.Right, nil) 1057 n.Right = typecheck(n.Right, Erv) 1058 n.Right = walkexpr(n.Right, init) 1059 1060 case ODIV, OMOD: 1061 n.Left = walkexpr(n.Left, init) 1062 n.Right = 
walkexpr(n.Right, init) 1063 1064 // rewrite complex div into function call. 1065 et := n.Left.Type.Etype 1066 1067 if isComplex[et] && n.Op == ODIV { 1068 t := n.Type 1069 n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128])) 1070 n = conv(n, t) 1071 break 1072 } 1073 1074 // Nothing to do for float divisions. 1075 if isFloat[et] { 1076 break 1077 } 1078 1079 // rewrite 64-bit div and mod on 32-bit architectures. 1080 // TODO: Remove this code once we can introduce 1081 // runtime calls late in SSA processing. 1082 if Widthreg < 8 && (et == TINT64 || et == TUINT64) { 1083 if n.Right.Op == OLITERAL { 1084 // Leave div/mod by constant powers of 2. 1085 // The SSA backend will handle those. 1086 switch et { 1087 case TINT64: 1088 c := n.Right.Int64() 1089 if c < 0 { 1090 c = -c 1091 } 1092 if c != 0 && c&(c-1) == 0 { 1093 break opswitch 1094 } 1095 case TUINT64: 1096 c := uint64(n.Right.Int64()) 1097 if c != 0 && c&(c-1) == 0 { 1098 break opswitch 1099 } 1100 } 1101 } 1102 var fn string 1103 if et == TINT64 { 1104 fn = "int64" 1105 } else { 1106 fn = "uint64" 1107 } 1108 if n.Op == ODIV { 1109 fn += "div" 1110 } else { 1111 fn += "mod" 1112 } 1113 n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et])) 1114 } 1115 1116 case OINDEX: 1117 n.Left = walkexpr(n.Left, init) 1118 1119 // save the original node for bounds checking elision. 1120 // If it was a ODIV/OMOD walk might rewrite it. 1121 r := n.Right 1122 1123 n.Right = walkexpr(n.Right, init) 1124 1125 // if range of type cannot exceed static array bound, 1126 // disable bounds check. 1127 if n.Bounded() { 1128 break 1129 } 1130 t := n.Left.Type 1131 if t != nil && t.IsPtr() { 1132 t = t.Elem() 1133 } 1134 if t.IsArray() { 1135 n.SetBounded(bounded(r, t.NumElem())) 1136 if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) { 1137 Warn("index bounds check elided") 1138 } 1139 if smallintconst(n.Right) && !n.Bounded() { 1140 yyerror("index out of bounds") 1141 } 1142 } else if Isconst(n.Left, CTSTR) { 1143 n.SetBounded(bounded(r, int64(len(n.Left.Val().U.(string))))) 1144 if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) { 1145 Warn("index bounds check elided") 1146 } 1147 if smallintconst(n.Right) && !n.Bounded() { 1148 yyerror("index out of bounds") 1149 } 1150 } 1151 1152 if Isconst(n.Right, CTINT) { 1153 if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 { 1154 yyerror("index out of bounds") 1155 } 1156 } 1157 1158 case OINDEXMAP: 1159 // Replace m[k] with *map{access1,assign}(maptype, m, &k) 1160 n.Left = walkexpr(n.Left, init) 1161 n.Right = walkexpr(n.Right, init) 1162 map_ := n.Left 1163 key := n.Right 1164 t := map_.Type 1165 if n.Etype == 1 { 1166 // This m[k] expression is on the left-hand side of an assignment. 1167 fast := mapfast(t) 1168 if fast == mapslow { 1169 // standard version takes key by reference. 1170 // orderexpr made sure key is addressable. 1171 key = nod(OADDR, key, nil) 1172 } 1173 n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key) 1174 } else { 1175 // m[k] is not the target of an assignment. 1176 fast := mapfast(t) 1177 if fast == mapslow { 1178 // standard version takes key by reference. 1179 // orderexpr made sure key is addressable. 
1180 key = nod(OADDR, key, nil) 1181 } 1182 1183 if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero 1184 n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Val()), init, typename(t), map_, key) 1185 } else { 1186 z := zeroaddr(w) 1187 n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Val()), init, typename(t), map_, key, z) 1188 } 1189 } 1190 n.Type = types.NewPtr(t.Val()) 1191 n.SetNonNil(true) // mapaccess1* and mapassign always return non-nil pointers. 1192 n = nod(OIND, n, nil) 1193 n.Type = t.Val() 1194 n.Typecheck = 1 1195 1196 case ORECV: 1197 Fatalf("walkexpr ORECV") // should see inside OAS only 1198 1199 case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: 1200 n.Left = walkexpr(n.Left, init) 1201 low, high, max := n.SliceBounds() 1202 low = walkexpr(low, init) 1203 if low != nil && iszero(low) { 1204 // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k]. 1205 low = nil 1206 } 1207 high = walkexpr(high, init) 1208 max = walkexpr(max, init) 1209 n.SetSliceBounds(low, high, max) 1210 if n.Op.IsSlice3() { 1211 if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) { 1212 // Reduce x[i:j:cap(x)] to x[i:j]. 1213 if n.Op == OSLICE3 { 1214 n.Op = OSLICE 1215 } else { 1216 n.Op = OSLICEARR 1217 } 1218 n = reduceSlice(n) 1219 } 1220 } else { 1221 n = reduceSlice(n) 1222 } 1223 1224 case ONEW: 1225 if n.Esc == EscNone { 1226 if n.Type.Elem().Width >= 1<<16 { 1227 Fatalf("large ONEW with EscNone: %v", n) 1228 } 1229 r := temp(n.Type.Elem()) 1230 r = nod(OAS, r, nil) // zero temp 1231 r = typecheck(r, Etop) 1232 init.Append(r) 1233 r = nod(OADDR, r.Left, nil) 1234 r = typecheck(r, Erv) 1235 n = r 1236 } else { 1237 n = callnew(n.Type.Elem()) 1238 } 1239 1240 case OCMPSTR: 1241 // s + "badgerbadgerbadger" == "badgerbadgerbadger" 1242 if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && n.Left.List.Len() == 2 && Isconst(n.Left.List.Second(), CTSTR) && strlit(n.Right) == strlit(n.Left.List.Second()) { 1243 // TODO(marvin): Fix Node.EType type union. 1244 r := nod(Op(n.Etype), nod(OLEN, n.Left.List.First(), nil), nodintconst(0)) 1245 r = typecheck(r, Erv) 1246 r = walkexpr(r, init) 1247 r.Type = n.Type 1248 n = r 1249 break 1250 } 1251 1252 // Rewrite comparisons to short constant strings as length+byte-wise comparisons. 1253 var cs, ncs *Node // const string, non-const string 1254 switch { 1255 case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR): 1256 // ignore; will be constant evaluated 1257 case Isconst(n.Left, CTSTR): 1258 cs = n.Left 1259 ncs = n.Right 1260 case Isconst(n.Right, CTSTR): 1261 cs = n.Right 1262 ncs = n.Left 1263 } 1264 if cs != nil { 1265 cmp := Op(n.Etype) 1266 // maxRewriteLen was chosen empirically. 1267 // It is the value that minimizes cmd/go file size 1268 // across most architectures. 1269 // See the commit description for CL 26758 for details. 1270 maxRewriteLen := 6 1271 // Some architectures can load unaligned byte sequence as 1 word. 1272 // So we can cover longer strings with the same amount of code. 1273 canCombineLoads := false 1274 combine64bit := false 1275 // TODO: does this improve performance on any other architectures? 1276 switch thearch.LinkArch.Family { 1277 case sys.AMD64: 1278 // Larger compare require longer instructions, so keep this reasonably low. 1279 // Data from CL 26758 shows that longer strings are rare. 1280 // If we really want we can do 16 byte SSE comparisons in the future. 
1281 maxRewriteLen = 16 1282 canCombineLoads = true 1283 combine64bit = true 1284 case sys.I386: 1285 maxRewriteLen = 8 1286 canCombineLoads = true 1287 } 1288 var and Op 1289 switch cmp { 1290 case OEQ: 1291 and = OANDAND 1292 case ONE: 1293 and = OOROR 1294 default: 1295 // Don't do byte-wise comparisons for <, <=, etc. 1296 // They're fairly complicated. 1297 // Length-only checks are ok, though. 1298 maxRewriteLen = 0 1299 } 1300 if s := cs.Val().U.(string); len(s) <= maxRewriteLen { 1301 if len(s) > 0 { 1302 ncs = safeexpr(ncs, init) 1303 } 1304 // TODO(marvin): Fix Node.EType type union. 1305 r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s)))) 1306 remains := len(s) 1307 for i := 0; remains > 0; { 1308 if remains == 1 || !canCombineLoads { 1309 cb := nodintconst(int64(s[i])) 1310 ncb := nod(OINDEX, ncs, nodintconst(int64(i))) 1311 r = nod(and, r, nod(cmp, ncb, cb)) 1312 remains-- 1313 i++ 1314 continue 1315 } 1316 var step int 1317 var convType *types.Type 1318 switch { 1319 case remains >= 8 && combine64bit: 1320 convType = types.Types[TINT64] 1321 step = 8 1322 case remains >= 4: 1323 convType = types.Types[TUINT32] 1324 step = 4 1325 case remains >= 2: 1326 convType = types.Types[TUINT16] 1327 step = 2 1328 } 1329 ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i))) 1330 ncsubstr = conv(ncsubstr, convType) 1331 csubstr := int64(s[i]) 1332 // Calculate large constant from bytes as sequence of shifts and ors. 1333 // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... 1334 // ssa will combine this into a single large load. 1335 for offset := 1; offset < step; offset++ { 1336 b := nod(OINDEX, ncs, nodintconst(int64(i+offset))) 1337 b = conv(b, convType) 1338 b = nod(OLSH, b, nodintconst(int64(8*offset))) 1339 ncsubstr = nod(OOR, ncsubstr, b) 1340 csubstr = csubstr | int64(s[i+offset])<<uint8(8*offset) 1341 } 1342 csubstrPart := nodintconst(csubstr) 1343 // Compare "step" bytes as once 1344 r = nod(and, r, nod(cmp, csubstrPart, ncsubstr)) 1345 remains -= step 1346 i += step 1347 } 1348 r = typecheck(r, Erv) 1349 r = walkexpr(r, init) 1350 r.Type = n.Type 1351 n = r 1352 break 1353 } 1354 } 1355 1356 var r *Node 1357 // TODO(marvin): Fix Node.EType type union. 1358 if Op(n.Etype) == OEQ || Op(n.Etype) == ONE { 1359 // prepare for rewrite below 1360 n.Left = cheapexpr(n.Left, init) 1361 n.Right = cheapexpr(n.Right, init) 1362 1363 r = mkcall("eqstring", types.Types[TBOOL], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING])) 1364 1365 // quick check of len before full compare for == or != 1366 // eqstring assumes that the lengths are equal 1367 // TODO(marvin): Fix Node.EType type union. 1368 if Op(n.Etype) == OEQ { 1369 // len(left) == len(right) && eqstring(left, right) 1370 r = nod(OANDAND, nod(OEQ, nod(OLEN, n.Left, nil), nod(OLEN, n.Right, nil)), r) 1371 } else { 1372 // len(left) != len(right) || !eqstring(left, right) 1373 r = nod(ONOT, r, nil) 1374 r = nod(OOROR, nod(ONE, nod(OLEN, n.Left, nil), nod(OLEN, n.Right, nil)), r) 1375 } 1376 1377 r = typecheck(r, Erv) 1378 r = walkexpr(r, nil) 1379 } else { 1380 // sys_cmpstring(s1, s2) :: 0 1381 r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING])) 1382 // TODO(marvin): Fix Node.EType type union. 
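			// e.g. s < t is lowered, roughly, to cmpstring(s, t) < 0,
			// and similarly for <=, > and >=.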
1383 r = nod(Op(n.Etype), r, nodintconst(0)) 1384 } 1385 1386 r = typecheck(r, Erv) 1387 if !n.Type.IsBoolean() { 1388 Fatalf("cmp %v", n.Type) 1389 } 1390 r.Type = n.Type 1391 n = r 1392 1393 case OADDSTR: 1394 n = addstr(n, init) 1395 1396 case OAPPEND: 1397 // order should make sure we only see OAS(node, OAPPEND), which we handle above. 1398 Fatalf("append outside assignment") 1399 1400 case OCOPY: 1401 n = copyany(n, init, instrumenting && !compiling_runtime) 1402 1403 // cannot use chanfn - closechan takes any, not chan any 1404 case OCLOSE: 1405 fn := syslook("closechan") 1406 1407 fn = substArgTypes(fn, n.Left.Type) 1408 n = mkcall1(fn, nil, init, n.Left) 1409 1410 case OMAKECHAN: 1411 n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, types.Types[TINT64])) 1412 1413 case OMAKEMAP: 1414 t := n.Type 1415 1416 a := nodnil() // hmap buffer 1417 r := nodnil() // bucket buffer 1418 if n.Esc == EscNone { 1419 // Allocate hmap buffer on stack. 1420 var_ := temp(hmap(t)) 1421 1422 a = nod(OAS, var_, nil) // zero temp 1423 a = typecheck(a, Etop) 1424 init.Append(a) 1425 a = nod(OADDR, var_, nil) 1426 1427 // Allocate one bucket on stack. 1428 // Maximum key/value size is 128 bytes, larger objects 1429 // are stored with an indirection. So max bucket size is 2048+eps. 1430 var_ = temp(mapbucket(t)) 1431 1432 r = nod(OAS, var_, nil) // zero temp 1433 r = typecheck(r, Etop) 1434 init.Append(r) 1435 r = nod(OADDR, var_, nil) 1436 } 1437 1438 fn := syslook("makemap") 1439 fn = substArgTypes(fn, hmap(t), mapbucket(t), t.Key(), t.Val()) 1440 n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, types.Types[TINT64]), a, r) 1441 1442 case OMAKESLICE: 1443 l := n.Left 1444 r := n.Right 1445 if r == nil { 1446 r = safeexpr(l, init) 1447 l = r 1448 } 1449 t := n.Type 1450 if n.Esc == EscNone { 1451 if !isSmallMakeSlice(n) { 1452 Fatalf("non-small OMAKESLICE with EscNone: %v", n) 1453 } 1454 // var arr [r]T 1455 // n = arr[:l] 1456 t = types.NewArray(t.Elem(), nonnegintconst(r)) // [r]T 1457 var_ := temp(t) 1458 a := nod(OAS, var_, nil) // zero temp 1459 a = typecheck(a, Etop) 1460 init.Append(a) 1461 r := nod(OSLICE, var_, nil) // arr[:l] 1462 r.SetSliceBounds(nil, l, nil) 1463 r = conv(r, n.Type) // in case n.Type is named. 1464 r = typecheck(r, Erv) 1465 r = walkexpr(r, init) 1466 n = r 1467 } else { 1468 // n escapes; set up a call to makeslice. 1469 // When len and cap can fit into int, use makeslice instead of 1470 // makeslice64, which is faster and shorter on 32 bit platforms. 1471 1472 if t.Elem().NotInHeap() { 1473 yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) 1474 } 1475 1476 len, cap := l, r 1477 1478 fnname := "makeslice64" 1479 argtype := types.Types[TINT64] 1480 1481 // typechecking guarantees that TIDEAL len/cap are positive and fit in an int. 1482 // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT 1483 // will be handled by the negative range checks in makeslice during runtime. 
			if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
				(cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
				fnname = "makeslice"
				argtype = types.Types[TINT]
			}

			fn := syslook(fnname)
			fn = substArgTypes(fn, t.Elem()) // any-1
			n = mkcall1(fn, t, init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
		}

	case ORUNESTR:
		a := nodnil()
		if n.Esc == EscNone {
			t := types.NewArray(types.Types[TUINT8], 4)
			var_ := temp(t)
			a = nod(OADDR, var_, nil)
		}

		// intstring(*[4]byte, rune)
		n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64]))

	case OARRAYBYTESTR:
		a := nodnil()
		if n.Esc == EscNone {
			// Create temporary buffer for string on stack.
			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)

			a = nod(OADDR, temp(t), nil)
		}

		// slicebytetostring(*[32]byte, []byte) string;
		n = mkcall("slicebytetostring", n.Type, init, a, n.Left)

	// slicebytetostringtmp([]byte) string;
	case OARRAYBYTESTRTMP:
		n.Left = walkexpr(n.Left, init)

		if !instrumenting {
			// Let the backend handle OARRAYBYTESTRTMP directly
			// to avoid a function call to slicebytetostringtmp.
			break
		}

		n = mkcall("slicebytetostringtmp", n.Type, init, n.Left)

	// slicerunetostring(*[32]byte, []rune) string;
	case OARRAYRUNESTR:
		a := nodnil()

		if n.Esc == EscNone {
			// Create temporary buffer for string on stack.
			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)

			a = nod(OADDR, temp(t), nil)
		}

		n = mkcall("slicerunetostring", n.Type, init, a, n.Left)

	// stringtoslicebyte(*[32]byte, string) []byte;
	case OSTRARRAYBYTE:
		a := nodnil()

		if n.Esc == EscNone {
			// Create temporary buffer for slice on stack.
			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)

			a = nod(OADDR, temp(t), nil)
		}

		n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))

	case OSTRARRAYBYTETMP:
		// []byte(string) conversion that creates a slice
		// referring to the actual string bytes.
		// This conversion is handled later by the backend and
		// is only for use by internal compiler optimizations
		// that know that the slice won't be mutated.
		// The only such case today is:
		// for i, c := range []byte(string)
		n.Left = walkexpr(n.Left, init)

	// stringtoslicerune(*[32]rune, string) []rune
	case OSTRARRAYRUNE:
		a := nodnil()

		if n.Esc == EscNone {
			// Create temporary buffer for slice on stack.
1572 t := types.NewArray(types.Types[TINT32], tmpstringbufsize) 1573 1574 a = nod(OADDR, temp(t), nil) 1575 } 1576 1577 n = mkcall("stringtoslicerune", n.Type, init, a, n.Left) 1578 1579 // ifaceeq(i1 any-1, i2 any-2) (ret bool); 1580 case OCMPIFACE: 1581 if !eqtype(n.Left.Type, n.Right.Type) { 1582 Fatalf("ifaceeq %v %v %v", n.Op, n.Left.Type, n.Right.Type) 1583 } 1584 var fn *Node 1585 if n.Left.Type.IsEmptyInterface() { 1586 fn = syslook("efaceeq") 1587 } else { 1588 fn = syslook("ifaceeq") 1589 } 1590 1591 n.Right = cheapexpr(n.Right, init) 1592 n.Left = cheapexpr(n.Left, init) 1593 lt := nod(OITAB, n.Left, nil) 1594 rt := nod(OITAB, n.Right, nil) 1595 ld := nod(OIDATA, n.Left, nil) 1596 rd := nod(OIDATA, n.Right, nil) 1597 ld.Type = types.Types[TUNSAFEPTR] 1598 rd.Type = types.Types[TUNSAFEPTR] 1599 ld.Typecheck = 1 1600 rd.Typecheck = 1 1601 call := mkcall1(fn, n.Type, init, lt, ld, rd) 1602 1603 // Check itable/type before full compare. 1604 // Note: short-circuited because order matters. 1605 // TODO(marvin): Fix Node.EType type union. 1606 var cmp *Node 1607 if Op(n.Etype) == OEQ { 1608 cmp = nod(OANDAND, nod(OEQ, lt, rt), call) 1609 } else { 1610 cmp = nod(OOROR, nod(ONE, lt, rt), nod(ONOT, call, nil)) 1611 } 1612 cmp = typecheck(cmp, Erv) 1613 cmp = walkexpr(cmp, init) 1614 cmp.Type = n.Type 1615 n = cmp 1616 1617 case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT: 1618 if isStaticCompositeLiteral(n) { 1619 // n can be directly represented in the read-only data section. 1620 // Make direct reference to the static data. See issue 12841. 1621 vstat := staticname(n.Type) 1622 vstat.Name.SetReadonly(true) 1623 fixedlit(inInitFunction, initKindStatic, n, vstat, init) 1624 n = vstat 1625 n = typecheck(n, Erv) 1626 break 1627 } 1628 var_ := temp(n.Type) 1629 anylit(n, var_, init) 1630 n = var_ 1631 1632 case OSEND: 1633 n1 := n.Right 1634 n1 = assignconv(n1, n.Left.Type.Elem(), "chan send") 1635 n1 = walkexpr(n1, init) 1636 n1 = nod(OADDR, n1, nil) 1637 n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1) 1638 1639 case OCLOSURE: 1640 n = walkclosure(n, init) 1641 1642 case OCALLPART: 1643 n = walkpartialcall(n, init) 1644 } 1645 1646 // Expressions that are constant at run time but not 1647 // considered const by the language spec are not turned into 1648 // constants until walk. For example, if n is y%1 == 0, the 1649 // walk of y%1 may have replaced it by 0. 1650 // Check whether n with its updated args is itself now a constant. 1651 t := n.Type 1652 evconst(n) 1653 if n.Type != t { 1654 Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type) 1655 } 1656 if n.Op == OLITERAL { 1657 n = typecheck(n, Erv) 1658 // Emit string symbol now to avoid emitting 1659 // any concurrently during the backend. 1660 if s, ok := n.Val().U.(string); ok { 1661 _ = stringsym(s) 1662 } 1663 } 1664 1665 updateHasCall(n) 1666 1667 if Debug['w'] != 0 && n != nil { 1668 Dump("walk", n) 1669 } 1670 1671 lineno = lno 1672 return n 1673 } 1674 1675 // TODO(josharian): combine this with its caller and simplify 1676 func reduceSlice(n *Node) *Node { 1677 low, high, max := n.SliceBounds() 1678 if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) { 1679 // Reduce x[i:len(x)] to x[i:]. 1680 high = nil 1681 } 1682 n.SetSliceBounds(low, high, max) 1683 if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil { 1684 // Reduce x[:] to x. 
		if Debug_slice > 0 {
			Warn("slice: omit slice operation")
		}
		return n.Left
	}
	return n
}

func ascompatee1(op Op, l *Node, r *Node, init *Nodes) *Node {
	// convas will turn map assigns into function calls,
	// making it impossible for reorder3 to work.
	n := nod(OAS, l, r)

	if l.Op == OINDEXMAP {
		return n
	}

	return convas(n, init)
}

func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
	// check assign expression list to
	// an expression list. called in
	// expr-list = expr-list

	// ensure order of evaluation for function calls
	for i := range nl {
		nl[i] = safeexpr(nl[i], init)
	}
	for i1 := range nr {
		nr[i1] = safeexpr(nr[i1], init)
	}

	var nn []*Node
	i := 0
	for ; i < len(nl); i++ {
		if i >= len(nr) {
			break
		}
		// Do not generate 'x = x' during return. See issue 4014.
		if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
			continue
		}
		nn = append(nn, ascompatee1(op, nl[i], nr[i], init))
	}

	// cannot happen: caller checked that lists had same length
	if i < len(nl) || i < len(nr) {
		var nln, nrn Nodes
		nln.Set(nl)
		nrn.Set(nr)
		Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.Func.Nname.Sym.Name)
	}
	return nn
}

// l is an lv and rt is the type of an rv
// return true if this implies a function call
// evaluating the lv or a function call
// in the conversion of the types
func fncall(l *Node, rt *types.Type) bool {
	if l.HasCall() || l.Op == OINDEXMAP {
		return true
	}
	if needwritebarrier(l) {
		return true
	}
	if eqtype(l.Type, rt) {
		return false
	}
	return true
}

// check assign type list to
// an expression list. called in
// expr-list = func()
func ascompatet(op Op, nl Nodes, nr *types.Type) []*Node {
	if nl.Len() != nr.NumFields() {
		Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
	}

	var nn, mm Nodes
	for i, l := range nl.Slice() {
		if isblank(l) {
			continue
		}
		r := nr.Field(i)

		// any lv that causes a fn call must be
		// deferred until all the return arguments
		// have been pulled from the output arguments
		if fncall(l, r.Type) {
			tmp := temp(r.Type)
			tmp = typecheck(tmp, Erv)
			a := nod(OAS, l, tmp)
			a = convas(a, &mm)
			mm.Append(a)
			l = tmp
		}

		a := nod(OAS, l, nodarg(r, 0))
		a = convas(a, &nn)
		updateHasCall(a)
		if a.HasCall() {
			Dump("ascompatet ucount", a)
			Fatalf("ascompatet: too many function calls evaluating parameters")
		}

		nn.Append(a)
	}
	return append(nn.Slice(), mm.Slice()...)
}

// package all the arguments that match a ... T parameter into a []T.
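// For example (a sketch): for f(xs ...int) called as f(1, 2, 3), the trailing
// arguments are packed into a []int{1, 2, 3} composite literal (reusing a
// preallocated backing array from prealloc when one was set up), whereas
// f(xs...) passes the slice through unchanged via the isddd path in ascompatte.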
1799 func mkdotargslice(typ *types.Type, args []*Node, init *Nodes, ddd *Node) *Node { 1800 esc := uint16(EscUnknown) 1801 if ddd != nil { 1802 esc = ddd.Esc 1803 } 1804 1805 if len(args) == 0 { 1806 n := nodnil() 1807 n.Type = typ 1808 return n 1809 } 1810 1811 n := nod(OCOMPLIT, nil, typenod(typ)) 1812 if ddd != nil && prealloc[ddd] != nil { 1813 prealloc[n] = prealloc[ddd] // temporary to use 1814 } 1815 n.List.Set(args) 1816 n.Esc = esc 1817 n = typecheck(n, Erv) 1818 if n.Type == nil { 1819 Fatalf("mkdotargslice: typecheck failed") 1820 } 1821 n = walkexpr(n, init) 1822 return n 1823 } 1824 1825 // check assign expression list to 1826 // a type list. called in 1827 // return expr-list 1828 // func(expr-list) 1829 func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, init *Nodes) []*Node { 1830 var nn []*Node 1831 1832 // f(g()) where g has multiple return values 1833 if len(rhs) == 1 && rhs[0].Type.IsFuncArgStruct() { 1834 // optimization - can do block copy 1835 if eqtypenoname(rhs[0].Type, lhs) { 1836 nl := nodarg(lhs, fp) 1837 nr := nod(OCONVNOP, rhs[0], nil) 1838 nr.Type = nl.Type 1839 nn = []*Node{convas(nod(OAS, nl, nr), init)} 1840 goto ret 1841 } 1842 1843 // conversions involved. 1844 // copy into temporaries. 1845 var tmps []*Node 1846 for _, nr := range rhs[0].Type.FieldSlice() { 1847 tmps = append(tmps, temp(nr.Type)) 1848 } 1849 1850 a := nod(OAS2, nil, nil) 1851 a.List.Set(tmps) 1852 a.Rlist.Set(rhs) 1853 a = typecheck(a, Etop) 1854 a = walkstmt(a) 1855 init.Append(a) 1856 1857 rhs = tmps 1858 } 1859 1860 // For each parameter (LHS), assign its corresponding argument (RHS). 1861 // If there's a ... parameter (which is only valid as the final 1862 // parameter) and this is not a ... call expression, 1863 // then assign the remaining arguments as a slice. 1864 for i, nl := range lhs.FieldSlice() { 1865 var nr *Node 1866 if nl.Isddd() && !isddd { 1867 nr = mkdotargslice(nl.Type, rhs[i:], init, call.Right) 1868 } else { 1869 nr = rhs[i] 1870 } 1871 1872 a := nod(OAS, nodarg(nl, fp), nr) 1873 a = convas(a, init) 1874 nn = append(nn, a) 1875 } 1876 1877 ret: 1878 for _, n := range nn { 1879 n.Typecheck = 1 1880 } 1881 return nn 1882 } 1883 1884 // generate code for print 1885 func walkprint(nn *Node, init *Nodes) *Node { 1886 var r *Node 1887 var n *Node 1888 var on *Node 1889 var t *types.Type 1890 var et types.EType 1891 1892 op := nn.Op 1893 all := nn.List 1894 var calls []*Node 1895 notfirst := false 1896 1897 // Hoist all the argument evaluation up before the lock. 
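	// Overall shape of the generated code, roughly:
	//
	//	println(x, y)   ==>   printlock(); printint(x); printsp(); printstring(y); printnl(); printunlock()
	//
	// with the helper for each argument chosen by the type switch below.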
1898 walkexprlistcheap(all.Slice(), init) 1899 1900 calls = append(calls, mkcall("printlock", nil, init)) 1901 for i1, n1 := range all.Slice() { 1902 if notfirst { 1903 calls = append(calls, mkcall("printsp", nil, init)) 1904 } 1905 1906 notfirst = op == OPRINTN 1907 1908 n = n1 1909 if n.Op == OLITERAL { 1910 switch n.Val().Ctype() { 1911 case CTRUNE: 1912 n = defaultlit(n, types.Runetype) 1913 1914 case CTINT: 1915 n = defaultlit(n, types.Types[TINT64]) 1916 1917 case CTFLT: 1918 n = defaultlit(n, types.Types[TFLOAT64]) 1919 } 1920 } 1921 1922 if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL { 1923 n = defaultlit(n, types.Types[TINT64]) 1924 } 1925 n = defaultlit(n, nil) 1926 all.SetIndex(i1, n) 1927 if n.Type == nil || n.Type.Etype == TFORW { 1928 continue 1929 } 1930 1931 t = n.Type 1932 et = n.Type.Etype 1933 if n.Type.IsInterface() { 1934 if n.Type.IsEmptyInterface() { 1935 on = syslook("printeface") 1936 } else { 1937 on = syslook("printiface") 1938 } 1939 on = substArgTypes(on, n.Type) // any-1 1940 } else if n.Type.IsPtr() || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR { 1941 on = syslook("printpointer") 1942 on = substArgTypes(on, n.Type) // any-1 1943 } else if n.Type.IsSlice() { 1944 on = syslook("printslice") 1945 on = substArgTypes(on, n.Type) // any-1 1946 } else if isInt[et] { 1947 if et == TUINT64 { 1948 if isRuntimePkg(t.Sym.Pkg) && t.Sym.Name == "hex" { 1949 on = syslook("printhex") 1950 } else { 1951 on = syslook("printuint") 1952 } 1953 } else { 1954 on = syslook("printint") 1955 } 1956 } else if isFloat[et] { 1957 on = syslook("printfloat") 1958 } else if isComplex[et] { 1959 on = syslook("printcomplex") 1960 } else if et == TBOOL { 1961 on = syslook("printbool") 1962 } else if et == TSTRING { 1963 on = syslook("printstring") 1964 } else { 1965 badtype(OPRINT, n.Type, nil) 1966 continue 1967 } 1968 1969 t = on.Type.Params().Field(0).Type 1970 1971 if !eqtype(t, n.Type) { 1972 n = nod(OCONV, n, nil) 1973 n.Type = t 1974 } 1975 1976 r = nod(OCALL, on, nil) 1977 r.List.Append(n) 1978 calls = append(calls, r) 1979 } 1980 1981 if op == OPRINTN { 1982 calls = append(calls, mkcall("printnl", nil, nil)) 1983 } 1984 1985 calls = append(calls, mkcall("printunlock", nil, init)) 1986 1987 typecheckslice(calls, Etop) 1988 walkexprlist(calls, init) 1989 1990 r = nod(OEMPTY, nil, nil) 1991 r = typecheck(r, Etop) 1992 r = walkexpr(r, init) 1993 r.Ninit.Set(calls) 1994 return r 1995 } 1996 1997 func callnew(t *types.Type) *Node { 1998 if t.NotInHeap() { 1999 yyerror("%v is go:notinheap; heap allocation disallowed", t) 2000 } 2001 dowidth(t) 2002 fn := syslook("newobject") 2003 fn = substArgTypes(fn, t) 2004 v := mkcall1(fn, types.NewPtr(t), nil, typename(t)) 2005 v.SetNonNil(true) 2006 return v 2007 } 2008 2009 func iscallret(n *Node) bool { 2010 n = outervalue(n) 2011 return n.Op == OINDREGSP 2012 } 2013 2014 func isstack(n *Node) bool { 2015 n = outervalue(n) 2016 2017 // If n is *autotmp and autotmp = &foo, replace n with foo. 2018 // We introduce such temps when initializing struct literals. 
2019 if n.Op == OIND && n.Left.Op == ONAME && n.Left.IsAutoTmp() { 2020 defn := n.Left.Name.Defn 2021 if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR { 2022 n = defn.Right.Left 2023 } 2024 } 2025 2026 switch n.Op { 2027 case OINDREGSP: 2028 return true 2029 2030 case ONAME: 2031 switch n.Class { 2032 case PAUTO, PPARAM, PPARAMOUT: 2033 return true 2034 } 2035 } 2036 2037 return false 2038 } 2039 2040 // isReflectHeaderDataField reports whether l is an expression p.Data 2041 // where p has type reflect.SliceHeader or reflect.StringHeader. 2042 func isReflectHeaderDataField(l *Node) bool { 2043 if l.Type != types.Types[TUINTPTR] { 2044 return false 2045 } 2046 2047 var tsym *types.Sym 2048 switch l.Op { 2049 case ODOT: 2050 tsym = l.Left.Type.Sym 2051 case ODOTPTR: 2052 tsym = l.Left.Type.Elem().Sym 2053 default: 2054 return false 2055 } 2056 2057 if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" { 2058 return false 2059 } 2060 return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader" 2061 } 2062 2063 // Do we need a write barrier for assigning to l? 2064 func needwritebarrier(l *Node) bool { 2065 if !use_writebarrier { 2066 return false 2067 } 2068 2069 if l == nil || isblank(l) { 2070 return false 2071 } 2072 2073 // No write barrier for write to stack. 2074 if isstack(l) { 2075 return false 2076 } 2077 2078 // Package unsafe's documentation says storing pointers into 2079 // reflect.SliceHeader and reflect.StringHeader's Data fields 2080 // is valid, even though they have type uintptr (#19168). 2081 if isReflectHeaderDataField(l) { 2082 return true 2083 } 2084 2085 // No write barrier for write of non-pointers. 2086 dowidth(l.Type) 2087 if !types.Haspointers(l.Type) { 2088 return false 2089 } 2090 2091 // No write barrier if this is a pointer to a go:notinheap 2092 // type, since the write barrier's inheap(ptr) check will fail. 2093 if l.Type.IsPtr() && l.Type.Elem().NotInHeap() { 2094 return false 2095 } 2096 2097 // TODO: We can eliminate write barriers if we know *both* the 2098 // current and new content of the slot must already be shaded. 2099 // We know a pointer is shaded if it's nil, or points to 2100 // static data, a global (variable or function), or the stack. 2101 // The nil optimization could be particularly useful for 2102 // writes to just-allocated objects. Unfortunately, knowing 2103 // the "current" value of the slot requires flow analysis. 2104 2105 // Otherwise, be conservative and use write barrier. 2106 return true 2107 } 2108 2109 func convas(n *Node, init *Nodes) *Node { 2110 if n.Op != OAS { 2111 Fatalf("convas: not OAS %v", n.Op) 2112 } 2113 2114 n.Typecheck = 1 2115 2116 var lt *types.Type 2117 var rt *types.Type 2118 if n.Left == nil || n.Right == nil { 2119 goto out 2120 } 2121 2122 lt = n.Left.Type 2123 rt = n.Right.Type 2124 if lt == nil || rt == nil { 2125 goto out 2126 } 2127 2128 if isblank(n.Left) { 2129 n.Right = defaultlit(n.Right, nil) 2130 goto out 2131 } 2132 2133 if !eqtype(lt, rt) { 2134 n.Right = assignconv(n.Right, lt, "assignment") 2135 n.Right = walkexpr(n.Right, init) 2136 } 2137 dowidth(n.Right.Type) 2138 2139 out: 2140 updateHasCall(n) 2141 return n 2142 } 2143 2144 // from ascompat[te] 2145 // evaluating actual function arguments. 2146 // f(a,b) 2147 // if there is exactly one function expr, 2148 // then it is done first. 
otherwise must 2149 // make temp variables 2150 func reorder1(all []*Node) []*Node { 2151 c := 0 // function calls 2152 t := 0 // total parameters 2153 2154 for _, n := range all { 2155 t++ 2156 updateHasCall(n) 2157 if n.HasCall() { 2158 c++ 2159 } 2160 } 2161 2162 if c == 0 || t == 1 { 2163 return all 2164 } 2165 2166 var g []*Node // fncalls assigned to tempnames 2167 var f *Node // last fncall assigned to stack 2168 var r []*Node // non fncalls and tempnames assigned to stack 2169 d := 0 2170 var a *Node 2171 for _, n := range all { 2172 if !n.HasCall() { 2173 r = append(r, n) 2174 continue 2175 } 2176 2177 d++ 2178 if d == c { 2179 f = n 2180 continue 2181 } 2182 2183 // make assignment of fncall to tempname 2184 a = temp(n.Right.Type) 2185 2186 a = nod(OAS, a, n.Right) 2187 g = append(g, a) 2188 2189 // put normal arg assignment on list 2190 // with fncall replaced by tempname 2191 n.Right = a.Left 2192 2193 r = append(r, n) 2194 } 2195 2196 if f != nil { 2197 g = append(g, f) 2198 } 2199 return append(g, r...) 2200 } 2201 2202 // from ascompat[ee] 2203 // a,b = c,d 2204 // simultaneous assignment. there cannot 2205 // be later use of an earlier lvalue. 2206 // 2207 // function calls have been removed. 2208 func reorder3(all []*Node) []*Node { 2209 var l *Node 2210 2211 // If a needed expression may be affected by an 2212 // earlier assignment, make an early copy of that 2213 // expression and use the copy instead. 2214 var early []*Node 2215 2216 var mapinit Nodes 2217 for i, n := range all { 2218 l = n.Left 2219 2220 // Save subexpressions needed on left side. 2221 // Drill through non-dereferences. 2222 for { 2223 if l.Op == ODOT || l.Op == OPAREN { 2224 l = l.Left 2225 continue 2226 } 2227 2228 if l.Op == OINDEX && l.Left.Type.IsArray() { 2229 l.Right = reorder3save(l.Right, all, i, &early) 2230 l = l.Left 2231 continue 2232 } 2233 2234 break 2235 } 2236 2237 switch l.Op { 2238 default: 2239 Fatalf("reorder3 unexpected lvalue %#v", l.Op) 2240 2241 case ONAME: 2242 break 2243 2244 case OINDEX, OINDEXMAP: 2245 l.Left = reorder3save(l.Left, all, i, &early) 2246 l.Right = reorder3save(l.Right, all, i, &early) 2247 if l.Op == OINDEXMAP { 2248 all[i] = convas(all[i], &mapinit) 2249 } 2250 2251 case OIND, ODOTPTR: 2252 l.Left = reorder3save(l.Left, all, i, &early) 2253 } 2254 2255 // Save expression on right side. 2256 all[i].Right = reorder3save(all[i].Right, all, i, &early) 2257 } 2258 2259 early = append(mapinit.Slice(), early...) 2260 return append(early, all...) 2261 } 2262 2263 // if the evaluation of *np would be affected by the 2264 // assignments in all up to but not including the ith assignment, 2265 // copy into a temporary during *early and 2266 // replace *np with that temp. 2267 // The result of reorder3save MUST be assigned back to n, e.g. 2268 // n.Left = reorder3save(n.Left, all, i, early) 2269 func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node { 2270 if !aliased(n, all, i) { 2271 return n 2272 } 2273 2274 q := temp(n.Type) 2275 q = nod(OAS, q, n) 2276 q = typecheck(q, Etop) 2277 *early = append(*early, q) 2278 return q.Left 2279 } 2280 2281 // what's the outer value that a write to n affects? 2282 // outer value means containing struct or array. 
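// Illustrative note (an editorial sketch, not part of the original comment):
// given the drilling rules below, a write such as
//	x.f[2].b = v   // x a struct with an array-of-structs field f
// has outer value x, because ODOT and OINDEX of an array are looked through,
// while a write through a pointer, e.g. p.f = v, stops at the ODOTPTR itself,
// since only the pointed-to memory is affected.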
2283 func outervalue(n *Node) *Node { 2284 for { 2285 if n.Op == OXDOT { 2286 Fatalf("OXDOT in walk") 2287 } 2288 if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP { 2289 n = n.Left 2290 continue 2291 } 2292 2293 if n.Op == OINDEX && n.Left.Type != nil && n.Left.Type.IsArray() { 2294 n = n.Left 2295 continue 2296 } 2297 2298 break 2299 } 2300 2301 return n 2302 } 2303 2304 // Is it possible that the computation of n might be 2305 // affected by writes in as up to but not including the ith element? 2306 func aliased(n *Node, all []*Node, i int) bool { 2307 if n == nil { 2308 return false 2309 } 2310 2311 // Treat all fields of a struct as referring to the whole struct. 2312 // We could do better but we would have to keep track of the fields. 2313 for n.Op == ODOT { 2314 n = n.Left 2315 } 2316 2317 // Look for obvious aliasing: a variable being assigned 2318 // during the all list and appearing in n. 2319 // Also record whether there are any writes to main memory. 2320 // Also record whether there are any writes to variables 2321 // whose addresses have been taken. 2322 memwrite := 0 2323 2324 varwrite := 0 2325 var a *Node 2326 for _, an := range all[:i] { 2327 a = outervalue(an.Left) 2328 2329 for a.Op == ODOT { 2330 a = a.Left 2331 } 2332 2333 if a.Op != ONAME { 2334 memwrite = 1 2335 continue 2336 } 2337 2338 switch n.Class { 2339 default: 2340 varwrite = 1 2341 continue 2342 2343 case PAUTO, PPARAM, PPARAMOUT: 2344 if n.Addrtaken() { 2345 varwrite = 1 2346 continue 2347 } 2348 2349 if vmatch2(a, n) { 2350 // Direct hit. 2351 return true 2352 } 2353 } 2354 } 2355 2356 // The variables being written do not appear in n. 2357 // However, n might refer to computed addresses 2358 // that are being written. 2359 2360 // If no computed addresses are affected by the writes, no aliasing. 2361 if memwrite == 0 && varwrite == 0 { 2362 return false 2363 } 2364 2365 // If n does not refer to computed addresses 2366 // (that is, if n only refers to variables whose addresses 2367 // have not been taken), no aliasing. 2368 if varexpr(n) { 2369 return false 2370 } 2371 2372 // Otherwise, both the writes and n refer to computed memory addresses. 2373 // Assume that they might conflict. 2374 return true 2375 } 2376 2377 // does the evaluation of n only refer to variables 2378 // whose addresses have not been taken? 2379 // (and no other memory) 2380 func varexpr(n *Node) bool { 2381 if n == nil { 2382 return true 2383 } 2384 2385 switch n.Op { 2386 case OLITERAL: 2387 return true 2388 2389 case ONAME: 2390 switch n.Class { 2391 case PAUTO, PPARAM, PPARAMOUT: 2392 if !n.Addrtaken() { 2393 return true 2394 } 2395 } 2396 2397 return false 2398 2399 case OADD, 2400 OSUB, 2401 OOR, 2402 OXOR, 2403 OMUL, 2404 ODIV, 2405 OMOD, 2406 OLSH, 2407 ORSH, 2408 OAND, 2409 OANDNOT, 2410 OPLUS, 2411 OMINUS, 2412 OCOM, 2413 OPAREN, 2414 OANDAND, 2415 OOROR, 2416 OCONV, 2417 OCONVNOP, 2418 OCONVIFACE, 2419 ODOTTYPE: 2420 return varexpr(n.Left) && varexpr(n.Right) 2421 2422 case ODOT: // but not ODOTPTR 2423 // Should have been handled in aliased. 2424 Fatalf("varexpr unexpected ODOT") 2425 } 2426 2427 // Be conservative. 2428 return false 2429 } 2430 2431 // is the name l mentioned in r? 
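// Illustrative note (an editorial sketch, not part of the original comment):
// names are compared by node identity (l == r), not by spelling, so with l
// bound to the ONAME node for x,
//	vmatch2(l, x+1)  -> true
//	vmatch2(l, y*2)  -> false
// and the List walk below also catches x appearing as a call argument, e.g. f(x).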
2432 func vmatch2(l *Node, r *Node) bool {
2433 	if r == nil {
2434 		return false
2435 	}
2436 	switch r.Op {
2437 	// match each right given left
2438 	case ONAME:
2439 		return l == r
2440 
2441 	case OLITERAL:
2442 		return false
2443 	}
2444 
2445 	if vmatch2(l, r.Left) {
2446 		return true
2447 	}
2448 	if vmatch2(l, r.Right) {
2449 		return true
2450 	}
2451 	for _, n := range r.List.Slice() {
2452 		if vmatch2(l, n) {
2453 			return true
2454 		}
2455 	}
2456 	return false
2457 }
2458 
2459 // is any name mentioned in l also mentioned in r?
2460 // called by sinit.go
2461 func vmatch1(l *Node, r *Node) bool {
2462 	// isolate all left sides
2463 	if l == nil || r == nil {
2464 		return false
2465 	}
2466 	switch l.Op {
2467 	case ONAME:
2468 		switch l.Class {
2469 		case PPARAM, PAUTO:
2470 			break
2471 
2472 		default:
2473 			// assignment to non-stack variable must be
2474 			// delayed if right has function calls.
2475 			if r.HasCall() {
2476 				return true
2477 			}
2478 		}
2479 
2480 		return vmatch2(l, r)
2481 
2482 	case OLITERAL:
2483 		return false
2484 	}
2485 
2486 	if vmatch1(l.Left, r) {
2487 		return true
2488 	}
2489 	if vmatch1(l.Right, r) {
2490 		return true
2491 	}
2492 	for _, n := range l.List.Slice() {
2493 		if vmatch1(n, r) {
2494 			return true
2495 		}
2496 	}
2497 	return false
2498 }
2499 
2500 // paramstoheap returns code to allocate memory for heap-escaped parameters
2501 // and to copy non-result parameters' values from the stack.
2502 func paramstoheap(params *types.Type) []*Node {
2503 	var nn []*Node
2504 	for _, t := range params.Fields().Slice() {
2505 		v := asNode(t.Nname)
2506 		if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
2507 			v = nil
2508 		}
2509 		if v == nil {
2510 			continue
2511 		}
2512 
2513 		if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
2514 			nn = append(nn, walkstmt(nod(ODCL, v, nil)))
2515 			if stackcopy.Class == PPARAM {
2516 				nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), Etop)))
2517 			}
2518 		}
2519 	}
2520 
2521 	return nn
2522 }
2523 
2524 // zeroResults zeros the return values at the start of the function.
2525 // We need to do this very early in the function. Defer might stop a
2526 // panic and show the return values as they exist at the time of
2527 // panic. For precise stacks, the garbage collector assumes results
2528 // are always live, so we need to zero them before any allocations,
2529 // even allocations to move params/results to the heap.
2530 // The generated code is added to Curfn's Enter list.
2531 func zeroResults() {
2532 	lno := lineno
2533 	lineno = Curfn.Pos
2534 	for _, f := range Curfn.Type.Results().Fields().Slice() {
2535 		if v := asNode(f.Nname); v != nil && v.Name.Param.Heapaddr != nil {
2536 			// The local which points to the return value is the
2537 			// thing that needs zeroing. This is already handled
2538 			// by a Needzero annotation in plive.go:livenessepilogue.
2539 			continue
2540 		}
2541 		// Zero the stack location containing f.
2542 		Curfn.Func.Enter.Append(nod(OAS, nodarg(f, 1), nil))
2543 	}
2544 	lineno = lno
2545 }
2546 
2547 // returnsfromheap returns code to copy values for heap-escaped parameters
2548 // back to the stack.
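// Illustrative note (an editorial sketch, not part of the original comment): for
//	func f() (r T)
// where r escapes to the heap, r's live copy is the heap-allocated name and
// r.Name.Param.Stackcopy is the PPARAMOUT stack slot, so the statement built
// below is roughly "stackcopy = r"; heapmoves appends it to Curfn.Func.Exit so
// the caller sees the final value in the usual result slot.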
2549 func returnsfromheap(params *types.Type) []*Node { 2550 var nn []*Node 2551 for _, t := range params.Fields().Slice() { 2552 v := asNode(t.Nname) 2553 if v == nil { 2554 continue 2555 } 2556 if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class == PPARAMOUT { 2557 nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), Etop))) 2558 } 2559 } 2560 2561 return nn 2562 } 2563 2564 // heapmoves generates code to handle migrating heap-escaped parameters 2565 // between the stack and the heap. The generated code is added to Curfn's 2566 // Enter and Exit lists. 2567 func heapmoves() { 2568 lno := lineno 2569 lineno = Curfn.Pos 2570 nn := paramstoheap(Curfn.Type.Recvs()) 2571 nn = append(nn, paramstoheap(Curfn.Type.Params())...) 2572 nn = append(nn, paramstoheap(Curfn.Type.Results())...) 2573 Curfn.Func.Enter.Append(nn...) 2574 lineno = Curfn.Func.Endlineno 2575 Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...) 2576 lineno = lno 2577 } 2578 2579 func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node { 2580 if fn.Type == nil || fn.Type.Etype != TFUNC { 2581 Fatalf("mkcall %v %v", fn, fn.Type) 2582 } 2583 2584 n := fn.Type.Params().NumFields() 2585 2586 r := nod(OCALL, fn, nil) 2587 r.List.Set(va[:n]) 2588 if fn.Type.Results().NumFields() > 0 { 2589 r = typecheck(r, Erv|Efnstruct) 2590 } else { 2591 r = typecheck(r, Etop) 2592 } 2593 r = walkexpr(r, init) 2594 r.Type = t 2595 return r 2596 } 2597 2598 func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node { 2599 return vmkcall(syslook(name), t, init, args) 2600 } 2601 2602 func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node { 2603 return vmkcall(fn, t, init, args) 2604 } 2605 2606 func conv(n *Node, t *types.Type) *Node { 2607 if eqtype(n.Type, t) { 2608 return n 2609 } 2610 n = nod(OCONV, n, nil) 2611 n.Type = t 2612 n = typecheck(n, Erv) 2613 return n 2614 } 2615 2616 // byteindex converts n, which is byte-sized, to a uint8. 2617 // We cannot use conv, because we allow converting bool to uint8 here, 2618 // which is forbidden in user code. 
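// Illustrative note (an editorial sketch, not part of the original comment): the
// conversion is built by hand and marked Typecheck = 1 precisely because
//	uint8(b)   // b of type bool
// would be rejected by the regular typechecker; here the operand's underlying
// byte (0 or 1 for a bool) is simply reinterpreted as a uint8 index.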
2619 func byteindex(n *Node) *Node { 2620 if eqtype(n.Type, types.Types[TUINT8]) { 2621 return n 2622 } 2623 n = nod(OCONV, n, nil) 2624 n.Type = types.Types[TUINT8] 2625 n.Typecheck = 1 2626 return n 2627 } 2628 2629 func chanfn(name string, n int, t *types.Type) *Node { 2630 if !t.IsChan() { 2631 Fatalf("chanfn %v", t) 2632 } 2633 fn := syslook(name) 2634 switch n { 2635 default: 2636 Fatalf("chanfn %d", n) 2637 case 1: 2638 fn = substArgTypes(fn, t.Elem()) 2639 case 2: 2640 fn = substArgTypes(fn, t.Elem(), t.Elem()) 2641 } 2642 return fn 2643 } 2644 2645 func mapfn(name string, t *types.Type) *Node { 2646 if !t.IsMap() { 2647 Fatalf("mapfn %v", t) 2648 } 2649 fn := syslook(name) 2650 fn = substArgTypes(fn, t.Key(), t.Val(), t.Key(), t.Val()) 2651 return fn 2652 } 2653 2654 func mapfndel(name string, t *types.Type) *Node { 2655 if !t.IsMap() { 2656 Fatalf("mapfn %v", t) 2657 } 2658 fn := syslook(name) 2659 fn = substArgTypes(fn, t.Key(), t.Val(), t.Key()) 2660 return fn 2661 } 2662 2663 const ( 2664 mapslow = iota 2665 mapfast32 2666 mapfast64 2667 mapfaststr 2668 nmapfast 2669 ) 2670 2671 type mapnames [nmapfast]string 2672 2673 func mkmapnames(base string) mapnames { 2674 return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"} 2675 } 2676 2677 var mapaccess1 mapnames = mkmapnames("mapaccess1") 2678 var mapaccess2 mapnames = mkmapnames("mapaccess2") 2679 var mapassign mapnames = mkmapnames("mapassign") 2680 var mapdelete mapnames = mkmapnames("mapdelete") 2681 2682 func mapfast(t *types.Type) int { 2683 // Check ../../runtime/hashmap.go:maxValueSize before changing. 2684 if t.Val().Width > 128 { 2685 return mapslow 2686 } 2687 switch algtype(t.Key()) { 2688 case AMEM32: 2689 return mapfast32 2690 case AMEM64: 2691 return mapfast64 2692 case ASTRING: 2693 return mapfaststr 2694 } 2695 return mapslow 2696 } 2697 2698 func writebarrierfn(name string, l *types.Type, r *types.Type) *Node { 2699 fn := syslook(name) 2700 fn = substArgTypes(fn, l, r) 2701 return fn 2702 } 2703 2704 func addstr(n *Node, init *Nodes) *Node { 2705 // orderexpr rewrote OADDSTR to have a list of strings. 2706 c := n.List.Len() 2707 2708 if c < 2 { 2709 Fatalf("addstr count %d too small", c) 2710 } 2711 2712 buf := nodnil() 2713 if n.Esc == EscNone { 2714 sz := int64(0) 2715 for _, n1 := range n.List.Slice() { 2716 if n1.Op == OLITERAL { 2717 sz += int64(len(n1.Val().U.(string))) 2718 } 2719 } 2720 2721 // Don't allocate the buffer if the result won't fit. 2722 if sz < tmpstringbufsize { 2723 // Create temporary buffer for result string on stack. 2724 t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) 2725 2726 buf = nod(OADDR, temp(t), nil) 2727 } 2728 } 2729 2730 // build list of string arguments 2731 args := []*Node{buf} 2732 for _, n2 := range n.List.Slice() { 2733 args = append(args, conv(n2, types.Types[TSTRING])) 2734 } 2735 2736 var fn string 2737 if c <= 5 { 2738 // small numbers of strings use direct runtime helpers. 2739 // note: orderexpr knows this cutoff too. 2740 fn = fmt.Sprintf("concatstring%d", c) 2741 } else { 2742 // large numbers of strings are passed to the runtime as a slice. 
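		// Illustrative instance (an editorial sketch, not from the original source):
		//	s := a + b + c + d + e + f   // six operands
		// becomes roughly
		//	concatstrings(buf, []string{a, b, c, d, e, f})
		// whereas five or fewer operands use the sized helpers chosen above, e.g.
		//	concatstring3(buf, a, b, c)
		// with buf either nil or a pointer to a small stack buffer when the
		// result does not escape and is short enough.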
2743 fn = "concatstrings" 2744 2745 t := types.NewSlice(types.Types[TSTRING]) 2746 slice := nod(OCOMPLIT, nil, typenod(t)) 2747 if prealloc[n] != nil { 2748 prealloc[slice] = prealloc[n] 2749 } 2750 slice.List.Set(args[1:]) // skip buf arg 2751 args = []*Node{buf, slice} 2752 slice.Esc = EscNone 2753 } 2754 2755 cat := syslook(fn) 2756 r := nod(OCALL, cat, nil) 2757 r.List.Set(args) 2758 r = typecheck(r, Erv) 2759 r = walkexpr(r, init) 2760 r.Type = n.Type 2761 2762 return r 2763 } 2764 2765 // expand append(l1, l2...) to 2766 // init { 2767 // s := l1 2768 // n := len(s) + len(l2) 2769 // // Compare as uint so growslice can panic on overflow. 2770 // if uint(n) > uint(cap(s)) { 2771 // s = growslice(s, n) 2772 // } 2773 // s = s[:n] 2774 // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) 2775 // } 2776 // s 2777 // 2778 // l2 is allowed to be a string. 2779 func appendslice(n *Node, init *Nodes) *Node { 2780 walkexprlistsafe(n.List.Slice(), init) 2781 2782 // walkexprlistsafe will leave OINDEX (s[n]) alone if both s 2783 // and n are name or literal, but those may index the slice we're 2784 // modifying here. Fix explicitly. 2785 ls := n.List.Slice() 2786 for i1, n1 := range ls { 2787 ls[i1] = cheapexpr(n1, init) 2788 } 2789 2790 l1 := n.List.First() 2791 l2 := n.List.Second() 2792 2793 var l []*Node 2794 2795 // var s []T 2796 s := temp(l1.Type) 2797 l = append(l, nod(OAS, s, l1)) // s = l1 2798 2799 // n := len(s) + len(l2) 2800 nn := temp(types.Types[TINT]) 2801 l = append(l, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil)))) 2802 2803 // if uint(n) > uint(cap(s)) 2804 nif := nod(OIF, nil, nil) 2805 nif.Left = nod(OGT, nod(OCONV, nn, nil), nod(OCONV, nod(OCAP, s, nil), nil)) 2806 nif.Left.Left.Type = types.Types[TUINT] 2807 nif.Left.Right.Type = types.Types[TUINT] 2808 2809 // instantiate growslice(Type*, []any, int) []any 2810 fn := syslook("growslice") 2811 fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem()) 2812 2813 // s = growslice(T, s, n) 2814 nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type.Elem()), s, nn))) 2815 l = append(l, nif) 2816 2817 // s = s[:n] 2818 nt := nod(OSLICE, s, nil) 2819 nt.SetSliceBounds(nil, nn, nil) 2820 nt.Etype = 1 2821 l = append(l, nod(OAS, s, nt)) 2822 2823 if types.Haspointers(l1.Type.Elem()) { 2824 // copy(s[len(l1):], l2) 2825 nptr1 := nod(OSLICE, s, nil) 2826 nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil) 2827 nptr1.Etype = 1 2828 nptr2 := l2 2829 fn := syslook("typedslicecopy") 2830 fn = substArgTypes(fn, l1.Type, l2.Type) 2831 var ln Nodes 2832 ln.Set(l) 2833 nt := mkcall1(fn, types.Types[TINT], &ln, typename(l1.Type.Elem()), nptr1, nptr2) 2834 l = append(ln.Slice(), nt) 2835 } else if instrumenting && !compiling_runtime { 2836 // rely on runtime to instrument copy. 
2837 // copy(s[len(l1):], l2) 2838 nptr1 := nod(OSLICE, s, nil) 2839 nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil) 2840 nptr1.Etype = 1 2841 nptr2 := l2 2842 var fn *Node 2843 if l2.Type.IsString() { 2844 fn = syslook("slicestringcopy") 2845 } else { 2846 fn = syslook("slicecopy") 2847 } 2848 fn = substArgTypes(fn, l1.Type, l2.Type) 2849 var ln Nodes 2850 ln.Set(l) 2851 nt := mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width)) 2852 l = append(ln.Slice(), nt) 2853 } else { 2854 // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) 2855 nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil)) 2856 nptr1.SetBounded(true) 2857 2858 nptr1 = nod(OADDR, nptr1, nil) 2859 2860 nptr2 := nod(OSPTR, l2, nil) 2861 2862 fn := syslook("memmove") 2863 fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem()) 2864 2865 var ln Nodes 2866 ln.Set(l) 2867 nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &ln) 2868 2869 nwid = nod(OMUL, nwid, nodintconst(s.Type.Elem().Width)) 2870 nt := mkcall1(fn, nil, &ln, nptr1, nptr2, nwid) 2871 l = append(ln.Slice(), nt) 2872 } 2873 2874 typecheckslice(l, Etop) 2875 walkstmtlist(l) 2876 init.Append(l...) 2877 return s 2878 } 2879 2880 // Rewrite append(src, x, y, z) so that any side effects in 2881 // x, y, z (including runtime panics) are evaluated in 2882 // initialization statements before the append. 2883 // For normal code generation, stop there and leave the 2884 // rest to cgen_append. 2885 // 2886 // For race detector, expand append(src, a [, b]* ) to 2887 // 2888 // init { 2889 // s := src 2890 // const argc = len(args) - 1 2891 // if cap(s) - len(s) < argc { 2892 // s = growslice(s, len(s)+argc) 2893 // } 2894 // n := len(s) 2895 // s = s[:n+argc] 2896 // s[n] = a 2897 // s[n+1] = b 2898 // ... 2899 // } 2900 // s 2901 func walkappend(n *Node, init *Nodes, dst *Node) *Node { 2902 if !samesafeexpr(dst, n.List.First()) { 2903 n.List.SetFirst(safeexpr(n.List.First(), init)) 2904 n.List.SetFirst(walkexpr(n.List.First(), init)) 2905 } 2906 walkexprlistsafe(n.List.Slice()[1:], init) 2907 2908 // walkexprlistsafe will leave OINDEX (s[n]) alone if both s 2909 // and n are name or literal, but those may index the slice we're 2910 // modifying here. Fix explicitly. 2911 // Using cheapexpr also makes sure that the evaluation 2912 // of all arguments (and especially any panics) happen 2913 // before we begin to modify the slice in a visible way. 2914 ls := n.List.Slice()[1:] 2915 for i, n := range ls { 2916 ls[i] = cheapexpr(n, init) 2917 } 2918 2919 nsrc := n.List.First() 2920 2921 argc := n.List.Len() - 1 2922 if argc < 1 { 2923 return nsrc 2924 } 2925 2926 // General case, with no function calls left as arguments. 2927 // Leave for gen, except that instrumentation requires old form. 
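	// Illustrative instance of the expansion documented above (an editorial
	// sketch, not from the original source): when instrumenting, append(s, x, y)
	// becomes roughly
	//	s := src
	//	if cap(s)-len(s) < 2 {
	//		s = growslice(s, len(s)+2)
	//	}
	//	n := len(s)
	//	s = s[:n+2]
	//	s[n] = x
	//	s[n+1] = y
	// so the slice manipulation stays visible to the instrumentation pass
	// instead of being hidden inside the backend's append lowering.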
2928 if !instrumenting || compiling_runtime { 2929 return n 2930 } 2931 2932 var l []*Node 2933 2934 ns := temp(nsrc.Type) 2935 l = append(l, nod(OAS, ns, nsrc)) // s = src 2936 2937 na := nodintconst(int64(argc)) // const argc 2938 nx := nod(OIF, nil, nil) // if cap(s) - len(s) < argc 2939 nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na) 2940 2941 fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T) 2942 fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem()) 2943 2944 nx.Nbody.Set1(nod(OAS, ns, 2945 mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns, 2946 nod(OADD, nod(OLEN, ns, nil), na)))) 2947 2948 l = append(l, nx) 2949 2950 nn := temp(types.Types[TINT]) 2951 l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s) 2952 2953 nx = nod(OSLICE, ns, nil) // ...s[:n+argc] 2954 nx.SetSliceBounds(nil, nod(OADD, nn, na), nil) 2955 nx.Etype = 1 2956 l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc] 2957 2958 ls = n.List.Slice()[1:] 2959 for i, n := range ls { 2960 nx = nod(OINDEX, ns, nn) // s[n] ... 2961 nx.SetBounded(true) 2962 l = append(l, nod(OAS, nx, n)) // s[n] = arg 2963 if i+1 < len(ls) { 2964 l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1 2965 } 2966 } 2967 2968 typecheckslice(l, Etop) 2969 walkstmtlist(l) 2970 init.Append(l...) 2971 return ns 2972 } 2973 2974 // Lower copy(a, b) to a memmove call or a runtime call. 2975 // 2976 // init { 2977 // n := len(a) 2978 // if n > len(b) { n = len(b) } 2979 // memmove(a.ptr, b.ptr, n*sizeof(elem(a))) 2980 // } 2981 // n; 2982 // 2983 // Also works if b is a string. 2984 // 2985 func copyany(n *Node, init *Nodes, runtimecall bool) *Node { 2986 if types.Haspointers(n.Left.Type.Elem()) { 2987 fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type) 2988 return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), n.Left, n.Right) 2989 } 2990 2991 if runtimecall { 2992 var fn *Node 2993 if n.Right.Type.IsString() { 2994 fn = syslook("slicestringcopy") 2995 } else { 2996 fn = syslook("slicecopy") 2997 } 2998 fn = substArgTypes(fn, n.Left.Type, n.Right.Type) 2999 return mkcall1(fn, n.Type, init, n.Left, n.Right, nodintconst(n.Left.Type.Elem().Width)) 3000 } 3001 3002 n.Left = walkexpr(n.Left, init) 3003 n.Right = walkexpr(n.Right, init) 3004 nl := temp(n.Left.Type) 3005 nr := temp(n.Right.Type) 3006 var l []*Node 3007 l = append(l, nod(OAS, nl, n.Left)) 3008 l = append(l, nod(OAS, nr, n.Right)) 3009 3010 nfrm := nod(OSPTR, nr, nil) 3011 nto := nod(OSPTR, nl, nil) 3012 3013 nlen := temp(types.Types[TINT]) 3014 3015 // n = len(to) 3016 l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil))) 3017 3018 // if n > len(frm) { n = len(frm) } 3019 nif := nod(OIF, nil, nil) 3020 3021 nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil)) 3022 nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil))) 3023 l = append(l, nif) 3024 3025 // Call memmove. 3026 fn := syslook("memmove") 3027 3028 fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem()) 3029 nwid := temp(types.Types[TUINTPTR]) 3030 l = append(l, nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR]))) 3031 nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width)) 3032 l = append(l, mkcall1(fn, nil, init, nto, nfrm, nwid)) 3033 3034 typecheckslice(l, Etop) 3035 walkstmtlist(l) 3036 init.Append(l...) 3037 return nlen 3038 } 3039 3040 func eqfor(t *types.Type, needsize *int) *Node { 3041 // Should only arrive here with large memory or 3042 // a struct/array containing a non-memory field/element. 
3043 // Small memory is handled inline, and single non-memory 3044 // is handled during type check (OCMPSTR etc). 3045 switch a, _ := algtype1(t); a { 3046 case AMEM: 3047 n := syslook("memequal") 3048 n = substArgTypes(n, t, t) 3049 *needsize = 1 3050 return n 3051 case ASPECIAL: 3052 sym := typesymprefix(".eq", t) 3053 n := newname(sym) 3054 n.Class = PFUNC 3055 ntype := nod(OTFUNC, nil, nil) 3056 ntype.List.Append(anonfield(types.NewPtr(t))) 3057 ntype.List.Append(anonfield(types.NewPtr(t))) 3058 ntype.Rlist.Append(anonfield(types.Types[TBOOL])) 3059 ntype = typecheck(ntype, Etype) 3060 n.Type = ntype.Type 3061 *needsize = 0 3062 return n 3063 } 3064 Fatalf("eqfor %v", t) 3065 return nil 3066 } 3067 3068 // The result of walkcompare MUST be assigned back to n, e.g. 3069 // n.Left = walkcompare(n.Left, init) 3070 func walkcompare(n *Node, init *Nodes) *Node { 3071 // Given interface value l and concrete value r, rewrite 3072 // l == r 3073 // into types-equal && data-equal. 3074 // This is efficient, avoids allocations, and avoids runtime calls. 3075 var l, r *Node 3076 if n.Left.Type.IsInterface() && !n.Right.Type.IsInterface() { 3077 l = n.Left 3078 r = n.Right 3079 } else if !n.Left.Type.IsInterface() && n.Right.Type.IsInterface() { 3080 l = n.Right 3081 r = n.Left 3082 } 3083 3084 if l != nil { 3085 // Handle both == and !=. 3086 eq := n.Op 3087 var andor Op 3088 if eq == OEQ { 3089 andor = OANDAND 3090 } else { 3091 andor = OOROR 3092 } 3093 // Check for types equal. 3094 // For empty interface, this is: 3095 // l.tab == type(r) 3096 // For non-empty interface, this is: 3097 // l.tab != nil && l.tab._type == type(r) 3098 var eqtype *Node 3099 tab := nod(OITAB, l, nil) 3100 rtyp := typename(r.Type) 3101 if l.Type.IsEmptyInterface() { 3102 tab.Type = types.NewPtr(types.Types[TUINT8]) 3103 tab.Typecheck = 1 3104 eqtype = nod(eq, tab, rtyp) 3105 } else { 3106 nonnil := nod(brcom(eq), nodnil(), tab) 3107 match := nod(eq, itabType(tab), rtyp) 3108 eqtype = nod(andor, nonnil, match) 3109 } 3110 // Check for data equal. 3111 eqdata := nod(eq, ifaceData(l, r.Type), r) 3112 // Put it all together. 3113 expr := nod(andor, eqtype, eqdata) 3114 n = finishcompare(n, expr, init) 3115 return n 3116 } 3117 3118 // Must be comparison of array or struct. 3119 // Otherwise back end handles it. 3120 // While we're here, decide whether to 3121 // inline or call an eq alg. 3122 t := n.Left.Type 3123 var inline bool 3124 switch t.Etype { 3125 default: 3126 return n 3127 case TARRAY: 3128 inline = t.NumElem() <= 1 || (t.NumElem() <= 4 && issimple[t.Elem().Etype]) 3129 case TSTRUCT: 3130 inline = t.NumFields() <= 4 3131 } 3132 3133 cmpl := n.Left 3134 for cmpl != nil && cmpl.Op == OCONVNOP { 3135 cmpl = cmpl.Left 3136 } 3137 cmpr := n.Right 3138 for cmpr != nil && cmpr.Op == OCONVNOP { 3139 cmpr = cmpr.Left 3140 } 3141 3142 // Chose not to inline. Call equality function directly. 
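	// Illustrative note (an editorial sketch, not from the original source): for a
	// struct type T with more than four fields, x == y is lowered via eqfor to
	// a call, roughly
	//	eqT(&x, &y)                          // generated .eq function, or
	//	memequal(&x, &y, unsafe.Sizeof(x))   // plain-memory case, size appended below
	// with the addresses of both operands passed through pointer temporaries.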
3143 if !inline { 3144 if isvaluelit(cmpl) { 3145 var_ := temp(cmpl.Type) 3146 anylit(cmpl, var_, init) 3147 cmpl = var_ 3148 } 3149 if isvaluelit(cmpr) { 3150 var_ := temp(cmpr.Type) 3151 anylit(cmpr, var_, init) 3152 cmpr = var_ 3153 } 3154 if !islvalue(cmpl) || !islvalue(cmpr) { 3155 Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr) 3156 } 3157 3158 // eq algs take pointers 3159 pl := temp(types.NewPtr(t)) 3160 al := nod(OAS, pl, nod(OADDR, cmpl, nil)) 3161 al.Right.Etype = 1 // addr does not escape 3162 al = typecheck(al, Etop) 3163 init.Append(al) 3164 3165 pr := temp(types.NewPtr(t)) 3166 ar := nod(OAS, pr, nod(OADDR, cmpr, nil)) 3167 ar.Right.Etype = 1 // addr does not escape 3168 ar = typecheck(ar, Etop) 3169 init.Append(ar) 3170 3171 var needsize int 3172 call := nod(OCALL, eqfor(t, &needsize), nil) 3173 call.List.Append(pl) 3174 call.List.Append(pr) 3175 if needsize != 0 { 3176 call.List.Append(nodintconst(t.Width)) 3177 } 3178 res := call 3179 if n.Op != OEQ { 3180 res = nod(ONOT, res, nil) 3181 } 3182 n = finishcompare(n, res, init) 3183 return n 3184 } 3185 3186 // inline: build boolean expression comparing element by element 3187 andor := OANDAND 3188 if n.Op == ONE { 3189 andor = OOROR 3190 } 3191 var expr *Node 3192 compare := func(el, er *Node) { 3193 a := nod(n.Op, el, er) 3194 if expr == nil { 3195 expr = a 3196 } else { 3197 expr = nod(andor, expr, a) 3198 } 3199 } 3200 cmpl = safeexpr(cmpl, init) 3201 cmpr = safeexpr(cmpr, init) 3202 if t.IsStruct() { 3203 for _, f := range t.Fields().Slice() { 3204 sym := f.Sym 3205 if isblanksym(sym) { 3206 continue 3207 } 3208 compare( 3209 nodSym(OXDOT, cmpl, sym), 3210 nodSym(OXDOT, cmpr, sym), 3211 ) 3212 } 3213 } else { 3214 for i := 0; int64(i) < t.NumElem(); i++ { 3215 compare( 3216 nod(OINDEX, cmpl, nodintconst(int64(i))), 3217 nod(OINDEX, cmpr, nodintconst(int64(i))), 3218 ) 3219 } 3220 } 3221 if expr == nil { 3222 expr = nodbool(n.Op == OEQ) 3223 } 3224 n = finishcompare(n, expr, init) 3225 return n 3226 } 3227 3228 // The result of finishcompare MUST be assigned back to n, e.g. 3229 // n.Left = finishcompare(n.Left, x, r, init) 3230 func finishcompare(n, r *Node, init *Nodes) *Node { 3231 // Use nn here to avoid passing r to typecheck. 3232 nn := r 3233 nn = typecheck(nn, Erv) 3234 nn = walkexpr(nn, init) 3235 r = nn 3236 if r.Type != n.Type { 3237 r = nod(OCONVNOP, r, nil) 3238 r.Type = n.Type 3239 r.Typecheck = 1 3240 nn = r 3241 } 3242 return nn 3243 } 3244 3245 // isIntOrdering reports whether n is a <, ≤, >, or ≥ ordering between integers. 3246 func (n *Node) isIntOrdering() bool { 3247 switch n.Op { 3248 case OLE, OLT, OGE, OGT: 3249 default: 3250 return false 3251 } 3252 return n.Left.Type.IsInteger() && n.Right.Type.IsInteger() 3253 } 3254 3255 // walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10. 3256 // n must be an OANDAND or OOROR node. 3257 // The result of walkinrange MUST be assigned back to n, e.g. 3258 // n.Left = walkinrange(n.Left) 3259 func walkinrange(n *Node, init *Nodes) *Node { 3260 // We are looking for something equivalent to a opl b OP b opr c, where: 3261 // * a, b, and c have integer type 3262 // * b is side-effect-free 3263 // * opl and opr are each < or ≤ 3264 // * OP is && 3265 l := n.Left 3266 r := n.Right 3267 if !l.isIntOrdering() || !r.isIntOrdering() { 3268 return n 3269 } 3270 3271 // Find b, if it exists, and rename appropriately. 
3272 // Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right 3273 // Output is: a opl b(==x) ANDAND/OROR b(==x) opr c 3274 a, opl, b := l.Left, l.Op, l.Right 3275 x, opr, c := r.Left, r.Op, r.Right 3276 for i := 0; ; i++ { 3277 if samesafeexpr(b, x) { 3278 break 3279 } 3280 if i == 3 { 3281 // Tried all permutations and couldn't find an appropriate b == x. 3282 return n 3283 } 3284 if i&1 == 0 { 3285 a, opl, b = b, brrev(opl), a 3286 } else { 3287 x, opr, c = c, brrev(opr), x 3288 } 3289 } 3290 3291 // If n.Op is ||, apply de Morgan. 3292 // Negate the internal ops now; we'll negate the top level op at the end. 3293 // Henceforth assume &&. 3294 negateResult := n.Op == OOROR 3295 if negateResult { 3296 opl = brcom(opl) 3297 opr = brcom(opr) 3298 } 3299 3300 cmpdir := func(o Op) int { 3301 switch o { 3302 case OLE, OLT: 3303 return -1 3304 case OGE, OGT: 3305 return +1 3306 } 3307 Fatalf("walkinrange cmpdir %v", o) 3308 return 0 3309 } 3310 if cmpdir(opl) != cmpdir(opr) { 3311 // Not a range check; something like b < a && b < c. 3312 return n 3313 } 3314 3315 switch opl { 3316 case OGE, OGT: 3317 // We have something like a > b && b ≥ c. 3318 // Switch and reverse ops and rename constants, 3319 // to make it look like a ≤ b && b < c. 3320 a, c = c, a 3321 opl, opr = brrev(opr), brrev(opl) 3322 } 3323 3324 // We must ensure that c-a is non-negative. 3325 // For now, require a and c to be constants. 3326 // In the future, we could also support a == 0 and c == len/cap(...). 3327 // Unfortunately, by this point, most len/cap expressions have been 3328 // stored into temporary variables. 3329 if !Isconst(a, CTINT) || !Isconst(c, CTINT) { 3330 return n 3331 } 3332 3333 if opl == OLT { 3334 // We have a < b && ... 3335 // We need a ≤ b && ... to safely use unsigned comparison tricks. 3336 // If a is not the maximum constant for b's type, 3337 // we can increment a and switch to ≤. 3338 if a.Int64() >= maxintval[b.Type.Etype].Int64() { 3339 return n 3340 } 3341 a = nodintconst(a.Int64() + 1) 3342 opl = OLE 3343 } 3344 3345 bound := c.Int64() - a.Int64() 3346 if bound < 0 { 3347 // Bad news. Something like 5 <= x && x < 3. 3348 // Rare in practice, and we still need to generate side-effects, 3349 // so just leave it alone. 3350 return n 3351 } 3352 3353 // We have a ≤ b && b < c (or a ≤ b && b ≤ c). 3354 // This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a), 3355 // which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a), 3356 // which is equivalent to uint(b-a) < uint(c-a). 3357 ut := b.Type.ToUnsigned() 3358 lhs := conv(nod(OSUB, b, a), ut) 3359 rhs := nodintconst(bound) 3360 if negateResult { 3361 // Negate top level. 3362 opr = brcom(opr) 3363 } 3364 cmp := nod(opr, lhs, rhs) 3365 cmp.Pos = n.Pos 3366 cmp = addinit(cmp, l.Ninit.Slice()) 3367 cmp = addinit(cmp, r.Ninit.Slice()) 3368 // Typecheck the AST rooted at cmp... 3369 cmp = typecheck(cmp, Erv) 3370 // ...but then reset cmp's type to match n's type. 
3371 	cmp.Type = n.Type
3372 	cmp = walkexpr(cmp, init)
3373 	return cmp
3374 }
3375 
3376 // bounded reports whether the integer n is guaranteed to be in the range [0, max).
3377 func bounded(n *Node, max int64) bool {
3378 	if n.Type == nil || !n.Type.IsInteger() {
3379 		return false
3380 	}
3381 
3382 	sign := n.Type.IsSigned()
3383 	bits := int32(8 * n.Type.Width)
3384 
3385 	if smallintconst(n) {
3386 		v := n.Int64()
3387 		return 0 <= v && v < max
3388 	}
3389 
3390 	switch n.Op {
3391 	case OAND:
3392 		v := int64(-1)
3393 		if smallintconst(n.Left) {
3394 			v = n.Left.Int64()
3395 		} else if smallintconst(n.Right) {
3396 			v = n.Right.Int64()
3397 		}
3398 
3399 		if 0 <= v && v < max {
3400 			return true
3401 		}
3402 
3403 	case OMOD:
3404 		if !sign && smallintconst(n.Right) {
3405 			v := n.Right.Int64()
3406 			if 0 <= v && v <= max {
3407 				return true
3408 			}
3409 		}
3410 
3411 	case ODIV:
3412 		if !sign && smallintconst(n.Right) {
3413 			v := n.Right.Int64()
3414 			for bits > 0 && v >= 2 {
3415 				bits--
3416 				v >>= 1
3417 			}
3418 		}
3419 
3420 	case ORSH:
3421 		if !sign && smallintconst(n.Right) {
3422 			v := n.Right.Int64()
3423 			if v > int64(bits) {
3424 				return true
3425 			}
3426 			bits -= int32(v)
3427 		}
3428 	}
3429 
3430 	if !sign && bits <= 62 && 1<<uint(bits) <= max {
3431 		return true
3432 	}
3433 
3434 	return false
3435 }
3436 
3437 // usemethod checks interface method calls for uses of reflect.Type.Method or MethodByName.
3438 func usemethod(n *Node) {
3439 	t := n.Left.Type
3440 
3441 	// Looking for either of:
3442 	//	Method(int) reflect.Method
3443 	//	MethodByName(string) (reflect.Method, bool)
3444 	//
3445 	// TODO(crawshaw): improve precision of match by working out
3446 	// how to check the method name.
3447 	if n := t.Params().NumFields(); n != 1 {
3448 		return
3449 	}
3450 	if n := t.Results().NumFields(); n != 1 && n != 2 {
3451 		return
3452 	}
3453 	p0 := t.Params().Field(0)
3454 	res0 := t.Results().Field(0)
3455 	var res1 *types.Field
3456 	if t.Results().NumFields() == 2 {
3457 		res1 = t.Results().Field(1)
3458 	}
3459 
3460 	if res1 == nil {
3461 		if p0.Type.Etype != TINT {
3462 			return
3463 		}
3464 	} else {
3465 		if !p0.Type.IsString() {
3466 			return
3467 		}
3468 		if !res1.Type.IsBoolean() {
3469 			return
3470 		}
3471 	}
3472 	if res0.Type.String() != "reflect.Method" {
3473 		return
3474 	}
3475 
3476 	Curfn.Func.SetReflectMethod(true)
3477 }
3478 
3479 func usefield(n *Node) {
3480 	if obj.Fieldtrack_enabled == 0 {
3481 		return
3482 	}
3483 
3484 	switch n.Op {
3485 	default:
3486 		Fatalf("usefield %v", n.Op)
3487 
3488 	case ODOT, ODOTPTR:
3489 		break
3490 	}
3491 	if n.Sym == nil {
3492 		// No field name. This DOTPTR was built by the compiler for access
3493 		// to runtime data structures. Ignore.
3494 return 3495 } 3496 3497 t := n.Left.Type 3498 if t.IsPtr() { 3499 t = t.Elem() 3500 } 3501 field := dotField[typeSymKey{t.Orig, n.Sym}] 3502 if field == nil { 3503 Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym) 3504 } 3505 if !strings.Contains(field.Note, "go:\"track\"") { 3506 return 3507 } 3508 3509 outer := n.Left.Type 3510 if outer.IsPtr() { 3511 outer = outer.Elem() 3512 } 3513 if outer.Sym == nil { 3514 yyerror("tracked field must be in named struct type") 3515 } 3516 if !exportname(field.Sym.Name) { 3517 yyerror("tracked field must be exported (upper case)") 3518 } 3519 3520 sym := tracksym(outer, field) 3521 if Curfn.Func.FieldTrack == nil { 3522 Curfn.Func.FieldTrack = make(map[*types.Sym]struct{}) 3523 } 3524 Curfn.Func.FieldTrack[sym] = struct{}{} 3525 } 3526 3527 func candiscardlist(l Nodes) bool { 3528 for _, n := range l.Slice() { 3529 if !candiscard(n) { 3530 return false 3531 } 3532 } 3533 return true 3534 } 3535 3536 func candiscard(n *Node) bool { 3537 if n == nil { 3538 return true 3539 } 3540 3541 switch n.Op { 3542 default: 3543 return false 3544 3545 // Discardable as long as the subpieces are. 3546 case ONAME, 3547 ONONAME, 3548 OTYPE, 3549 OPACK, 3550 OLITERAL, 3551 OADD, 3552 OSUB, 3553 OOR, 3554 OXOR, 3555 OADDSTR, 3556 OADDR, 3557 OANDAND, 3558 OARRAYBYTESTR, 3559 OARRAYRUNESTR, 3560 OSTRARRAYBYTE, 3561 OSTRARRAYRUNE, 3562 OCAP, 3563 OCMPIFACE, 3564 OCMPSTR, 3565 OCOMPLIT, 3566 OMAPLIT, 3567 OSTRUCTLIT, 3568 OARRAYLIT, 3569 OSLICELIT, 3570 OPTRLIT, 3571 OCONV, 3572 OCONVIFACE, 3573 OCONVNOP, 3574 ODOT, 3575 OEQ, 3576 ONE, 3577 OLT, 3578 OLE, 3579 OGT, 3580 OGE, 3581 OKEY, 3582 OSTRUCTKEY, 3583 OLEN, 3584 OMUL, 3585 OLSH, 3586 ORSH, 3587 OAND, 3588 OANDNOT, 3589 ONEW, 3590 ONOT, 3591 OCOM, 3592 OPLUS, 3593 OMINUS, 3594 OOROR, 3595 OPAREN, 3596 ORUNESTR, 3597 OREAL, 3598 OIMAG, 3599 OCOMPLEX: 3600 break 3601 3602 // Discardable as long as we know it's not division by zero. 3603 case ODIV, OMOD: 3604 if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 { 3605 break 3606 } 3607 if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 { 3608 break 3609 } 3610 return false 3611 3612 // Discardable as long as we know it won't fail because of a bad size. 3613 case OMAKECHAN, OMAKEMAP: 3614 if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 { 3615 break 3616 } 3617 return false 3618 3619 // Difficult to tell what sizes are okay. 3620 case OMAKESLICE: 3621 return false 3622 } 3623 3624 if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) { 3625 return false 3626 } 3627 3628 return true 3629 } 3630 3631 // rewrite 3632 // print(x, y, z) 3633 // into 3634 // func(a1, a2, a3) { 3635 // print(a1, a2, a3) 3636 // }(x, y, z) 3637 // and same for println. 3638 3639 var walkprintfunc_prgen int 3640 3641 // The result of walkprintfunc MUST be assigned back to n, e.g. 
3642 // n.Left = walkprintfunc(n.Left, init) 3643 func walkprintfunc(n *Node, init *Nodes) *Node { 3644 if n.Ninit.Len() != 0 { 3645 walkstmtlist(n.Ninit.Slice()) 3646 init.AppendNodes(&n.Ninit) 3647 } 3648 3649 t := nod(OTFUNC, nil, nil) 3650 num := 0 3651 var printargs []*Node 3652 var a *Node 3653 var buf string 3654 for _, n1 := range n.List.Slice() { 3655 buf = fmt.Sprintf("a%d", num) 3656 num++ 3657 a = namedfield(buf, n1.Type) 3658 t.List.Append(a) 3659 printargs = append(printargs, a.Left) 3660 } 3661 3662 oldfn := Curfn 3663 Curfn = nil 3664 3665 walkprintfunc_prgen++ 3666 sym := lookupN("print·%d", walkprintfunc_prgen) 3667 fn := dclfunc(sym, t) 3668 3669 a = nod(n.Op, nil, nil) 3670 a.List.Set(printargs) 3671 a = typecheck(a, Etop) 3672 a = walkstmt(a) 3673 3674 fn.Nbody.Set1(a) 3675 3676 funcbody(fn) 3677 3678 fn = typecheck(fn, Etop) 3679 typecheckslice(fn.Nbody.Slice(), Etop) 3680 xtop = append(xtop, fn) 3681 Curfn = oldfn 3682 3683 a = nod(OCALL, nil, nil) 3684 a.Left = fn.Func.Nname 3685 a.List.Set(n.List.Slice()) 3686 a = typecheck(a, Etop) 3687 a = walkexpr(a, init) 3688 return a 3689 } 3690 3691 // substArgTypes substitutes the given list of types for 3692 // successive occurrences of the "any" placeholder in the 3693 // type syntax expression n.Type. 3694 // The result of substArgTypes MUST be assigned back to old, e.g. 3695 // n.Left = substArgTypes(n.Left, t1, t2) 3696 func substArgTypes(old *Node, types_ ...*types.Type) *Node { 3697 n := *old // make shallow copy 3698 3699 for _, t := range types_ { 3700 dowidth(t) 3701 } 3702 n.Type = types.SubstAny(n.Type, &types_) 3703 if len(types_) > 0 { 3704 Fatalf("substArgTypes: too many argument types") 3705 } 3706 return &n 3707 }
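// Illustrative note (an editorial sketch, not from the original source): callers
// such as appendslice use this to specialize a builtin's generic signature:
//	fn := syslook("growslice")         // roughly func(*byte, []any, int) []any
//	fn = substArgTypes(fn, elem, elem) // -> roughly func(*byte, []T, int) []T
// SubstAny consumes one entry from types_ for each "any" it replaces, so any
// leftover entries mean the caller passed too many types, hence the Fatalf.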