// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/objabi"
	"cmd/internal/sys"
	"fmt"
	"strings"
)

// The constant is known to runtime.
const (
	tmpstringbufsize = 32
)

// walk runs the walk phase over fn: it re-typechecks fn's local
// declarations, reports "declared and not used" errors, and then lowers
// fn's body via walkstmtlist, followed by zeroResults and heapmoves.
// It sets the global Curfn to fn and restores the global lineno when done.
func walk(fn *Node) {
	Curfn = fn

	// Debug['W'] dumps the body before and after walking.
	if Debug['W'] != 0 {
		s := fmt.Sprintf("\nbefore %v", Curfn.Func.Nname.Sym)
		dumplist(s, Curfn.Nbody)
	}

	lno := lineno

	// Final typecheck for any unused variables.
	for i, ln := range fn.Func.Dcl {
		if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) {
			ln = typecheck(ln, Erv|Easgn)
			fn.Func.Dcl[i] = ln
		}
	}

	// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
	for _, ln := range fn.Func.Dcl {
		if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
			ln.Name.Defn.Left.Name.SetUsed(true)
		}
	}

	// Report any local that was declared but never used.
	// Names starting with '&' are compiler-generated (skip them).
	for _, ln := range fn.Func.Dcl {
		if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
			continue
		}
		if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
			if defn.Left.Name.Used() {
				continue
			}
			yyerrorl(defn.Left.Pos, "%v declared and not used", ln.Sym)
			defn.Left.Name.SetUsed(true) // suppress repeats
		} else {
			yyerrorl(ln.Pos, "%v declared and not used", ln.Sym)
		}
	}

	lineno = lno
	if nerrors != 0 {
		return
	}
	walkstmtlist(Curfn.Nbody.Slice())
	if Debug['W'] != 0 {
		s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
		dumplist(s, Curfn.Nbody)
	}

	zeroResults()
	heapmoves()
	if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 {
		s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
		dumplist(s, Curfn.Func.Enter)
	}
}

// walkstmtlist walks each statement in s in place.
func walkstmtlist(s []*Node) {
	for i := range s {
		s[i] = walkstmt(s[i])
	}
}

// samelist reports whether a and b contain the same *Node pointers,
// in the same order (pointer identity, not structural equality).
func samelist(a, b []*Node) bool {
	if len(a) != len(b) {
		return false
	}
	for i, n := range a {
		if n != b[i] {
			return false
		}
	}
	return true
}

// paramoutheap reports whether fn has an output parameter that is
// a stack copy of a heap parameter or has had its address taken.
// It relies on Dcl listing parameters before autos, so it can stop
// at the first PAUTO.
func paramoutheap(fn *Node) bool {
	for _, ln := range fn.Func.Dcl {
		switch ln.Class() {
		case PPARAMOUT:
			if ln.isParamStackCopy() || ln.Addrtaken() {
				return true
			}

		case PAUTO:
			// stop early - parameters are over
			return false
		}
	}

	return false
}

// adjustargs adds "adjust" to all the argument locations for the call n.
// n must be a defer or go node that has already been walked.
func adjustargs(n *Node, adjust int) {
	callfunc := n.Left
	for _, arg := range callfunc.List.Slice() {
		if arg.Op != OAS {
			Fatalf("call arg not assignment")
		}
		lhs := arg.Left
		if lhs.Op == ONAME {
			// This is a temporary introduced by reorder1.
			// The real store to the stack appears later in the arg list.
			continue
		}

		if lhs.Op != OINDREGSP {
			Fatalf("call argument store does not use OINDREGSP")
		}

		// can't really check this in machine-indep code.
		//if(lhs->val.u.reg != D_SP)
		//	Fatalf("call arg assign not indreg(SP)")
		lhs.Xoffset += int64(adjust)
	}
}

// The result of walkstmt MUST be assigned back to n, e.g.
// n.Left = walkstmt(n.Left)

// walkstmt lowers the statement n for the back end: expressions embedded
// in the statement are walked via walkexpr, receives become runtime
// chanrecv1 calls, heap-allocated ODCLs become explicit allocations, and
// ORETURN results are matched up with the function's out parameters.
func walkstmt(n *Node) *Node {
	if n == nil {
		return n
	}

	setlineno(n)

	walkstmtlist(n.Ninit.Slice())

	switch n.Op {
	default:
		if n.Op == ONAME {
			yyerror("%v is not a top level statement", n.Sym)
		} else {
			yyerror("%v is not a top level statement", n.Op)
		}
		Dump("nottop", n)

	case OAS,
		OASOP,
		OAS2,
		OAS2DOTTYPE,
		OAS2RECV,
		OAS2FUNC,
		OAS2MAPR,
		OCLOSE,
		OCOPY,
		OCALLMETH,
		OCALLINTER,
		OCALL,
		OCALLFUNC,
		ODELETE,
		OSEND,
		OPRINT,
		OPRINTN,
		OPANIC,
		OEMPTY,
		ORECOVER,
		OGETG:
		if n.Typecheck() == 0 {
			Fatalf("missing typecheck: %+v", n)
		}
		wascopy := n.Op == OCOPY
		// Walk with a detached init list so a replacement node
		// does not lose the accumulated init statements.
		init := n.Ninit
		n.Ninit.Set(nil)
		n = walkexpr(n, &init)
		n = addinit(n, init.Slice())
		if wascopy && n.Op == OCONVNOP {
			n.Op = OEMPTY // don't leave plain values as statements.
		}

	// special case for a receive where we throw away
	// the value received.
	case ORECV:
		if n.Typecheck() == 0 {
			Fatalf("missing typecheck: %+v", n)
		}
		init := n.Ninit
		n.Ninit.Set(nil)

		// <-c with the result discarded becomes chanrecv1(c, nil).
		n.Left = walkexpr(n.Left, &init)
		n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil())
		n = walkexpr(n, &init)

		n = addinit(n, init.Slice())

	case OBREAK,
		OCONTINUE,
		OFALL,
		OGOTO,
		OLABEL,
		ODCLCONST,
		ODCLTYPE,
		OCHECKNIL,
		OVARKILL,
		OVARLIVE:
		break

	case ODCL:
		v := n.Left
		if v.Class() == PAUTOHEAP {
			if compiling_runtime {
				yyerror("%v escapes to heap, not allowed in runtime.", v)
			}
			if prealloc[v] == nil {
				prealloc[v] = callnew(v.Type)
			}
			// Replace the declaration with an assignment of the
			// heap allocation to the variable's Heapaddr.
			nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v])
			nn.SetColas(true)
			nn = typecheck(nn, Etop)
			return walkstmt(nn)
		}

	case OBLOCK:
		walkstmtlist(n.List.Slice())

	case OXCASE:
		yyerror("case statement out of place")
		n.Op = OCASE
		fallthrough

	case OCASE:
		n.Right = walkstmt(n.Right)

	case ODEFER:
		Curfn.Func.SetHasDefer(true)
		switch n.Left.Op {
		case OPRINT, OPRINTN:
			n.Left = walkprintfunc(n.Left, &n.Ninit)

		case OCOPY:
			n.Left = copyany(n.Left, &n.Ninit, true)

		default:
			n.Left = walkexpr(n.Left, &n.Ninit)
		}

		// make room for size & fn arguments.
		adjustargs(n, 2*Widthptr)

	case OFOR, OFORUNTIL:
		if n.Left != nil {
			walkstmtlist(n.Left.Ninit.Slice())
			init := n.Left.Ninit
			n.Left.Ninit.Set(nil)
			n.Left = walkexpr(n.Left, &init)
			n.Left = addinit(n.Left, init.Slice())
		}

		n.Right = walkstmt(n.Right)
		walkstmtlist(n.Nbody.Slice())

	case OIF:
		n.Left = walkexpr(n.Left, &n.Ninit)
		walkstmtlist(n.Nbody.Slice())
		walkstmtlist(n.Rlist.Slice())

	case OPROC:
		switch n.Left.Op {
		case OPRINT, OPRINTN:
			n.Left = walkprintfunc(n.Left, &n.Ninit)

		case OCOPY:
			n.Left = copyany(n.Left, &n.Ninit, true)

		default:
			n.Left = walkexpr(n.Left, &n.Ninit)
		}

		// make room for size & fn arguments.
		adjustargs(n, 2*Widthptr)

	case ORETURN:
		walkexprlist(n.List.Slice(), &n.Ninit)
		if n.List.Len() == 0 {
			break
		}
		if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) {
			// assign to the function out parameters,
			// so that reorder3 can fix up conflicts
			var rl []*Node

			for _, ln := range Curfn.Func.Dcl {
				cl := ln.Class()
				if cl == PAUTO || cl == PAUTOHEAP {
					break
				}
				if cl == PPARAMOUT {
					if ln.isParamStackCopy() {
						ln = walkexpr(typecheck(nod(OIND, ln.Name.Param.Heapaddr, nil), Erv), nil)
					}
					rl = append(rl, ln)
				}
			}

			if got, want := n.List.Len(), len(rl); got != want {
				// order should have rewritten multi-value function calls
				// with explicit OAS2FUNC nodes.
				Fatalf("expected %v return arguments, have %v", want, got)
			}

			if samelist(rl, n.List.Slice()) {
				// special return in disguise
				n.List.Set(nil)

				break
			}

			// move function calls out, to make reorder3's job easier.
			walkexprlistsafe(n.List.Slice(), &n.Ninit)

			ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
			n.List.Set(reorder3(ll))
			break
		}

		ll := ascompatte(nil, false, Curfn.Type.Results(), n.List.Slice(), 1, &n.Ninit)
		n.List.Set(ll)

	case ORETJMP:
		break

	case OSELECT:
		walkselect(n)

	case OSWITCH:
		walkswitch(n)

	case ORANGE:
		n = walkrange(n)
	}

	if n.Op == ONAME {
		Fatalf("walkstmt ended up with name: %+v", n)
	}
	return n
}

// isSmallMakeSlice reports whether n is a make([]T, len, cap) whose
// bounds are small integer constants and whose total byte size is
// under 1<<16, so the slice can be backed by a stack temporary.
func isSmallMakeSlice(n *Node) bool {
	if n.Op != OMAKESLICE {
		return false
	}
	l := n.Left
	r := n.Right
	if r == nil {
		// make([]T, len) — the capacity defaults to the length.
		r = l
	}
	t := n.Type

	return smallintconst(l) && smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < (1<<16)/t.Elem().Width)
}

// walk the whole tree of the body of an
// expression or simple statement.
// the types expressions are calculated.
// compile-time constants are evaluated.
// complex side effects like statements are appended to init
func walkexprlist(s []*Node, init *Nodes) {
	for i := range s {
		s[i] = walkexpr(s[i], init)
	}
}

// walkexprlistsafe is like walkexprlist but first makes each
// expression safe for reevaluation via safeexpr.
func walkexprlistsafe(s []*Node, init *Nodes) {
	for i, n := range s {
		s[i] = safeexpr(n, init)
		s[i] = walkexpr(s[i], init)
	}
}

// walkexprlistcheap is like walkexprlist but first replaces each
// expression with a cheap copy via cheapexpr.
func walkexprlistcheap(s []*Node, init *Nodes) {
	for i, n := range s {
		s[i] = cheapexpr(n, init)
		s[i] = walkexpr(s[i], init)
	}
}

// Build name of function for interface conversion.
// Not all names are possible
// (e.g., we'll never generate convE2E or convE2I or convI2E).
func convFuncName(from, to *types.Type) string {
	// Tie classifies a type for conversion purposes:
	// 'I' interface, 'E' empty interface, 'T' concrete type.
	tkind := to.Tie()
	switch from.Tie() {
	case 'I':
		switch tkind {
		case 'I':
			return "convI2I"
		}
	case 'T':
		switch tkind {
		case 'E':
			// Specialized T-to-empty-interface helpers, chosen by
			// size/alignment, string/slice shape, or pointer-freeness.
			switch {
			case from.Size() == 2 && from.Align == 2:
				return "convT2E16"
			case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
				return "convT2E32"
			case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
				return "convT2E64"
			case from.IsString():
				return "convT2Estring"
			case from.IsSlice():
				return "convT2Eslice"
			case !types.Haspointers(from):
				return "convT2Enoptr"
			}
			return "convT2E"
		case 'I':
			// Same specializations for T-to-nonempty-interface.
			switch {
			case from.Size() == 2 && from.Align == 2:
				return "convT2I16"
			case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
				return "convT2I32"
			case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
				return "convT2I64"
			case from.IsString():
				return "convT2Istring"
			case from.IsSlice():
				return "convT2Islice"
			case !types.Haspointers(from):
				return "convT2Inoptr"
			}
			return "convT2I"
		}
	}
	Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
	panic("unreachable") // Fatalf does not return; keep the compiler happy.
}

// The result of walkexpr MUST be assigned back to n, e.g.
// n.Left = walkexpr(n.Left, init)
func walkexpr(n *Node, init *Nodes) *Node {
	if n == nil {
		return n
	}

	// Eagerly checkwidth all expressions for the back end.
	if n.Type != nil && !n.Type.WidthCalculated() {
		switch n.Type.Etype {
		case TBLANK, TNIL, TIDEAL:
		default:
			checkwidth(n.Type)
		}
	}

	if init == &n.Ninit {
		// not okay to use n->ninit when walking n,
		// because we might replace n with some other node
		// and would lose the init list.
469 Fatalf("walkexpr init == &n->ninit") 470 } 471 472 if n.Ninit.Len() != 0 { 473 walkstmtlist(n.Ninit.Slice()) 474 init.AppendNodes(&n.Ninit) 475 } 476 477 lno := setlineno(n) 478 479 if Debug['w'] > 1 { 480 Dump("walk-before", n) 481 } 482 483 if n.Typecheck() != 1 { 484 Fatalf("missed typecheck: %+v", n) 485 } 486 487 if n.Op == ONAME && n.Class() == PAUTOHEAP { 488 nn := nod(OIND, n.Name.Param.Heapaddr, nil) 489 nn = typecheck(nn, Erv) 490 nn = walkexpr(nn, init) 491 nn.Left.SetNonNil(true) 492 return nn 493 } 494 495 opswitch: 496 switch n.Op { 497 default: 498 Dump("walk", n) 499 Fatalf("walkexpr: switch 1 unknown op %+S", n) 500 501 case ONONAME, OINDREGSP, OEMPTY, OGETG: 502 503 case OTYPE, ONAME, OLITERAL: 504 // TODO(mdempsky): Just return n; see discussion on CL 38655. 505 // Perhaps refactor to use Node.mayBeShared for these instead. 506 // If these return early, make sure to still call 507 // stringsym for constant strings. 508 509 case ONOT, OMINUS, OPLUS, OCOM, OREAL, OIMAG, ODOTMETH, ODOTINTER, 510 OIND, OSPTR, OITAB, OIDATA, OADDR: 511 n.Left = walkexpr(n.Left, init) 512 513 case OEFACE, OAND, OSUB, OMUL, OLT, OLE, OGE, OGT, OADD, OOR, OXOR: 514 n.Left = walkexpr(n.Left, init) 515 n.Right = walkexpr(n.Right, init) 516 517 case ODOT: 518 usefield(n) 519 n.Left = walkexpr(n.Left, init) 520 521 case ODOTTYPE, ODOTTYPE2: 522 n.Left = walkexpr(n.Left, init) 523 // Set up interface type addresses for back end. 524 n.Right = typename(n.Type) 525 if n.Op == ODOTTYPE { 526 n.Right.Right = typename(n.Left.Type) 527 } 528 if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() { 529 n.List.Set1(itabname(n.Type, n.Left.Type)) 530 } 531 532 case ODOTPTR: 533 usefield(n) 534 if n.Op == ODOTPTR && n.Left.Type.Elem().Width == 0 { 535 // No actual copy will be generated, so emit an explicit nil check. 
536 n.Left = cheapexpr(n.Left, init) 537 538 checknil(n.Left, init) 539 } 540 541 n.Left = walkexpr(n.Left, init) 542 543 case OLEN, OCAP: 544 n.Left = walkexpr(n.Left, init) 545 546 // replace len(*[10]int) with 10. 547 // delayed until now to preserve side effects. 548 t := n.Left.Type 549 550 if t.IsPtr() { 551 t = t.Elem() 552 } 553 if t.IsArray() { 554 safeexpr(n.Left, init) 555 nodconst(n, n.Type, t.NumElem()) 556 n.SetTypecheck(1) 557 } 558 559 case OLSH, ORSH: 560 n.Left = walkexpr(n.Left, init) 561 n.Right = walkexpr(n.Right, init) 562 t := n.Left.Type 563 n.SetBounded(bounded(n.Right, 8*t.Width)) 564 if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) { 565 Warn("shift bounds check elided") 566 } 567 568 case OCOMPLEX: 569 // Use results from call expression as arguments for complex. 570 if n.Left == nil && n.Right == nil { 571 n.Left = n.List.First() 572 n.Right = n.List.Second() 573 } 574 n.Left = walkexpr(n.Left, init) 575 n.Right = walkexpr(n.Right, init) 576 577 case OEQ, ONE: 578 n.Left = walkexpr(n.Left, init) 579 n.Right = walkexpr(n.Right, init) 580 581 // Disable safemode while compiling this code: the code we 582 // generate internally can refer to unsafe.Pointer. 583 // In this case it can happen if we need to generate an == 584 // for a struct containing a reflect.Value, which itself has 585 // an unexported field of type unsafe.Pointer. 586 old_safemode := safemode 587 safemode = false 588 n = walkcompare(n, init) 589 safemode = old_safemode 590 591 case OANDAND, OOROR: 592 n.Left = walkexpr(n.Left, init) 593 594 // cannot put side effects from n.Right on init, 595 // because they cannot run before n.Left is checked. 596 // save elsewhere and store on the eventual n.Right. 
597 var ll Nodes 598 599 n.Right = walkexpr(n.Right, &ll) 600 n.Right = addinit(n.Right, ll.Slice()) 601 n = walkinrange(n, init) 602 603 case OPRINT, OPRINTN: 604 walkexprlist(n.List.Slice(), init) 605 n = walkprint(n, init) 606 607 case OPANIC: 608 n = mkcall("gopanic", nil, init, n.Left) 609 610 case ORECOVER: 611 n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil)) 612 613 case OCLOSUREVAR, OCFUNC: 614 n.SetAddable(true) 615 616 case OCALLINTER: 617 usemethod(n) 618 t := n.Left.Type 619 if n.List.Len() != 0 && n.List.First().Op == OAS { 620 break 621 } 622 n.Left = walkexpr(n.Left, init) 623 walkexprlist(n.List.Slice(), init) 624 ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init) 625 n.List.Set(reorder1(ll)) 626 627 case OCALLFUNC: 628 if n.Left.Op == OCLOSURE { 629 // Transform direct call of a closure to call of a normal function. 630 // transformclosure already did all preparation work. 631 632 // Prepend captured variables to argument list. 633 n.List.Prepend(n.Left.Func.Enter.Slice()...) 634 635 n.Left.Func.Enter.Set(nil) 636 637 // Replace OCLOSURE with ONAME/PFUNC. 638 n.Left = n.Left.Func.Closure.Func.Nname 639 640 // Update type of OCALLFUNC node. 641 // Output arguments had not changed, but their offsets could. 
642 if n.Left.Type.NumResults() == 1 { 643 n.Type = n.Left.Type.Results().Field(0).Type 644 } else { 645 n.Type = n.Left.Type.Results() 646 } 647 } 648 649 t := n.Left.Type 650 if n.List.Len() != 0 && n.List.First().Op == OAS { 651 break 652 } 653 654 n.Left = walkexpr(n.Left, init) 655 walkexprlist(n.List.Slice(), init) 656 657 ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init) 658 n.List.Set(reorder1(ll)) 659 660 case OCALLMETH: 661 t := n.Left.Type 662 if n.List.Len() != 0 && n.List.First().Op == OAS { 663 break 664 } 665 n.Left = walkexpr(n.Left, init) 666 walkexprlist(n.List.Slice(), init) 667 ll := ascompatte(n, false, t.Recvs(), []*Node{n.Left.Left}, 0, init) 668 lr := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init) 669 ll = append(ll, lr...) 670 n.Left.Left = nil 671 updateHasCall(n.Left) 672 n.List.Set(reorder1(ll)) 673 674 case OAS: 675 init.AppendNodes(&n.Ninit) 676 677 n.Left = walkexpr(n.Left, init) 678 n.Left = safeexpr(n.Left, init) 679 680 if oaslit(n, init) { 681 break 682 } 683 684 if n.Right == nil { 685 // TODO(austin): Check all "implicit zeroing" 686 break 687 } 688 689 if !instrumenting && iszero(n.Right) { 690 break 691 } 692 693 switch n.Right.Op { 694 default: 695 n.Right = walkexpr(n.Right, init) 696 697 case ORECV: 698 // x = <-c; n.Left is x, n.Right.Left is c. 699 // orderstmt made sure x is addressable. 700 n.Right.Left = walkexpr(n.Right.Left, init) 701 702 n1 := nod(OADDR, n.Left, nil) 703 r := n.Right.Left // the channel 704 n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1) 705 n = walkexpr(n, init) 706 break opswitch 707 708 case OAPPEND: 709 // x = append(...) 710 r := n.Right 711 if r.Type.Elem().NotInHeap() { 712 yyerror("%v is go:notinheap; heap allocation disallowed", r.Type.Elem()) 713 } 714 if r.Isddd() { 715 r = appendslice(r, init) // also works for append(slice, string). 
716 } else { 717 r = walkappend(r, init, n) 718 } 719 n.Right = r 720 if r.Op == OAPPEND { 721 // Left in place for back end. 722 // Do not add a new write barrier. 723 // Set up address of type for back end. 724 r.Left = typename(r.Type.Elem()) 725 break opswitch 726 } 727 // Otherwise, lowered for race detector. 728 // Treat as ordinary assignment. 729 } 730 731 if n.Left != nil && n.Right != nil { 732 n = convas(n, init) 733 } 734 735 case OAS2: 736 init.AppendNodes(&n.Ninit) 737 walkexprlistsafe(n.List.Slice(), init) 738 walkexprlistsafe(n.Rlist.Slice(), init) 739 ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init) 740 ll = reorder3(ll) 741 n = liststmt(ll) 742 743 // a,b,... = fn() 744 case OAS2FUNC: 745 init.AppendNodes(&n.Ninit) 746 747 r := n.Rlist.First() 748 walkexprlistsafe(n.List.Slice(), init) 749 r = walkexpr(r, init) 750 751 if isIntrinsicCall(r) { 752 n.Rlist.Set1(r) 753 break 754 } 755 init.Append(r) 756 757 ll := ascompatet(n.List, r.Type) 758 n = liststmt(ll) 759 760 // x, y = <-c 761 // orderstmt made sure x is addressable. 
762 case OAS2RECV: 763 init.AppendNodes(&n.Ninit) 764 765 r := n.Rlist.First() 766 walkexprlistsafe(n.List.Slice(), init) 767 r.Left = walkexpr(r.Left, init) 768 var n1 *Node 769 if isblank(n.List.First()) { 770 n1 = nodnil() 771 } else { 772 n1 = nod(OADDR, n.List.First(), nil) 773 } 774 n1.Etype = 1 // addr does not escape 775 fn := chanfn("chanrecv2", 2, r.Left.Type) 776 ok := n.List.Second() 777 call := mkcall1(fn, ok.Type, init, r.Left, n1) 778 n = nod(OAS, ok, call) 779 n = typecheck(n, Etop) 780 781 // a,b = m[i] 782 case OAS2MAPR: 783 init.AppendNodes(&n.Ninit) 784 785 r := n.Rlist.First() 786 walkexprlistsafe(n.List.Slice(), init) 787 r.Left = walkexpr(r.Left, init) 788 r.Right = walkexpr(r.Right, init) 789 t := r.Left.Type 790 791 fast := mapfast(t) 792 var key *Node 793 if fast != mapslow { 794 // fast versions take key by value 795 key = r.Right 796 } else { 797 // standard version takes key by reference 798 // orderexpr made sure key is addressable. 799 key = nod(OADDR, r.Right, nil) 800 } 801 802 // from: 803 // a,b = m[i] 804 // to: 805 // var,b = mapaccess2*(t, m, i) 806 // a = *var 807 a := n.List.First() 808 809 if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero 810 fn := mapfn(mapaccess2[fast], t) 811 r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key) 812 } else { 813 fn := mapfn("mapaccess2_fat", t) 814 z := zeroaddr(w) 815 r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z) 816 } 817 818 // mapaccess2* returns a typed bool, but due to spec changes, 819 // the boolean result of i.(T) is now untyped so we make it the 820 // same type as the variable on the lhs. 
821 if ok := n.List.Second(); !isblank(ok) && ok.Type.IsBoolean() { 822 r.Type.Field(1).Type = ok.Type 823 } 824 n.Rlist.Set1(r) 825 n.Op = OAS2FUNC 826 827 // don't generate a = *var if a is _ 828 if !isblank(a) { 829 var_ := temp(types.NewPtr(t.Val())) 830 var_.SetTypecheck(1) 831 var_.SetNonNil(true) // mapaccess always returns a non-nil pointer 832 n.List.SetFirst(var_) 833 n = walkexpr(n, init) 834 init.Append(n) 835 n = nod(OAS, a, nod(OIND, var_, nil)) 836 } 837 838 n = typecheck(n, Etop) 839 n = walkexpr(n, init) 840 841 case ODELETE: 842 init.AppendNodes(&n.Ninit) 843 map_ := n.List.First() 844 key := n.List.Second() 845 map_ = walkexpr(map_, init) 846 key = walkexpr(key, init) 847 848 t := map_.Type 849 fast := mapfast(t) 850 if fast == mapslow { 851 // orderstmt made sure key is addressable. 852 key = nod(OADDR, key, nil) 853 } 854 n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) 855 856 case OAS2DOTTYPE: 857 walkexprlistsafe(n.List.Slice(), init) 858 n.Rlist.SetFirst(walkexpr(n.Rlist.First(), init)) 859 860 case OCONVIFACE: 861 n.Left = walkexpr(n.Left, init) 862 863 // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. 864 if isdirectiface(n.Left.Type) { 865 var t *Node 866 if n.Type.IsEmptyInterface() { 867 t = typename(n.Left.Type) 868 } else { 869 t = itabname(n.Left.Type, n.Type) 870 } 871 l := nod(OEFACE, t, n.Left) 872 l.Type = n.Type 873 l.SetTypecheck(n.Typecheck()) 874 n = l 875 break 876 } 877 878 if staticbytes == nil { 879 staticbytes = newname(Runtimepkg.Lookup("staticbytes")) 880 staticbytes.SetClass(PEXTERN) 881 staticbytes.Type = types.NewArray(types.Types[TUINT8], 256) 882 zerobase = newname(Runtimepkg.Lookup("zerobase")) 883 zerobase.SetClass(PEXTERN) 884 zerobase.Type = types.Types[TUINTPTR] 885 } 886 887 // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, 888 // by using an existing addressable value identical to n.Left 889 // or creating one on the stack. 
890 var value *Node 891 switch { 892 case n.Left.Type.Size() == 0: 893 // n.Left is zero-sized. Use zerobase. 894 cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246. 895 value = zerobase 896 case n.Left.Type.IsBoolean() || (n.Left.Type.Size() == 1 && n.Left.Type.IsInteger()): 897 // n.Left is a bool/byte. Use staticbytes[n.Left]. 898 n.Left = cheapexpr(n.Left, init) 899 value = nod(OINDEX, staticbytes, byteindex(n.Left)) 900 value.SetBounded(true) 901 case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly(): 902 // n.Left is a readonly global; use it directly. 903 value = n.Left 904 case !n.Left.Type.IsInterface() && n.Esc == EscNone && n.Left.Type.Width <= 1024: 905 // n.Left does not escape. Use a stack temporary initialized to n.Left. 906 value = temp(n.Left.Type) 907 init.Append(typecheck(nod(OAS, value, n.Left), Etop)) 908 } 909 910 if value != nil { 911 // Value is identical to n.Left. 912 // Construct the interface directly: {type/itab, &value}. 913 var t *Node 914 if n.Type.IsEmptyInterface() { 915 t = typename(n.Left.Type) 916 } else { 917 t = itabname(n.Left.Type, n.Type) 918 } 919 l := nod(OEFACE, t, typecheck(nod(OADDR, value, nil), Erv)) 920 l.Type = n.Type 921 l.SetTypecheck(n.Typecheck()) 922 n = l 923 break 924 } 925 926 // Implement interface to empty interface conversion. 927 // tmp = i.itab 928 // if tmp != nil { 929 // tmp = tmp.type 930 // } 931 // e = iface{tmp, i.data} 932 if n.Type.IsEmptyInterface() && n.Left.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() { 933 // Evaluate the input interface. 934 c := temp(n.Left.Type) 935 init.Append(nod(OAS, c, n.Left)) 936 937 // Get the itab out of the interface. 938 tmp := temp(types.NewPtr(types.Types[TUINT8])) 939 init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), Erv))) 940 941 // Get the type out of the itab. 
942 nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), Erv), nil) 943 nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp))) 944 init.Append(nif) 945 946 // Build the result. 947 e := nod(OEFACE, tmp, ifaceData(c, types.NewPtr(types.Types[TUINT8]))) 948 e.Type = n.Type // assign type manually, typecheck doesn't understand OEFACE. 949 e.SetTypecheck(1) 950 n = e 951 break 952 } 953 954 var ll []*Node 955 if n.Type.IsEmptyInterface() { 956 if !n.Left.Type.IsInterface() { 957 ll = append(ll, typename(n.Left.Type)) 958 } 959 } else { 960 if n.Left.Type.IsInterface() { 961 ll = append(ll, typename(n.Type)) 962 } else { 963 ll = append(ll, itabname(n.Left.Type, n.Type)) 964 } 965 } 966 967 if n.Left.Type.IsInterface() { 968 ll = append(ll, n.Left) 969 } else { 970 // regular types are passed by reference to avoid C vararg calls 971 // orderexpr arranged for n.Left to be a temporary for all 972 // the conversions it could see. comparison of an interface 973 // with a non-interface, especially in a switch on interface value 974 // with non-interface cases, is not visible to orderstmt, so we 975 // have to fall back on allocating a temp here. 
976 if islvalue(n.Left) { 977 ll = append(ll, nod(OADDR, n.Left, nil)) 978 } else { 979 ll = append(ll, nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil)) 980 } 981 dowidth(n.Left.Type) 982 } 983 984 fn := syslook(convFuncName(n.Left.Type, n.Type)) 985 fn = substArgTypes(fn, n.Left.Type, n.Type) 986 dowidth(fn.Type) 987 n = nod(OCALL, fn, nil) 988 n.List.Set(ll) 989 n = typecheck(n, Erv) 990 n = walkexpr(n, init) 991 992 case OCONV, OCONVNOP: 993 if thearch.LinkArch.Family == sys.ARM || thearch.LinkArch.Family == sys.MIPS { 994 if n.Left.Type.IsFloat() { 995 if n.Type.Etype == TINT64 { 996 n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 997 break 998 } 999 1000 if n.Type.Etype == TUINT64 { 1001 n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1002 break 1003 } 1004 } 1005 1006 if n.Type.IsFloat() { 1007 if n.Left.Type.Etype == TINT64 { 1008 n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type) 1009 break 1010 } 1011 1012 if n.Left.Type.Etype == TUINT64 { 1013 n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type) 1014 break 1015 } 1016 } 1017 } 1018 1019 if thearch.LinkArch.Family == sys.I386 { 1020 if n.Left.Type.IsFloat() { 1021 if n.Type.Etype == TINT64 { 1022 n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1023 break 1024 } 1025 1026 if n.Type.Etype == TUINT64 { 1027 n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1028 break 1029 } 1030 if n.Type.Etype == TUINT32 || n.Type.Etype == TUINT || n.Type.Etype == TUINTPTR { 1031 n = mkcall("float64touint32", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1032 break 1033 } 1034 } 1035 if n.Type.IsFloat() { 1036 if n.Left.Type.Etype == TINT64 { 1037 n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type) 1038 break 1039 } 1040 1041 
if n.Left.Type.Etype == TUINT64 { 1042 n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type) 1043 break 1044 } 1045 if n.Left.Type.Etype == TUINT32 || n.Left.Type.Etype == TUINT || n.Left.Type.Etype == TUINTPTR { 1046 n = conv(mkcall("uint32tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT32])), n.Type) 1047 break 1048 } 1049 } 1050 } 1051 1052 n.Left = walkexpr(n.Left, init) 1053 1054 case OANDNOT: 1055 n.Left = walkexpr(n.Left, init) 1056 n.Op = OAND 1057 n.Right = nod(OCOM, n.Right, nil) 1058 n.Right = typecheck(n.Right, Erv) 1059 n.Right = walkexpr(n.Right, init) 1060 1061 case ODIV, OMOD: 1062 n.Left = walkexpr(n.Left, init) 1063 n.Right = walkexpr(n.Right, init) 1064 1065 // rewrite complex div into function call. 1066 et := n.Left.Type.Etype 1067 1068 if isComplex[et] && n.Op == ODIV { 1069 t := n.Type 1070 n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128])) 1071 n = conv(n, t) 1072 break 1073 } 1074 1075 // Nothing to do for float divisions. 1076 if isFloat[et] { 1077 break 1078 } 1079 1080 // rewrite 64-bit div and mod on 32-bit architectures. 1081 // TODO: Remove this code once we can introduce 1082 // runtime calls late in SSA processing. 1083 if Widthreg < 8 && (et == TINT64 || et == TUINT64) { 1084 if n.Right.Op == OLITERAL { 1085 // Leave div/mod by constant powers of 2. 1086 // The SSA backend will handle those. 
1087 switch et { 1088 case TINT64: 1089 c := n.Right.Int64() 1090 if c < 0 { 1091 c = -c 1092 } 1093 if c != 0 && c&(c-1) == 0 { 1094 break opswitch 1095 } 1096 case TUINT64: 1097 c := uint64(n.Right.Int64()) 1098 if c != 0 && c&(c-1) == 0 { 1099 break opswitch 1100 } 1101 } 1102 } 1103 var fn string 1104 if et == TINT64 { 1105 fn = "int64" 1106 } else { 1107 fn = "uint64" 1108 } 1109 if n.Op == ODIV { 1110 fn += "div" 1111 } else { 1112 fn += "mod" 1113 } 1114 n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et])) 1115 } 1116 1117 case OINDEX: 1118 n.Left = walkexpr(n.Left, init) 1119 1120 // save the original node for bounds checking elision. 1121 // If it was a ODIV/OMOD walk might rewrite it. 1122 r := n.Right 1123 1124 n.Right = walkexpr(n.Right, init) 1125 1126 // if range of type cannot exceed static array bound, 1127 // disable bounds check. 1128 if n.Bounded() { 1129 break 1130 } 1131 t := n.Left.Type 1132 if t != nil && t.IsPtr() { 1133 t = t.Elem() 1134 } 1135 if t.IsArray() { 1136 n.SetBounded(bounded(r, t.NumElem())) 1137 if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) { 1138 Warn("index bounds check elided") 1139 } 1140 if smallintconst(n.Right) && !n.Bounded() { 1141 yyerror("index out of bounds") 1142 } 1143 } else if Isconst(n.Left, CTSTR) { 1144 n.SetBounded(bounded(r, int64(len(n.Left.Val().U.(string))))) 1145 if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) { 1146 Warn("index bounds check elided") 1147 } 1148 if smallintconst(n.Right) && !n.Bounded() { 1149 yyerror("index out of bounds") 1150 } 1151 } 1152 1153 if Isconst(n.Right, CTINT) { 1154 if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 { 1155 yyerror("index out of bounds") 1156 } 1157 } 1158 1159 case OINDEXMAP: 1160 // Replace m[k] with *map{access1,assign}(maptype, m, &k) 1161 n.Left = walkexpr(n.Left, init) 1162 n.Right = walkexpr(n.Right, init) 1163 map_ := n.Left 1164 key 
:= n.Right 1165 t := map_.Type 1166 if n.Etype == 1 { 1167 // This m[k] expression is on the left-hand side of an assignment. 1168 fast := mapfast(t) 1169 if fast == mapslow { 1170 // standard version takes key by reference. 1171 // orderexpr made sure key is addressable. 1172 key = nod(OADDR, key, nil) 1173 } 1174 n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key) 1175 } else { 1176 // m[k] is not the target of an assignment. 1177 fast := mapfast(t) 1178 if fast == mapslow { 1179 // standard version takes key by reference. 1180 // orderexpr made sure key is addressable. 1181 key = nod(OADDR, key, nil) 1182 } 1183 1184 if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero 1185 n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Val()), init, typename(t), map_, key) 1186 } else { 1187 z := zeroaddr(w) 1188 n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Val()), init, typename(t), map_, key, z) 1189 } 1190 } 1191 n.Type = types.NewPtr(t.Val()) 1192 n.SetNonNil(true) // mapaccess1* and mapassign always return non-nil pointers. 1193 n = nod(OIND, n, nil) 1194 n.Type = t.Val() 1195 n.SetTypecheck(1) 1196 1197 case ORECV: 1198 Fatalf("walkexpr ORECV") // should see inside OAS only 1199 1200 case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: 1201 n.Left = walkexpr(n.Left, init) 1202 low, high, max := n.SliceBounds() 1203 low = walkexpr(low, init) 1204 if low != nil && iszero(low) { 1205 // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k]. 1206 low = nil 1207 } 1208 high = walkexpr(high, init) 1209 max = walkexpr(max, init) 1210 n.SetSliceBounds(low, high, max) 1211 if n.Op.IsSlice3() { 1212 if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) { 1213 // Reduce x[i:j:cap(x)] to x[i:j]. 
1214 if n.Op == OSLICE3 { 1215 n.Op = OSLICE 1216 } else { 1217 n.Op = OSLICEARR 1218 } 1219 n = reduceSlice(n) 1220 } 1221 } else { 1222 n = reduceSlice(n) 1223 } 1224 1225 case ONEW: 1226 if n.Esc == EscNone { 1227 if n.Type.Elem().Width >= 1<<16 { 1228 Fatalf("large ONEW with EscNone: %v", n) 1229 } 1230 r := temp(n.Type.Elem()) 1231 r = nod(OAS, r, nil) // zero temp 1232 r = typecheck(r, Etop) 1233 init.Append(r) 1234 r = nod(OADDR, r.Left, nil) 1235 r = typecheck(r, Erv) 1236 n = r 1237 } else { 1238 n = callnew(n.Type.Elem()) 1239 } 1240 1241 case OCMPSTR: 1242 // s + "badgerbadgerbadger" == "badgerbadgerbadger" 1243 if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && n.Left.List.Len() == 2 && Isconst(n.Left.List.Second(), CTSTR) && strlit(n.Right) == strlit(n.Left.List.Second()) { 1244 // TODO(marvin): Fix Node.EType type union. 1245 r := nod(Op(n.Etype), nod(OLEN, n.Left.List.First(), nil), nodintconst(0)) 1246 r = typecheck(r, Erv) 1247 r = walkexpr(r, init) 1248 r.Type = n.Type 1249 n = r 1250 break 1251 } 1252 1253 // Rewrite comparisons to short constant strings as length+byte-wise comparisons. 1254 var cs, ncs *Node // const string, non-const string 1255 switch { 1256 case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR): 1257 // ignore; will be constant evaluated 1258 case Isconst(n.Left, CTSTR): 1259 cs = n.Left 1260 ncs = n.Right 1261 case Isconst(n.Right, CTSTR): 1262 cs = n.Right 1263 ncs = n.Left 1264 } 1265 if cs != nil { 1266 cmp := Op(n.Etype) 1267 // maxRewriteLen was chosen empirically. 1268 // It is the value that minimizes cmd/go file size 1269 // across most architectures. 1270 // See the commit description for CL 26758 for details. 1271 maxRewriteLen := 6 1272 // Some architectures can load unaligned byte sequence as 1 word. 1273 // So we can cover longer strings with the same amount of code. 
1274 canCombineLoads := false 1275 combine64bit := false 1276 // TODO: does this improve performance on any other architectures? 1277 switch thearch.LinkArch.Family { 1278 case sys.AMD64: 1279 // Larger compare require longer instructions, so keep this reasonably low. 1280 // Data from CL 26758 shows that longer strings are rare. 1281 // If we really want we can do 16 byte SSE comparisons in the future. 1282 maxRewriteLen = 16 1283 canCombineLoads = true 1284 combine64bit = true 1285 case sys.I386: 1286 maxRewriteLen = 8 1287 canCombineLoads = true 1288 } 1289 var and Op 1290 switch cmp { 1291 case OEQ: 1292 and = OANDAND 1293 case ONE: 1294 and = OOROR 1295 default: 1296 // Don't do byte-wise comparisons for <, <=, etc. 1297 // They're fairly complicated. 1298 // Length-only checks are ok, though. 1299 maxRewriteLen = 0 1300 } 1301 if s := cs.Val().U.(string); len(s) <= maxRewriteLen { 1302 if len(s) > 0 { 1303 ncs = safeexpr(ncs, init) 1304 } 1305 // TODO(marvin): Fix Node.EType type union. 1306 r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s)))) 1307 remains := len(s) 1308 for i := 0; remains > 0; { 1309 if remains == 1 || !canCombineLoads { 1310 cb := nodintconst(int64(s[i])) 1311 ncb := nod(OINDEX, ncs, nodintconst(int64(i))) 1312 r = nod(and, r, nod(cmp, ncb, cb)) 1313 remains-- 1314 i++ 1315 continue 1316 } 1317 var step int 1318 var convType *types.Type 1319 switch { 1320 case remains >= 8 && combine64bit: 1321 convType = types.Types[TINT64] 1322 step = 8 1323 case remains >= 4: 1324 convType = types.Types[TUINT32] 1325 step = 4 1326 case remains >= 2: 1327 convType = types.Types[TUINT16] 1328 step = 2 1329 } 1330 ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i))) 1331 ncsubstr = conv(ncsubstr, convType) 1332 csubstr := int64(s[i]) 1333 // Calculate large constant from bytes as sequence of shifts and ors. 1334 // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... 1335 // ssa will combine this into a single large load. 
1336 for offset := 1; offset < step; offset++ { 1337 b := nod(OINDEX, ncs, nodintconst(int64(i+offset))) 1338 b = conv(b, convType) 1339 b = nod(OLSH, b, nodintconst(int64(8*offset))) 1340 ncsubstr = nod(OOR, ncsubstr, b) 1341 csubstr = csubstr | int64(s[i+offset])<<uint8(8*offset) 1342 } 1343 csubstrPart := nodintconst(csubstr) 1344 // Compare "step" bytes as once 1345 r = nod(and, r, nod(cmp, csubstrPart, ncsubstr)) 1346 remains -= step 1347 i += step 1348 } 1349 r = typecheck(r, Erv) 1350 r = walkexpr(r, init) 1351 r.Type = n.Type 1352 n = r 1353 break 1354 } 1355 } 1356 1357 var r *Node 1358 // TODO(marvin): Fix Node.EType type union. 1359 if Op(n.Etype) == OEQ || Op(n.Etype) == ONE { 1360 // prepare for rewrite below 1361 n.Left = cheapexpr(n.Left, init) 1362 n.Right = cheapexpr(n.Right, init) 1363 1364 lstr := conv(n.Left, types.Types[TSTRING]) 1365 rstr := conv(n.Right, types.Types[TSTRING]) 1366 lptr := nod(OSPTR, lstr, nil) 1367 rptr := nod(OSPTR, rstr, nil) 1368 llen := conv(nod(OLEN, lstr, nil), types.Types[TUINTPTR]) 1369 rlen := conv(nod(OLEN, rstr, nil), types.Types[TUINTPTR]) 1370 1371 fn := syslook("memequal") 1372 fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8]) 1373 r = mkcall1(fn, types.Types[TBOOL], init, lptr, rptr, llen) 1374 1375 // quick check of len before full compare for == or !=. 1376 // memequal then tests equality up to length len. 1377 // TODO(marvin): Fix Node.EType type union. 
1378 if Op(n.Etype) == OEQ { 1379 // len(left) == len(right) && memequal(left, right, len) 1380 r = nod(OANDAND, nod(OEQ, llen, rlen), r) 1381 } else { 1382 // len(left) != len(right) || !memequal(left, right, len) 1383 r = nod(ONOT, r, nil) 1384 r = nod(OOROR, nod(ONE, llen, rlen), r) 1385 } 1386 1387 r = typecheck(r, Erv) 1388 r = walkexpr(r, nil) 1389 } else { 1390 // sys_cmpstring(s1, s2) :: 0 1391 r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING])) 1392 // TODO(marvin): Fix Node.EType type union. 1393 r = nod(Op(n.Etype), r, nodintconst(0)) 1394 } 1395 1396 r = typecheck(r, Erv) 1397 if !n.Type.IsBoolean() { 1398 Fatalf("cmp %v", n.Type) 1399 } 1400 r.Type = n.Type 1401 n = r 1402 1403 case OADDSTR: 1404 n = addstr(n, init) 1405 1406 case OAPPEND: 1407 // order should make sure we only see OAS(node, OAPPEND), which we handle above. 1408 Fatalf("append outside assignment") 1409 1410 case OCOPY: 1411 n = copyany(n, init, instrumenting && !compiling_runtime) 1412 1413 // cannot use chanfn - closechan takes any, not chan any 1414 case OCLOSE: 1415 fn := syslook("closechan") 1416 1417 fn = substArgTypes(fn, n.Left.Type) 1418 n = mkcall1(fn, nil, init, n.Left) 1419 1420 case OMAKECHAN: 1421 // When size fits into int, use makechan instead of 1422 // makechan64, which is faster and shorter on 32 bit platforms. 1423 size := n.Left 1424 fnname := "makechan64" 1425 argtype := types.Types[TINT64] 1426 1427 // Type checking guarantees that TIDEAL size is positive and fits in an int. 1428 // The case of size overflow when converting TUINT or TUINTPTR to TINT 1429 // will be handled by the negative range checks in makechan during runtime. 
1430 if size.Type.IsKind(TIDEAL) || maxintval[size.Type.Etype].Cmp(maxintval[TUINT]) <= 0 { 1431 fnname = "makechan" 1432 argtype = types.Types[TINT] 1433 } 1434 1435 n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype)) 1436 1437 case OMAKEMAP: 1438 t := n.Type 1439 hmapType := hmap(t) 1440 hint := n.Left 1441 1442 // var h *hmap 1443 var h *Node 1444 if n.Esc == EscNone { 1445 // Allocate hmap on stack. 1446 1447 // var hv hmap 1448 hv := temp(hmapType) 1449 zero := nod(OAS, hv, nil) 1450 zero = typecheck(zero, Etop) 1451 init.Append(zero) 1452 // h = &hv 1453 h = nod(OADDR, hv, nil) 1454 1455 // Allocate one bucket pointed to by hmap.buckets on stack if hint 1456 // is not larger than BUCKETSIZE. In case hint is larger than 1457 // BUCKETSIZE runtime.makemap will allocate the buckets on the heap. 1458 // Maximum key and value size is 128 bytes, larger objects 1459 // are stored with an indirection. So max bucket size is 2048+eps. 1460 if !Isconst(hint, CTINT) || 1461 !(hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) > 0) { 1462 // var bv bmap 1463 bv := temp(bmap(t)) 1464 1465 zero = nod(OAS, bv, nil) 1466 zero = typecheck(zero, Etop) 1467 init.Append(zero) 1468 1469 // b = &bv 1470 b := nod(OADDR, bv, nil) 1471 1472 // h.buckets = b 1473 bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap 1474 na := nod(OAS, nodSym(ODOT, h, bsym), b) 1475 na = typecheck(na, Etop) 1476 init.Append(na) 1477 } 1478 } else { 1479 // h = nil 1480 h = nodnil() 1481 } 1482 1483 // When hint fits into int, use makemap instead of 1484 // makemap64, which is faster and shorter on 32 bit platforms. 1485 fnname := "makemap64" 1486 argtype := types.Types[TINT64] 1487 1488 // Type checking guarantees that TIDEAL hint is positive and fits in an int. 1489 // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. 
1490 // The case of hint overflow when converting TUINT or TUINTPTR to TINT 1491 // will be handled by the negative range checks in makemap during runtime. 1492 if hint.Type.IsKind(TIDEAL) || maxintval[hint.Type.Etype].Cmp(maxintval[TUINT]) <= 0 { 1493 fnname = "makemap" 1494 argtype = types.Types[TINT] 1495 } 1496 1497 fn := syslook(fnname) 1498 fn = substArgTypes(fn, hmapType, t.Key(), t.Val()) 1499 n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h) 1500 1501 case OMAKESLICE: 1502 l := n.Left 1503 r := n.Right 1504 if r == nil { 1505 r = safeexpr(l, init) 1506 l = r 1507 } 1508 t := n.Type 1509 if n.Esc == EscNone { 1510 if !isSmallMakeSlice(n) { 1511 Fatalf("non-small OMAKESLICE with EscNone: %v", n) 1512 } 1513 // var arr [r]T 1514 // n = arr[:l] 1515 t = types.NewArray(t.Elem(), nonnegintconst(r)) // [r]T 1516 var_ := temp(t) 1517 a := nod(OAS, var_, nil) // zero temp 1518 a = typecheck(a, Etop) 1519 init.Append(a) 1520 r := nod(OSLICE, var_, nil) // arr[:l] 1521 r.SetSliceBounds(nil, l, nil) 1522 r = conv(r, n.Type) // in case n.Type is named. 1523 r = typecheck(r, Erv) 1524 r = walkexpr(r, init) 1525 n = r 1526 } else { 1527 // n escapes; set up a call to makeslice. 1528 // When len and cap can fit into int, use makeslice instead of 1529 // makeslice64, which is faster and shorter on 32 bit platforms. 1530 1531 if t.Elem().NotInHeap() { 1532 yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) 1533 } 1534 1535 len, cap := l, r 1536 1537 fnname := "makeslice64" 1538 argtype := types.Types[TINT64] 1539 1540 // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. 1541 // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT 1542 // will be handled by the negative range checks in makeslice during runtime. 
1543 if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) && 1544 (cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) { 1545 fnname = "makeslice" 1546 argtype = types.Types[TINT] 1547 } 1548 1549 fn := syslook(fnname) 1550 fn = substArgTypes(fn, t.Elem()) // any-1 1551 n = mkcall1(fn, t, init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)) 1552 } 1553 1554 case ORUNESTR: 1555 a := nodnil() 1556 if n.Esc == EscNone { 1557 t := types.NewArray(types.Types[TUINT8], 4) 1558 var_ := temp(t) 1559 a = nod(OADDR, var_, nil) 1560 } 1561 1562 // intstring(*[4]byte, rune) 1563 n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64])) 1564 1565 case OARRAYBYTESTR: 1566 a := nodnil() 1567 if n.Esc == EscNone { 1568 // Create temporary buffer for string on stack. 1569 t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) 1570 1571 a = nod(OADDR, temp(t), nil) 1572 } 1573 1574 // slicebytetostring(*[32]byte, []byte) string; 1575 n = mkcall("slicebytetostring", n.Type, init, a, n.Left) 1576 1577 // slicebytetostringtmp([]byte) string; 1578 case OARRAYBYTESTRTMP: 1579 n.Left = walkexpr(n.Left, init) 1580 1581 if !instrumenting { 1582 // Let the backend handle OARRAYBYTESTRTMP directly 1583 // to avoid a function call to slicebytetostringtmp. 1584 break 1585 } 1586 1587 n = mkcall("slicebytetostringtmp", n.Type, init, n.Left) 1588 1589 // slicerunetostring(*[32]byte, []rune) string; 1590 case OARRAYRUNESTR: 1591 a := nodnil() 1592 1593 if n.Esc == EscNone { 1594 // Create temporary buffer for string on stack. 1595 t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) 1596 1597 a = nod(OADDR, temp(t), nil) 1598 } 1599 1600 n = mkcall("slicerunetostring", n.Type, init, a, n.Left) 1601 1602 // stringtoslicebyte(*32[byte], string) []byte; 1603 case OSTRARRAYBYTE: 1604 a := nodnil() 1605 1606 if n.Esc == EscNone { 1607 // Create temporary buffer for slice on stack. 
1608 t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) 1609 1610 a = nod(OADDR, temp(t), nil) 1611 } 1612 1613 n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, types.Types[TSTRING])) 1614 1615 case OSTRARRAYBYTETMP: 1616 // []byte(string) conversion that creates a slice 1617 // referring to the actual string bytes. 1618 // This conversion is handled later by the backend and 1619 // is only for use by internal compiler optimizations 1620 // that know that the slice won't be mutated. 1621 // The only such case today is: 1622 // for i, c := range []byte(string) 1623 n.Left = walkexpr(n.Left, init) 1624 1625 // stringtoslicerune(*[32]rune, string) []rune 1626 case OSTRARRAYRUNE: 1627 a := nodnil() 1628 1629 if n.Esc == EscNone { 1630 // Create temporary buffer for slice on stack. 1631 t := types.NewArray(types.Types[TINT32], tmpstringbufsize) 1632 1633 a = nod(OADDR, temp(t), nil) 1634 } 1635 1636 n = mkcall("stringtoslicerune", n.Type, init, a, n.Left) 1637 1638 // ifaceeq(i1 any-1, i2 any-2) (ret bool); 1639 case OCMPIFACE: 1640 if !eqtype(n.Left.Type, n.Right.Type) { 1641 Fatalf("ifaceeq %v %v %v", n.Op, n.Left.Type, n.Right.Type) 1642 } 1643 var fn *Node 1644 if n.Left.Type.IsEmptyInterface() { 1645 fn = syslook("efaceeq") 1646 } else { 1647 fn = syslook("ifaceeq") 1648 } 1649 1650 n.Right = cheapexpr(n.Right, init) 1651 n.Left = cheapexpr(n.Left, init) 1652 lt := nod(OITAB, n.Left, nil) 1653 rt := nod(OITAB, n.Right, nil) 1654 ld := nod(OIDATA, n.Left, nil) 1655 rd := nod(OIDATA, n.Right, nil) 1656 ld.Type = types.Types[TUNSAFEPTR] 1657 rd.Type = types.Types[TUNSAFEPTR] 1658 ld.SetTypecheck(1) 1659 rd.SetTypecheck(1) 1660 call := mkcall1(fn, n.Type, init, lt, ld, rd) 1661 1662 // Check itable/type before full compare. 1663 // Note: short-circuited because order matters. 1664 // TODO(marvin): Fix Node.EType type union. 
1665 var cmp *Node 1666 if Op(n.Etype) == OEQ { 1667 cmp = nod(OANDAND, nod(OEQ, lt, rt), call) 1668 } else { 1669 cmp = nod(OOROR, nod(ONE, lt, rt), nod(ONOT, call, nil)) 1670 } 1671 cmp = typecheck(cmp, Erv) 1672 cmp = walkexpr(cmp, init) 1673 cmp.Type = n.Type 1674 n = cmp 1675 1676 case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT: 1677 if isStaticCompositeLiteral(n) && !canSSAType(n.Type) { 1678 // n can be directly represented in the read-only data section. 1679 // Make direct reference to the static data. See issue 12841. 1680 vstat := staticname(n.Type) 1681 vstat.Name.SetReadonly(true) 1682 fixedlit(inInitFunction, initKindStatic, n, vstat, init) 1683 n = vstat 1684 n = typecheck(n, Erv) 1685 break 1686 } 1687 var_ := temp(n.Type) 1688 anylit(n, var_, init) 1689 n = var_ 1690 1691 case OSEND: 1692 n1 := n.Right 1693 n1 = assignconv(n1, n.Left.Type.Elem(), "chan send") 1694 n1 = walkexpr(n1, init) 1695 n1 = nod(OADDR, n1, nil) 1696 n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1) 1697 1698 case OCLOSURE: 1699 n = walkclosure(n, init) 1700 1701 case OCALLPART: 1702 n = walkpartialcall(n, init) 1703 } 1704 1705 // Expressions that are constant at run time but not 1706 // considered const by the language spec are not turned into 1707 // constants until walk. For example, if n is y%1 == 0, the 1708 // walk of y%1 may have replaced it by 0. 1709 // Check whether n with its updated args is itself now a constant. 1710 t := n.Type 1711 evconst(n) 1712 if n.Type != t { 1713 Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type) 1714 } 1715 if n.Op == OLITERAL { 1716 n = typecheck(n, Erv) 1717 // Emit string symbol now to avoid emitting 1718 // any concurrently during the backend. 
		// Tail of walkexpr: n folded to a literal; intern any string
		// constant now so the backend never emits symbols concurrently.
		if s, ok := n.Val().U.(string); ok {
			_ = stringsym(s)
		}
	}

	updateHasCall(n)

	if Debug['w'] != 0 && n != nil {
		Dump("walk", n)
	}

	lineno = lno
	return n
}

// reduceSlice simplifies the slice expression n by dropping a high bound
// of len(x) and collapsing a full slice x[:] to x itself.
// TODO(josharian): combine this with its caller and simplify
func reduceSlice(n *Node) *Node {
	low, high, max := n.SliceBounds()
	if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
		// Reduce x[i:len(x)] to x[i:].
		high = nil
	}
	n.SetSliceBounds(low, high, max)
	if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
		// Reduce x[:] to x.
		if Debug_slice > 0 {
			Warn("slice: omit slice operation")
		}
		return n.Left
	}
	return n
}

// ascompatee1 builds a single assignment l = r for ascompatee,
// running it through convas unless the target is a map index
// (convas would turn the map assign into a call, defeating reorder3).
func ascompatee1(l *Node, r *Node, init *Nodes) *Node {
	// convas will turn map assigns into function calls,
	// making it impossible for reorder3 to work.
	n := nod(OAS, l, r)

	if l.Op == OINDEXMAP {
		return n
	}

	return convas(n, init)
}

// ascompatee builds the assignments for a parallel assignment
// of an expression list to an expression list.
func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
	// check assign expression list to
	// an expression list. called in
	//	expr-list = expr-list

	// ensure order of evaluation for function calls
	for i := range nl {
		nl[i] = safeexpr(nl[i], init)
	}
	for i1 := range nr {
		nr[i1] = safeexpr(nr[i1], init)
	}

	var nn []*Node
	i := 0
	for ; i < len(nl); i++ {
		if i >= len(nr) {
			break
		}
		// Do not generate 'x = x' during return. See issue 4014.
		if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
			continue
		}
		nn = append(nn, ascompatee1(nl[i], nr[i], init))
	}

	// cannot happen: caller checked that lists had same length
	if i < len(nl) || i < len(nr) {
		var nln, nrn Nodes
		nln.Set(nl)
		nrn.Set(nr)
		Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname())
	}
	return nn
}

// fncall reports whether assigning an rvalue of type rt to the lvalue l
// implies a function call: evaluating l itself involves a call (or a map
// assign or write barrier), or the assignment needs a type conversion.
func fncall(l *Node, rt *types.Type) bool {
	if l.HasCall() || l.Op == OINDEXMAP {
		return true
	}
	if needwritebarrier(l) {
		return true
	}
	if eqtype(l.Type, rt) {
		return false
	}
	return true
}

// ascompatet builds the assignments of a function's results to an
// expression list, i.e. expr-list = func(). Targets whose evaluation
// itself requires a call are deferred through temporaries (mm) until
// all results have been pulled from the output argument slots (nn).
func ascompatet(nl Nodes, nr *types.Type) []*Node {
	if nl.Len() != nr.NumFields() {
		Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
	}

	var nn, mm Nodes
	for i, l := range nl.Slice() {
		if isblank(l) {
			continue
		}
		r := nr.Field(i)

		// any lv that causes a fn call must be
		// deferred until all the return arguments
		// have been pulled from the output arguments
		if fncall(l, r.Type) {
			tmp := temp(r.Type)
			tmp = typecheck(tmp, Erv)
			a := nod(OAS, l, tmp)
			a = convas(a, &mm)
			mm.Append(a)
			l = tmp
		}

		a := nod(OAS, l, nodarg(r, 0))
		a = convas(a, &nn)
		updateHasCall(a)
		if a.HasCall() {
			Dump("ascompatet ucount", a)
			Fatalf("ascompatet: too many function calls evaluating parameters")
		}

		nn.Append(a)
	}
	return append(nn.Slice(), mm.Slice()...)
}

// nodarg returns a Node for the function argument denoted by t,
// which is either the entire function argument or result struct (t is a struct *types.Type)
// or a specific argument (t is a *types.Field within a struct *types.Type).
//
// If fp is 0, the node is for use by a caller invoking the given
// function, preparing the arguments before the call
// or retrieving the results after the call.
// In this case, the node will correspond to an outgoing argument
// slot like 8(SP).
//
// If fp is 1, the node is for use by the function itself
// (the callee), to retrieve its arguments or write its results.
// In this case the node will be an ONAME with an appropriate
// type and offset.
func nodarg(t interface{}, fp int) *Node {
	var n *Node

	var funarg types.Funarg
	switch t := t.(type) {
	default:
		Fatalf("bad nodarg %T(%v)", t, t)

	case *types.Type:
		// Entire argument struct, not just one arg
		if !t.IsFuncArgStruct() {
			Fatalf("nodarg: bad type %v", t)
		}
		funarg = t.StructType().Funarg

		// Build fake variable name for whole arg struct.
		n = newname(lookup(".args"))
		n.Type = t
		first := t.Field(0)
		if first == nil {
			Fatalf("nodarg: bad struct")
		}
		if first.Offset == BADWIDTH {
			Fatalf("nodarg: offset not computed for %v", t)
		}
		n.Xoffset = first.Offset

	case *types.Field:
		funarg = t.Funarg
		if fp == 1 {
			// NOTE(rsc): This should be using t.Nname directly,
			// except in the case where t.Nname.Sym is the blank symbol and
			// so the assignment would be discarded during code generation.
			// In that case we need to make a new node, and there is no harm
			// in optimization passes to doing so. But otherwise we should
			// definitely be using the actual declaration and not a newly built node.
			// The extra Fatalf checks here are verifying that this is the case,
			// without changing the actual logic (at time of writing, it's getting
			// toward time for the Go 1.7 beta).
			// At some quieter time (assuming we've never seen these Fatalfs happen)
			// we could change this code to use "expect" directly.
			expect := asNode(t.Nname)
			if expect.isParamHeapCopy() {
				expect = expect.Name.Param.Stackcopy
			}

			for _, n := range Curfn.Func.Dcl {
				if (n.Class() == PPARAM || n.Class() == PPARAMOUT) && !t.Sym.IsBlank() && n.Sym == t.Sym {
					if n != expect {
						Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, asNode(t.Nname), asNode(t.Nname), asNode(t.Nname).Op)
					}
					return n
				}
			}

			if !expect.Sym.IsBlank() {
				Fatalf("nodarg: did not find node in dcl list: %v", expect)
			}
		}

		// Build fake name for individual variable.
		// This is safe because if there was a real declared name
		// we'd have used it above.
		n = newname(lookup("__"))
		n.Type = t.Type
		if t.Offset == BADWIDTH {
			Fatalf("nodarg: offset not computed for %v", t)
		}
		n.Xoffset = t.Offset
		n.Orig = asNode(t.Nname)
	}

	// Rewrite argument named _ to __,
	// or else the assignment to _ will be
	// discarded during code generation.
	if isblank(n) {
		n.Sym = lookup("__")
	}

	switch fp {
	default:
		Fatalf("bad fp")

	case 0: // preparing arguments for call
		n.Op = OINDREGSP
		n.Xoffset += Ctxt.FixedFrameSize()

	case 1: // reading arguments inside call
		n.SetClass(PPARAM)
		if funarg == types.FunargResults {
			n.SetClass(PPARAMOUT)
		}
	}

	n.SetTypecheck(1)
	n.SetAddrtaken(true) // keep optimizers at bay
	return n
}

// package all the arguments that match a ... T parameter into a []T.
func mkdotargslice(typ *types.Type, args []*Node, init *Nodes, ddd *Node) *Node {
	// Inherit the escape classification of the original ... argument, if any.
	esc := uint16(EscUnknown)
	if ddd != nil {
		esc = ddd.Esc
	}

	// No variadic arguments: pass a nil slice of the right type.
	if len(args) == 0 {
		n := nodnil()
		n.Type = typ
		return n
	}

	n := nod(OCOMPLIT, nil, typenod(typ))
	if ddd != nil && prealloc[ddd] != nil {
		prealloc[n] = prealloc[ddd] // temporary to use
	}
	n.List.Set(args)
	n.Esc = esc
	n = typecheck(n, Erv)
	if n.Type == nil {
		Fatalf("mkdotargslice: typecheck failed")
	}
	n = walkexpr(n, init)
	return n
}

// ascompatte builds the assignments of the argument expressions rhs to
// the parameter list lhs of a call. Called for
//	return expr-list
//	func(expr-list)
// fp is passed through to nodarg (0: caller side, 1: callee side).
func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, init *Nodes) []*Node {
	// f(g()) where g has multiple return values
	if len(rhs) == 1 && rhs[0].Type.IsFuncArgStruct() {
		// optimization - can do block copy
		if eqtypenoname(rhs[0].Type, lhs) {
			nl := nodarg(lhs, fp)
			nr := nod(OCONVNOP, rhs[0], nil)
			nr.Type = nl.Type
			n := convas(nod(OAS, nl, nr), init)
			n.SetTypecheck(1)
			return []*Node{n}
		}

		// conversions involved.
		// copy into temporaries.
		var tmps []*Node
		for _, nr := range rhs[0].Type.FieldSlice() {
			tmps = append(tmps, temp(nr.Type))
		}

		a := nod(OAS2, nil, nil)
		a.List.Set(tmps)
		a.Rlist.Set(rhs)
		a = typecheck(a, Etop)
		a = walkstmt(a)
		init.Append(a)

		rhs = tmps
	}

	// For each parameter (LHS), assign its corresponding argument (RHS).
	// If there's a ... parameter (which is only valid as the final
	// parameter) and this is not a ... call expression,
	// then assign the remaining arguments as a slice.
	var nn []*Node
	for i, nl := range lhs.FieldSlice() {
		var nr *Node
		if nl.Isddd() && !isddd {
			nr = mkdotargslice(nl.Type, rhs[i:], init, call.Right)
		} else {
			nr = rhs[i]
		}

		a := nod(OAS, nodarg(nl, fp), nr)
		a = convas(a, init)
		a.SetTypecheck(1)
		nn = append(nn, a)
	}

	return nn
}

// walkprint lowers a print/println statement nn to a sequence of calls
// to the runtime's type-specific print helpers, bracketed by
// printlock/printunlock.
func walkprint(nn *Node, init *Nodes) *Node {
	// Hoist all the argument evaluation up before the lock.
	walkexprlistcheap(nn.List.Slice(), init)

	// For println, add " " between elements and "\n" at the end.
	if nn.Op == OPRINTN {
		s := nn.List.Slice()
		t := make([]*Node, 0, len(s)*2)
		for i, n := range s {
			if i != 0 {
				t = append(t, nodstr(" "))
			}
			t = append(t, n)
		}
		t = append(t, nodstr("\n"))
		nn.List.Set(t)
	}

	// Collapse runs of constant strings.
	s := nn.List.Slice()
	t := make([]*Node, 0, len(s))
	for i := 0; i < len(s); {
		var strs []string
		for i < len(s) && Isconst(s[i], CTSTR) {
			strs = append(strs, s[i].Val().U.(string))
			i++
		}
		if len(strs) > 0 {
			t = append(t, nodstr(strings.Join(strs, "")))
		}
		if i < len(s) {
			t = append(t, s[i])
			i++
		}
	}
	nn.List.Set(t)

	calls := []*Node{mkcall("printlock", nil, init)}
	for i, n := range nn.List.Slice() {
		// Give untyped constants a concrete default type so a
		// runtime helper can be chosen below.
		if n.Op == OLITERAL {
			switch n.Val().Ctype() {
			case CTRUNE:
				n = defaultlit(n, types.Runetype)

			case CTINT:
				n = defaultlit(n, types.Types[TINT64])

			case CTFLT:
				n = defaultlit(n, types.Types[TFLOAT64])
			}
		}

		if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
			n = defaultlit(n, types.Types[TINT64])
		}
		n = defaultlit(n, nil)
		nn.List.SetIndex(i, n)
		if n.Type == nil || n.Type.Etype == TFORW {
			continue
		}

		// Select the runtime print helper matching the argument's type.
		var on *Node
		switch n.Type.Etype {
		case TINTER:
			if n.Type.IsEmptyInterface() {
				on = syslook("printeface")
			} else {
				on = syslook("printiface")
			}
			on = substArgTypes(on, n.Type) // any-1
		case TPTR32, TPTR64, TCHAN, TMAP, TFUNC, TUNSAFEPTR:
			on = syslook("printpointer")
			on = substArgTypes(on, n.Type) // any-1
		case TSLICE:
			on = syslook("printslice")
			on = substArgTypes(on, n.Type) // any-1
		case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
			if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" {
				on = syslook("printhex")
			} else {
				on = syslook("printuint")
			}
		case TINT, TINT8, TINT16, TINT32, TINT64:
			on = syslook("printint")
		case TFLOAT32, TFLOAT64:
			on = syslook("printfloat")
		case TCOMPLEX64, TCOMPLEX128:
			on = syslook("printcomplex")
		case TBOOL:
			on = syslook("printbool")
		case TSTRING:
			cs := ""
			if Isconst(n, CTSTR) {
				cs = n.Val().U.(string)
			}
			switch cs {
			case " ":
				on = syslook("printsp")
			case "\n":
				on = syslook("printnl")
			default:
				on = syslook("printstring")
			}
		default:
			badtype(OPRINT, n.Type, nil)
			continue
		}

		r := nod(OCALL, on, nil)
		if params := on.Type.Params().FieldSlice(); len(params) > 0 {
			t := params[0].Type
			if !eqtype(t, n.Type) {
				n = nod(OCONV, n, nil)
				n.Type = t
			}
			r.List.Append(n)
		}
		calls = append(calls, r)
	}

	calls = append(calls, mkcall("printunlock", nil, init))

	typecheckslice(calls, Etop)
	walkexprlist(calls, init)

	// Hang the whole call sequence off an empty statement.
	r := nod(OEMPTY, nil, nil)
	r = typecheck(r, Etop)
	r = walkexpr(r, init)
	r.Ninit.Set(calls)
	return r
}

// callnew returns a call to runtime.newobject allocating a new
// zeroed value of type t; the result is a non-nil *t.
func callnew(t *types.Type) *Node {
	if t.NotInHeap() {
		yyerror("%v is go:notinheap; heap allocation disallowed", t)
	}
	dowidth(t)
	fn := syslook("newobject")
	fn = substArgTypes(fn, t)
	v := mkcall1(fn, types.NewPtr(t), nil, typename(t))
	v.SetNonNil(true)
	return v
}

// iscallret reports whether n's outer value is a function call
// result slot (OINDREGSP).
func iscallret(n *Node) bool {
	n = outervalue(n)
	return n.Op == OINDREGSP
}

// isstack reports whether n's outer value is a stack location:
// a call argument/result slot or an auto/param variable.
func isstack(n *Node) bool {
	n = outervalue(n)

	// If n is *autotmp and autotmp = &foo, replace n with foo.
	// We introduce such temps when initializing struct literals.
	if n.Op == OIND && n.Left.Op == ONAME && n.Left.IsAutoTmp() {
		defn := n.Left.Name.Defn
		if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR {
			n = defn.Right.Left
		}
	}

	switch n.Op {
	case OINDREGSP:
		return true

	case ONAME:
		switch n.Class() {
		case PAUTO, PPARAM, PPARAMOUT:
			return true
		}
	}

	return false
}

// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
func isReflectHeaderDataField(l *Node) bool {
	if l.Type != types.Types[TUINTPTR] {
		return false
	}

	var tsym *types.Sym
	switch l.Op {
	case ODOT:
		tsym = l.Left.Type.Sym
	case ODOTPTR:
		tsym = l.Left.Type.Elem().Sym
	default:
		return false
	}

	if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
		return false
	}
	return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}

// needwritebarrier reports whether assigning to l requires a write barrier.
func needwritebarrier(l *Node) bool {
	if !use_writebarrier {
		return false
	}

	if l == nil || isblank(l) {
		return false
	}

	// No write barrier for write to stack.
	if isstack(l) {
		return false
	}

	// Package unsafe's documentation says storing pointers into
	// reflect.SliceHeader and reflect.StringHeader's Data fields
	// is valid, even though they have type uintptr (#19168).
	if isReflectHeaderDataField(l) {
		return true
	}

	// No write barrier for write of non-pointers.
	dowidth(l.Type)
	if !types.Haspointers(l.Type) {
		return false
	}

	// No write barrier if this is a pointer to a go:notinheap
	// type, since the write barrier's inheap(ptr) check will fail.
	if l.Type.IsPtr() && l.Type.Elem().NotInHeap() {
		return false
	}

	// TODO: We can eliminate write barriers if we know *both* the
	// current and new content of the slot must already be shaded.
	// We know a pointer is shaded if it's nil, or points to
	// static data, a global (variable or function), or the stack.
	// The nil optimization could be particularly useful for
	// writes to just-allocated objects. Unfortunately, knowing
	// the "current" value of the slot requires flow analysis.

	// Otherwise, be conservative and use write barrier.
	return true
}

// convas finishes the assignment n (which must be OAS): it inserts and
// walks any conversion needed on the RHS when the two sides' types
// differ, defaults the RHS type for assignments to blank, and updates
// n's has-call flag.
func convas(n *Node, init *Nodes) *Node {
	if n.Op != OAS {
		Fatalf("convas: not OAS %v", n.Op)
	}

	n.SetTypecheck(1)

	var lt *types.Type
	var rt *types.Type
	if n.Left == nil || n.Right == nil {
		goto out
	}

	lt = n.Left.Type
	rt = n.Right.Type
	if lt == nil || rt == nil {
		goto out
	}

	if isblank(n.Left) {
		n.Right = defaultlit(n.Right, nil)
		goto out
	}

	if !eqtype(lt, rt) {
		n.Right = assignconv(n.Right, lt, "assignment")
		n.Right = walkexpr(n.Right, init)
	}
	dowidth(n.Right.Type)

out:
	updateHasCall(n)
	return n
}

// from ascompat[te]
// evaluating actual function arguments.
//	f(a,b)
// if there is exactly one function expr,
// then it is done first. otherwise must
// make temp variables
func reorder1(all []*Node) []*Node {
	c := 0 // function calls
	t := 0 // total parameters

	for _, n := range all {
		t++
		updateHasCall(n)
		if n.HasCall() {
			c++
		}
	}

	if c == 0 || t == 1 {
		return all
	}

	var g []*Node // fncalls assigned to tempnames
	var f *Node   // last fncall assigned to stack
	var r []*Node // non fncalls and tempnames assigned to stack
	d := 0
	for _, n := range all {
		if !n.HasCall() {
			r = append(r, n)
			continue
		}

		d++
		if d == c {
			// Last call can be stored to the stack directly;
			// it runs after all the temporaries are filled in.
			f = n
			continue
		}

		// make assignment of fncall to tempname
		a := temp(n.Right.Type)

		a = nod(OAS, a, n.Right)
		g = append(g, a)

		// put normal arg assignment on list
		// with fncall replaced by tempname
		n.Right = a.Left

		r = append(r, n)
	}

	if f != nil {
		g = append(g, f)
	}
	return append(g, r...)
}

// from ascompat[ee]
//	a,b = c,d
// simultaneous assignment. there cannot
// be later use of an earlier lvalue.
//
// function calls have been removed.
func reorder3(all []*Node) []*Node {
	// If a needed expression may be affected by an
	// earlier assignment, make an early copy of that
	// expression and use the copy instead.
	var early []*Node

	var mapinit Nodes
	for i, n := range all {
		l := n.Left

		// Save subexpressions needed on left side.
		// Drill through non-dereferences.
		for {
			if l.Op == ODOT || l.Op == OPAREN {
				l = l.Left
				continue
			}

			if l.Op == OINDEX && l.Left.Type.IsArray() {
				l.Right = reorder3save(l.Right, all, i, &early)
				l = l.Left
				continue
			}

			break
		}

		switch l.Op {
		default:
			Fatalf("reorder3 unexpected lvalue %#v", l.Op)

		case ONAME:
			break

		case OINDEX, OINDEXMAP:
			l.Left = reorder3save(l.Left, all, i, &early)
			l.Right = reorder3save(l.Right, all, i, &early)
			if l.Op == OINDEXMAP {
				all[i] = convas(all[i], &mapinit)
			}

		case OIND, ODOTPTR:
			l.Left = reorder3save(l.Left, all, i, &early)
		}

		// Save expression on right side.
		all[i].Right = reorder3save(all[i].Right, all, i, &early)
	}

	early = append(mapinit.Slice(), early...)
	return append(early, all...)
}

// if the evaluation of *np would be affected by the
// assignments in all up to but not including the ith assignment,
// copy into a temporary during *early and
// replace *np with that temp.
// The result of reorder3save MUST be assigned back to n, e.g.
//	n.Left = reorder3save(n.Left, all, i, early)
func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
	if !aliased(n, all, i) {
		return n
	}

	q := temp(n.Type)
	q = nod(OAS, q, n)
	q = typecheck(q, Etop)
	*early = append(*early, q)
	return q.Left
}

// what's the outer value that a write to n affects?
// outer value means containing struct or array.
2469 func outervalue(n *Node) *Node { 2470 for { 2471 if n.Op == OXDOT { 2472 Fatalf("OXDOT in walk") 2473 } 2474 if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP { 2475 n = n.Left 2476 continue 2477 } 2478 2479 if n.Op == OINDEX && n.Left.Type != nil && n.Left.Type.IsArray() { 2480 n = n.Left 2481 continue 2482 } 2483 2484 break 2485 } 2486 2487 return n 2488 } 2489 2490 // Is it possible that the computation of n might be 2491 // affected by writes in as up to but not including the ith element? 2492 func aliased(n *Node, all []*Node, i int) bool { 2493 if n == nil { 2494 return false 2495 } 2496 2497 // Treat all fields of a struct as referring to the whole struct. 2498 // We could do better but we would have to keep track of the fields. 2499 for n.Op == ODOT { 2500 n = n.Left 2501 } 2502 2503 // Look for obvious aliasing: a variable being assigned 2504 // during the all list and appearing in n. 2505 // Also record whether there are any writes to main memory. 2506 // Also record whether there are any writes to variables 2507 // whose addresses have been taken. 2508 memwrite := 0 2509 2510 varwrite := 0 2511 for _, an := range all[:i] { 2512 a := outervalue(an.Left) 2513 2514 for a.Op == ODOT { 2515 a = a.Left 2516 } 2517 2518 if a.Op != ONAME { 2519 memwrite = 1 2520 continue 2521 } 2522 2523 switch n.Class() { 2524 default: 2525 varwrite = 1 2526 continue 2527 2528 case PAUTO, PPARAM, PPARAMOUT: 2529 if n.Addrtaken() { 2530 varwrite = 1 2531 continue 2532 } 2533 2534 if vmatch2(a, n) { 2535 // Direct hit. 2536 return true 2537 } 2538 } 2539 } 2540 2541 // The variables being written do not appear in n. 2542 // However, n might refer to computed addresses 2543 // that are being written. 2544 2545 // If no computed addresses are affected by the writes, no aliasing. 
2546 if memwrite == 0 && varwrite == 0 { 2547 return false 2548 } 2549 2550 // If n does not refer to computed addresses 2551 // (that is, if n only refers to variables whose addresses 2552 // have not been taken), no aliasing. 2553 if varexpr(n) { 2554 return false 2555 } 2556 2557 // Otherwise, both the writes and n refer to computed memory addresses. 2558 // Assume that they might conflict. 2559 return true 2560 } 2561 2562 // does the evaluation of n only refer to variables 2563 // whose addresses have not been taken? 2564 // (and no other memory) 2565 func varexpr(n *Node) bool { 2566 if n == nil { 2567 return true 2568 } 2569 2570 switch n.Op { 2571 case OLITERAL: 2572 return true 2573 2574 case ONAME: 2575 switch n.Class() { 2576 case PAUTO, PPARAM, PPARAMOUT: 2577 if !n.Addrtaken() { 2578 return true 2579 } 2580 } 2581 2582 return false 2583 2584 case OADD, 2585 OSUB, 2586 OOR, 2587 OXOR, 2588 OMUL, 2589 ODIV, 2590 OMOD, 2591 OLSH, 2592 ORSH, 2593 OAND, 2594 OANDNOT, 2595 OPLUS, 2596 OMINUS, 2597 OCOM, 2598 OPAREN, 2599 OANDAND, 2600 OOROR, 2601 OCONV, 2602 OCONVNOP, 2603 OCONVIFACE, 2604 ODOTTYPE: 2605 return varexpr(n.Left) && varexpr(n.Right) 2606 2607 case ODOT: // but not ODOTPTR 2608 // Should have been handled in aliased. 2609 Fatalf("varexpr unexpected ODOT") 2610 } 2611 2612 // Be conservative. 2613 return false 2614 } 2615 2616 // is the name l mentioned in r? 2617 func vmatch2(l *Node, r *Node) bool { 2618 if r == nil { 2619 return false 2620 } 2621 switch r.Op { 2622 // match each right given left 2623 case ONAME: 2624 return l == r 2625 2626 case OLITERAL: 2627 return false 2628 } 2629 2630 if vmatch2(l, r.Left) { 2631 return true 2632 } 2633 if vmatch2(l, r.Right) { 2634 return true 2635 } 2636 for _, n := range r.List.Slice() { 2637 if vmatch2(l, n) { 2638 return true 2639 } 2640 } 2641 return false 2642 } 2643 2644 // is any name mentioned in l also mentioned in r? 
2645 // called by sinit.go 2646 func vmatch1(l *Node, r *Node) bool { 2647 // isolate all left sides 2648 if l == nil || r == nil { 2649 return false 2650 } 2651 switch l.Op { 2652 case ONAME: 2653 switch l.Class() { 2654 case PPARAM, PAUTO: 2655 break 2656 2657 default: 2658 // assignment to non-stack variable must be 2659 // delayed if right has function calls. 2660 if r.HasCall() { 2661 return true 2662 } 2663 } 2664 2665 return vmatch2(l, r) 2666 2667 case OLITERAL: 2668 return false 2669 } 2670 2671 if vmatch1(l.Left, r) { 2672 return true 2673 } 2674 if vmatch1(l.Right, r) { 2675 return true 2676 } 2677 for _, n := range l.List.Slice() { 2678 if vmatch1(n, r) { 2679 return true 2680 } 2681 } 2682 return false 2683 } 2684 2685 // paramstoheap returns code to allocate memory for heap-escaped parameters 2686 // and to copy non-result parameters' values from the stack. 2687 func paramstoheap(params *types.Type) []*Node { 2688 var nn []*Node 2689 for _, t := range params.Fields().Slice() { 2690 v := asNode(t.Nname) 2691 if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result 2692 v = nil 2693 } 2694 if v == nil { 2695 continue 2696 } 2697 2698 if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil { 2699 nn = append(nn, walkstmt(nod(ODCL, v, nil))) 2700 if stackcopy.Class() == PPARAM { 2701 nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), Etop))) 2702 } 2703 } 2704 } 2705 2706 return nn 2707 } 2708 2709 // zeroResults zeros the return values at the start of the function. 2710 // We need to do this very early in the function. Defer might stop a 2711 // panic and show the return values as they exist at the time of 2712 // panic. For precise stacks, the garbage collector assumes results 2713 // are always live, so we need to zero them before any allocations, 2714 // even allocations to move params/results to the heap. 2715 // The generated code is added to Curfn's Enter list. 
2716 func zeroResults() { 2717 lno := lineno 2718 lineno = Curfn.Pos 2719 for _, f := range Curfn.Type.Results().Fields().Slice() { 2720 if v := asNode(f.Nname); v != nil && v.Name.Param.Heapaddr != nil { 2721 // The local which points to the return value is the 2722 // thing that needs zeroing. This is already handled 2723 // by a Needzero annotation in plive.go:livenessepilogue. 2724 continue 2725 } 2726 // Zero the stack location containing f. 2727 Curfn.Func.Enter.Append(nod(OAS, nodarg(f, 1), nil)) 2728 } 2729 lineno = lno 2730 } 2731 2732 // returnsfromheap returns code to copy values for heap-escaped parameters 2733 // back to the stack. 2734 func returnsfromheap(params *types.Type) []*Node { 2735 var nn []*Node 2736 for _, t := range params.Fields().Slice() { 2737 v := asNode(t.Nname) 2738 if v == nil { 2739 continue 2740 } 2741 if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT { 2742 nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), Etop))) 2743 } 2744 } 2745 2746 return nn 2747 } 2748 2749 // heapmoves generates code to handle migrating heap-escaped parameters 2750 // between the stack and the heap. The generated code is added to Curfn's 2751 // Enter and Exit lists. 2752 func heapmoves() { 2753 lno := lineno 2754 lineno = Curfn.Pos 2755 nn := paramstoheap(Curfn.Type.Recvs()) 2756 nn = append(nn, paramstoheap(Curfn.Type.Params())...) 2757 nn = append(nn, paramstoheap(Curfn.Type.Results())...) 2758 Curfn.Func.Enter.Append(nn...) 2759 lineno = Curfn.Func.Endlineno 2760 Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...) 
2761 lineno = lno 2762 } 2763 2764 func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node { 2765 if fn.Type == nil || fn.Type.Etype != TFUNC { 2766 Fatalf("mkcall %v %v", fn, fn.Type) 2767 } 2768 2769 n := fn.Type.NumParams() 2770 if n != len(va) { 2771 Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) 2772 } 2773 2774 r := nod(OCALL, fn, nil) 2775 r.List.Set(va) 2776 if fn.Type.NumResults() > 0 { 2777 r = typecheck(r, Erv|Efnstruct) 2778 } else { 2779 r = typecheck(r, Etop) 2780 } 2781 r = walkexpr(r, init) 2782 r.Type = t 2783 return r 2784 } 2785 2786 func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node { 2787 return vmkcall(syslook(name), t, init, args) 2788 } 2789 2790 func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node { 2791 return vmkcall(fn, t, init, args) 2792 } 2793 2794 func conv(n *Node, t *types.Type) *Node { 2795 if eqtype(n.Type, t) { 2796 return n 2797 } 2798 n = nod(OCONV, n, nil) 2799 n.Type = t 2800 n = typecheck(n, Erv) 2801 return n 2802 } 2803 2804 // byteindex converts n, which is byte-sized, to a uint8. 2805 // We cannot use conv, because we allow converting bool to uint8 here, 2806 // which is forbidden in user code. 
2807 func byteindex(n *Node) *Node { 2808 if eqtype(n.Type, types.Types[TUINT8]) { 2809 return n 2810 } 2811 n = nod(OCONV, n, nil) 2812 n.Type = types.Types[TUINT8] 2813 n.SetTypecheck(1) 2814 return n 2815 } 2816 2817 func chanfn(name string, n int, t *types.Type) *Node { 2818 if !t.IsChan() { 2819 Fatalf("chanfn %v", t) 2820 } 2821 fn := syslook(name) 2822 switch n { 2823 default: 2824 Fatalf("chanfn %d", n) 2825 case 1: 2826 fn = substArgTypes(fn, t.Elem()) 2827 case 2: 2828 fn = substArgTypes(fn, t.Elem(), t.Elem()) 2829 } 2830 return fn 2831 } 2832 2833 func mapfn(name string, t *types.Type) *Node { 2834 if !t.IsMap() { 2835 Fatalf("mapfn %v", t) 2836 } 2837 fn := syslook(name) 2838 fn = substArgTypes(fn, t.Key(), t.Val(), t.Key(), t.Val()) 2839 return fn 2840 } 2841 2842 func mapfndel(name string, t *types.Type) *Node { 2843 if !t.IsMap() { 2844 Fatalf("mapfn %v", t) 2845 } 2846 fn := syslook(name) 2847 fn = substArgTypes(fn, t.Key(), t.Val(), t.Key()) 2848 return fn 2849 } 2850 2851 const ( 2852 mapslow = iota 2853 mapfast32 2854 mapfast64 2855 mapfaststr 2856 nmapfast 2857 ) 2858 2859 type mapnames [nmapfast]string 2860 2861 func mkmapnames(base string) mapnames { 2862 return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"} 2863 } 2864 2865 var mapaccess1 = mkmapnames("mapaccess1") 2866 var mapaccess2 = mkmapnames("mapaccess2") 2867 var mapassign = mkmapnames("mapassign") 2868 var mapdelete = mkmapnames("mapdelete") 2869 2870 func mapfast(t *types.Type) int { 2871 // Check ../../runtime/hashmap.go:maxValueSize before changing. 
2872 if t.Val().Width > 128 { 2873 return mapslow 2874 } 2875 switch algtype(t.Key()) { 2876 case AMEM32: 2877 return mapfast32 2878 case AMEM64: 2879 return mapfast64 2880 case ASTRING: 2881 return mapfaststr 2882 } 2883 return mapslow 2884 } 2885 2886 func writebarrierfn(name string, l *types.Type, r *types.Type) *Node { 2887 fn := syslook(name) 2888 fn = substArgTypes(fn, l, r) 2889 return fn 2890 } 2891 2892 func addstr(n *Node, init *Nodes) *Node { 2893 // orderexpr rewrote OADDSTR to have a list of strings. 2894 c := n.List.Len() 2895 2896 if c < 2 { 2897 Fatalf("addstr count %d too small", c) 2898 } 2899 2900 buf := nodnil() 2901 if n.Esc == EscNone { 2902 sz := int64(0) 2903 for _, n1 := range n.List.Slice() { 2904 if n1.Op == OLITERAL { 2905 sz += int64(len(n1.Val().U.(string))) 2906 } 2907 } 2908 2909 // Don't allocate the buffer if the result won't fit. 2910 if sz < tmpstringbufsize { 2911 // Create temporary buffer for result string on stack. 2912 t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) 2913 2914 buf = nod(OADDR, temp(t), nil) 2915 } 2916 } 2917 2918 // build list of string arguments 2919 args := []*Node{buf} 2920 for _, n2 := range n.List.Slice() { 2921 args = append(args, conv(n2, types.Types[TSTRING])) 2922 } 2923 2924 var fn string 2925 if c <= 5 { 2926 // small numbers of strings use direct runtime helpers. 2927 // note: orderexpr knows this cutoff too. 2928 fn = fmt.Sprintf("concatstring%d", c) 2929 } else { 2930 // large numbers of strings are passed to the runtime as a slice. 
2931 fn = "concatstrings" 2932 2933 t := types.NewSlice(types.Types[TSTRING]) 2934 slice := nod(OCOMPLIT, nil, typenod(t)) 2935 if prealloc[n] != nil { 2936 prealloc[slice] = prealloc[n] 2937 } 2938 slice.List.Set(args[1:]) // skip buf arg 2939 args = []*Node{buf, slice} 2940 slice.Esc = EscNone 2941 } 2942 2943 cat := syslook(fn) 2944 r := nod(OCALL, cat, nil) 2945 r.List.Set(args) 2946 r = typecheck(r, Erv) 2947 r = walkexpr(r, init) 2948 r.Type = n.Type 2949 2950 return r 2951 } 2952 2953 // expand append(l1, l2...) to 2954 // init { 2955 // s := l1 2956 // n := len(s) + len(l2) 2957 // // Compare as uint so growslice can panic on overflow. 2958 // if uint(n) > uint(cap(s)) { 2959 // s = growslice(s, n) 2960 // } 2961 // s = s[:n] 2962 // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) 2963 // } 2964 // s 2965 // 2966 // l2 is allowed to be a string. 2967 func appendslice(n *Node, init *Nodes) *Node { 2968 walkexprlistsafe(n.List.Slice(), init) 2969 2970 // walkexprlistsafe will leave OINDEX (s[n]) alone if both s 2971 // and n are name or literal, but those may index the slice we're 2972 // modifying here. Fix explicitly. 
2973 ls := n.List.Slice() 2974 for i1, n1 := range ls { 2975 ls[i1] = cheapexpr(n1, init) 2976 } 2977 2978 l1 := n.List.First() 2979 l2 := n.List.Second() 2980 2981 var l []*Node 2982 2983 // var s []T 2984 s := temp(l1.Type) 2985 l = append(l, nod(OAS, s, l1)) // s = l1 2986 2987 // n := len(s) + len(l2) 2988 nn := temp(types.Types[TINT]) 2989 l = append(l, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil)))) 2990 2991 // if uint(n) > uint(cap(s)) 2992 nif := nod(OIF, nil, nil) 2993 nif.Left = nod(OGT, nod(OCONV, nn, nil), nod(OCONV, nod(OCAP, s, nil), nil)) 2994 nif.Left.Left.Type = types.Types[TUINT] 2995 nif.Left.Right.Type = types.Types[TUINT] 2996 2997 // instantiate growslice(Type*, []any, int) []any 2998 fn := syslook("growslice") 2999 fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem()) 3000 3001 // s = growslice(T, s, n) 3002 nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type.Elem()), s, nn))) 3003 l = append(l, nif) 3004 3005 // s = s[:n] 3006 nt := nod(OSLICE, s, nil) 3007 nt.SetSliceBounds(nil, nn, nil) 3008 nt.Etype = 1 3009 l = append(l, nod(OAS, s, nt)) 3010 3011 if types.Haspointers(l1.Type.Elem()) { 3012 // copy(s[len(l1):], l2) 3013 nptr1 := nod(OSLICE, s, nil) 3014 nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil) 3015 nptr1.Etype = 1 3016 nptr2 := l2 3017 fn := syslook("typedslicecopy") 3018 fn = substArgTypes(fn, l1.Type, l2.Type) 3019 var ln Nodes 3020 ln.Set(l) 3021 nt := mkcall1(fn, types.Types[TINT], &ln, typename(l1.Type.Elem()), nptr1, nptr2) 3022 l = append(ln.Slice(), nt) 3023 } else if instrumenting && !compiling_runtime { 3024 // rely on runtime to instrument copy. 
3025 // copy(s[len(l1):], l2) 3026 nptr1 := nod(OSLICE, s, nil) 3027 nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil) 3028 nptr1.Etype = 1 3029 nptr2 := l2 3030 3031 var ln Nodes 3032 ln.Set(l) 3033 var nt *Node 3034 if l2.Type.IsString() { 3035 fn := syslook("slicestringcopy") 3036 fn = substArgTypes(fn, l1.Type, l2.Type) 3037 nt = mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2) 3038 } else { 3039 fn := syslook("slicecopy") 3040 fn = substArgTypes(fn, l1.Type, l2.Type) 3041 nt = mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width)) 3042 } 3043 3044 l = append(ln.Slice(), nt) 3045 } else { 3046 // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) 3047 nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil)) 3048 nptr1.SetBounded(true) 3049 3050 nptr1 = nod(OADDR, nptr1, nil) 3051 3052 nptr2 := nod(OSPTR, l2, nil) 3053 3054 fn := syslook("memmove") 3055 fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem()) 3056 3057 var ln Nodes 3058 ln.Set(l) 3059 nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &ln) 3060 3061 nwid = nod(OMUL, nwid, nodintconst(s.Type.Elem().Width)) 3062 nt := mkcall1(fn, nil, &ln, nptr1, nptr2, nwid) 3063 l = append(ln.Slice(), nt) 3064 } 3065 3066 typecheckslice(l, Etop) 3067 walkstmtlist(l) 3068 init.Append(l...) 3069 return s 3070 } 3071 3072 // Rewrite append(src, x, y, z) so that any side effects in 3073 // x, y, z (including runtime panics) are evaluated in 3074 // initialization statements before the append. 3075 // For normal code generation, stop there and leave the 3076 // rest to cgen_append. 3077 // 3078 // For race detector, expand append(src, a [, b]* ) to 3079 // 3080 // init { 3081 // s := src 3082 // const argc = len(args) - 1 3083 // if cap(s) - len(s) < argc { 3084 // s = growslice(s, len(s)+argc) 3085 // } 3086 // n := len(s) 3087 // s = s[:n+argc] 3088 // s[n] = a 3089 // s[n+1] = b 3090 // ... 
3091 // } 3092 // s 3093 func walkappend(n *Node, init *Nodes, dst *Node) *Node { 3094 if !samesafeexpr(dst, n.List.First()) { 3095 n.List.SetFirst(safeexpr(n.List.First(), init)) 3096 n.List.SetFirst(walkexpr(n.List.First(), init)) 3097 } 3098 walkexprlistsafe(n.List.Slice()[1:], init) 3099 3100 // walkexprlistsafe will leave OINDEX (s[n]) alone if both s 3101 // and n are name or literal, but those may index the slice we're 3102 // modifying here. Fix explicitly. 3103 // Using cheapexpr also makes sure that the evaluation 3104 // of all arguments (and especially any panics) happen 3105 // before we begin to modify the slice in a visible way. 3106 ls := n.List.Slice()[1:] 3107 for i, n := range ls { 3108 ls[i] = cheapexpr(n, init) 3109 } 3110 3111 nsrc := n.List.First() 3112 3113 argc := n.List.Len() - 1 3114 if argc < 1 { 3115 return nsrc 3116 } 3117 3118 // General case, with no function calls left as arguments. 3119 // Leave for gen, except that instrumentation requires old form. 3120 if !instrumenting || compiling_runtime { 3121 return n 3122 } 3123 3124 var l []*Node 3125 3126 ns := temp(nsrc.Type) 3127 l = append(l, nod(OAS, ns, nsrc)) // s = src 3128 3129 na := nodintconst(int64(argc)) // const argc 3130 nx := nod(OIF, nil, nil) // if cap(s) - len(s) < argc 3131 nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na) 3132 3133 fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T) 3134 fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem()) 3135 3136 nx.Nbody.Set1(nod(OAS, ns, 3137 mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns, 3138 nod(OADD, nod(OLEN, ns, nil), na)))) 3139 3140 l = append(l, nx) 3141 3142 nn := temp(types.Types[TINT]) 3143 l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s) 3144 3145 nx = nod(OSLICE, ns, nil) // ...s[:n+argc] 3146 nx.SetSliceBounds(nil, nod(OADD, nn, na), nil) 3147 nx.Etype = 1 3148 l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc] 3149 3150 ls = 
n.List.Slice()[1:] 3151 for i, n := range ls { 3152 nx = nod(OINDEX, ns, nn) // s[n] ... 3153 nx.SetBounded(true) 3154 l = append(l, nod(OAS, nx, n)) // s[n] = arg 3155 if i+1 < len(ls) { 3156 l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1 3157 } 3158 } 3159 3160 typecheckslice(l, Etop) 3161 walkstmtlist(l) 3162 init.Append(l...) 3163 return ns 3164 } 3165 3166 // Lower copy(a, b) to a memmove call or a runtime call. 3167 // 3168 // init { 3169 // n := len(a) 3170 // if n > len(b) { n = len(b) } 3171 // memmove(a.ptr, b.ptr, n*sizeof(elem(a))) 3172 // } 3173 // n; 3174 // 3175 // Also works if b is a string. 3176 // 3177 func copyany(n *Node, init *Nodes, runtimecall bool) *Node { 3178 if types.Haspointers(n.Left.Type.Elem()) { 3179 fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type) 3180 return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), n.Left, n.Right) 3181 } 3182 3183 if runtimecall { 3184 if n.Right.Type.IsString() { 3185 fn := syslook("slicestringcopy") 3186 fn = substArgTypes(fn, n.Left.Type, n.Right.Type) 3187 return mkcall1(fn, n.Type, init, n.Left, n.Right) 3188 } 3189 3190 fn := syslook("slicecopy") 3191 fn = substArgTypes(fn, n.Left.Type, n.Right.Type) 3192 return mkcall1(fn, n.Type, init, n.Left, n.Right, nodintconst(n.Left.Type.Elem().Width)) 3193 } 3194 3195 n.Left = walkexpr(n.Left, init) 3196 n.Right = walkexpr(n.Right, init) 3197 nl := temp(n.Left.Type) 3198 nr := temp(n.Right.Type) 3199 var l []*Node 3200 l = append(l, nod(OAS, nl, n.Left)) 3201 l = append(l, nod(OAS, nr, n.Right)) 3202 3203 nfrm := nod(OSPTR, nr, nil) 3204 nto := nod(OSPTR, nl, nil) 3205 3206 nlen := temp(types.Types[TINT]) 3207 3208 // n = len(to) 3209 l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil))) 3210 3211 // if n > len(frm) { n = len(frm) } 3212 nif := nod(OIF, nil, nil) 3213 3214 nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil)) 3215 nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil))) 3216 l = append(l, nif) 3217 3218 // 
Call memmove. 3219 fn := syslook("memmove") 3220 3221 fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem()) 3222 nwid := temp(types.Types[TUINTPTR]) 3223 l = append(l, nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR]))) 3224 nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width)) 3225 l = append(l, mkcall1(fn, nil, init, nto, nfrm, nwid)) 3226 3227 typecheckslice(l, Etop) 3228 walkstmtlist(l) 3229 init.Append(l...) 3230 return nlen 3231 } 3232 3233 func eqfor(t *types.Type, needsize *int) *Node { 3234 // Should only arrive here with large memory or 3235 // a struct/array containing a non-memory field/element. 3236 // Small memory is handled inline, and single non-memory 3237 // is handled during type check (OCMPSTR etc). 3238 switch a, _ := algtype1(t); a { 3239 case AMEM: 3240 n := syslook("memequal") 3241 n = substArgTypes(n, t, t) 3242 *needsize = 1 3243 return n 3244 case ASPECIAL: 3245 sym := typesymprefix(".eq", t) 3246 n := newname(sym) 3247 n.SetClass(PFUNC) 3248 ntype := nod(OTFUNC, nil, nil) 3249 ntype.List.Append(anonfield(types.NewPtr(t))) 3250 ntype.List.Append(anonfield(types.NewPtr(t))) 3251 ntype.Rlist.Append(anonfield(types.Types[TBOOL])) 3252 ntype = typecheck(ntype, Etype) 3253 n.Type = ntype.Type 3254 *needsize = 0 3255 return n 3256 } 3257 Fatalf("eqfor %v", t) 3258 return nil 3259 } 3260 3261 // The result of walkcompare MUST be assigned back to n, e.g. 3262 // n.Left = walkcompare(n.Left, init) 3263 func walkcompare(n *Node, init *Nodes) *Node { 3264 // Given interface value l and concrete value r, rewrite 3265 // l == r 3266 // into types-equal && data-equal. 3267 // This is efficient, avoids allocations, and avoids runtime calls. 3268 var l, r *Node 3269 if n.Left.Type.IsInterface() && !n.Right.Type.IsInterface() { 3270 l = n.Left 3271 r = n.Right 3272 } else if !n.Left.Type.IsInterface() && n.Right.Type.IsInterface() { 3273 l = n.Right 3274 r = n.Left 3275 } 3276 3277 if l != nil { 3278 // Handle both == and !=. 
3279 eq := n.Op 3280 var andor Op 3281 if eq == OEQ { 3282 andor = OANDAND 3283 } else { 3284 andor = OOROR 3285 } 3286 // Check for types equal. 3287 // For empty interface, this is: 3288 // l.tab == type(r) 3289 // For non-empty interface, this is: 3290 // l.tab != nil && l.tab._type == type(r) 3291 var eqtype *Node 3292 tab := nod(OITAB, l, nil) 3293 rtyp := typename(r.Type) 3294 if l.Type.IsEmptyInterface() { 3295 tab.Type = types.NewPtr(types.Types[TUINT8]) 3296 tab.SetTypecheck(1) 3297 eqtype = nod(eq, tab, rtyp) 3298 } else { 3299 nonnil := nod(brcom(eq), nodnil(), tab) 3300 match := nod(eq, itabType(tab), rtyp) 3301 eqtype = nod(andor, nonnil, match) 3302 } 3303 // Check for data equal. 3304 eqdata := nod(eq, ifaceData(l, r.Type), r) 3305 // Put it all together. 3306 expr := nod(andor, eqtype, eqdata) 3307 n = finishcompare(n, expr, init) 3308 return n 3309 } 3310 3311 // Must be comparison of array or struct. 3312 // Otherwise back end handles it. 3313 // While we're here, decide whether to 3314 // inline or call an eq alg. 3315 t := n.Left.Type 3316 var inline bool 3317 3318 maxcmpsize := int64(4) 3319 unalignedLoad := false 3320 switch thearch.LinkArch.Family { 3321 case sys.AMD64, sys.ARM64, sys.S390X: 3322 // Keep this low enough, to generate less code than function call. 3323 maxcmpsize = 16 3324 unalignedLoad = true 3325 case sys.I386: 3326 maxcmpsize = 8 3327 unalignedLoad = true 3328 } 3329 3330 switch t.Etype { 3331 default: 3332 return n 3333 case TARRAY: 3334 // We can compare several elements at once with 2/4/8 byte integer compares 3335 inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize)) 3336 case TSTRUCT: 3337 inline = t.NumFields() <= 4 3338 } 3339 3340 cmpl := n.Left 3341 for cmpl != nil && cmpl.Op == OCONVNOP { 3342 cmpl = cmpl.Left 3343 } 3344 cmpr := n.Right 3345 for cmpr != nil && cmpr.Op == OCONVNOP { 3346 cmpr = cmpr.Left 3347 } 3348 3349 // Chose not to inline. 
Call equality function directly. 3350 if !inline { 3351 if isvaluelit(cmpl) { 3352 var_ := temp(cmpl.Type) 3353 anylit(cmpl, var_, init) 3354 cmpl = var_ 3355 } 3356 if isvaluelit(cmpr) { 3357 var_ := temp(cmpr.Type) 3358 anylit(cmpr, var_, init) 3359 cmpr = var_ 3360 } 3361 if !islvalue(cmpl) || !islvalue(cmpr) { 3362 Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr) 3363 } 3364 3365 // eq algs take pointers 3366 pl := temp(types.NewPtr(t)) 3367 al := nod(OAS, pl, nod(OADDR, cmpl, nil)) 3368 al.Right.Etype = 1 // addr does not escape 3369 al = typecheck(al, Etop) 3370 init.Append(al) 3371 3372 pr := temp(types.NewPtr(t)) 3373 ar := nod(OAS, pr, nod(OADDR, cmpr, nil)) 3374 ar.Right.Etype = 1 // addr does not escape 3375 ar = typecheck(ar, Etop) 3376 init.Append(ar) 3377 3378 var needsize int 3379 call := nod(OCALL, eqfor(t, &needsize), nil) 3380 call.List.Append(pl) 3381 call.List.Append(pr) 3382 if needsize != 0 { 3383 call.List.Append(nodintconst(t.Width)) 3384 } 3385 res := call 3386 if n.Op != OEQ { 3387 res = nod(ONOT, res, nil) 3388 } 3389 n = finishcompare(n, res, init) 3390 return n 3391 } 3392 3393 // inline: build boolean expression comparing element by element 3394 andor := OANDAND 3395 if n.Op == ONE { 3396 andor = OOROR 3397 } 3398 var expr *Node 3399 compare := func(el, er *Node) { 3400 a := nod(n.Op, el, er) 3401 if expr == nil { 3402 expr = a 3403 } else { 3404 expr = nod(andor, expr, a) 3405 } 3406 } 3407 cmpl = safeexpr(cmpl, init) 3408 cmpr = safeexpr(cmpr, init) 3409 if t.IsStruct() { 3410 for _, f := range t.Fields().Slice() { 3411 sym := f.Sym 3412 if sym.IsBlank() { 3413 continue 3414 } 3415 compare( 3416 nodSym(OXDOT, cmpl, sym), 3417 nodSym(OXDOT, cmpr, sym), 3418 ) 3419 } 3420 } else { 3421 step := int64(1) 3422 remains := t.NumElem() * t.Elem().Width 3423 combine64bit := unalignedLoad && Widthreg == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger() 3424 combine32bit := unalignedLoad && t.Elem().Width <= 2 && 
t.Elem().IsInteger()
		combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
		for i := int64(0); remains > 0; {
			var convType *types.Type
			switch {
			case remains >= 8 && combine64bit:
				convType = types.Types[TINT64]
				step = 8 / t.Elem().Width
			case remains >= 4 && combine32bit:
				convType = types.Types[TUINT32]
				step = 4 / t.Elem().Width
			case remains >= 2 && combine16bit:
				convType = types.Types[TUINT16]
				step = 2 / t.Elem().Width
			default:
				step = 1
			}
			if step == 1 {
				// No combining possible here; compare a single element.
				compare(
					nod(OINDEX, cmpl, nodintconst(int64(i))),
					nod(OINDEX, cmpr, nodintconst(int64(i))),
				)
				i++
				remains -= t.Elem().Width
			} else {
				// Combine "step" adjacent elements into one wider value
				// on each side by OR-ing shifted, converted elements.
				cmplw := nod(OINDEX, cmpl, nodintconst(int64(i)))
				cmplw = conv(cmplw, convType)
				cmprw := nod(OINDEX, cmpr, nodintconst(int64(i)))
				cmprw = conv(cmprw, convType)
				// For code like this:  uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
				// ssa will generate a single large load.
				for offset := int64(1); offset < step; offset++ {
					lb := nod(OINDEX, cmpl, nodintconst(int64(i+offset)))
					lb = conv(lb, convType)
					lb = nod(OLSH, lb, nodintconst(int64(8*t.Elem().Width*offset)))
					cmplw = nod(OOR, cmplw, lb)
					rb := nod(OINDEX, cmpr, nodintconst(int64(i+offset)))
					rb = conv(rb, convType)
					rb = nod(OLSH, rb, nodintconst(int64(8*t.Elem().Width*offset)))
					cmprw = nod(OOR, cmprw, rb)
				}
				compare(cmplw, cmprw)
				i += step
				remains -= step * t.Elem().Width
			}
		}
	}
	if expr == nil {
		expr = nodbool(n.Op == OEQ)
	}
	n = finishcompare(n, expr, init)
	return n
}

// The result of finishcompare MUST be assigned back to n, e.g.
//	n.Left = finishcompare(n.Left, r, init)
func finishcompare(n, r *Node, init *Nodes) *Node {
	// Use nn here to avoid passing r to typecheck.
	nn := r
	nn = typecheck(nn, Erv)
	nn = walkexpr(nn, init)
	r = nn
	// If typechecking changed the type, restore n's type with a
	// no-op conversion so the replacement expression matches n.
	if r.Type != n.Type {
		r = nod(OCONVNOP, r, nil)
		r.Type = n.Type
		r.SetTypecheck(1)
		nn = r
	}
	return nn
}

// isIntOrdering reports whether n is a <, ≤, >, or ≥ ordering between integers.
func (n *Node) isIntOrdering() bool {
	switch n.Op {
	case OLE, OLT, OGE, OGT:
	default:
		return false
	}
	return n.Left.Type.IsInteger() && n.Right.Type.IsInteger()
}

// walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10.
// n must be an OANDAND or OOROR node.
// The result of walkinrange MUST be assigned back to n, e.g.
//	n.Left = walkinrange(n.Left)
func walkinrange(n *Node, init *Nodes) *Node {
	// We are looking for something equivalent to a opl b OP b opr c, where:
	// * a, b, and c have integer type
	// * b is side-effect-free
	// * opl and opr are each < or ≤
	// * OP is &&
	l := n.Left
	r := n.Right
	if !l.isIntOrdering() || !r.isIntOrdering() {
		return n
	}

	// Find b, if it exists, and rename appropriately.
	// Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right
	// Output is: a opl b(==x) ANDAND/OROR b(==x) opr c
	a, opl, b := l.Left, l.Op, l.Right
	x, opr, c := r.Left, r.Op, r.Right
	for i := 0; ; i++ {
		if samesafeexpr(b, x) {
			break
		}
		if i == 3 {
			// Tried all permutations and couldn't find an appropriate b == x.
			return n
		}
		// Alternately reverse the left and the right comparison,
		// cycling through the four possible placements of b and x.
		if i&1 == 0 {
			a, opl, b = b, brrev(opl), a
		} else {
			x, opr, c = c, brrev(opr), x
		}
	}

	// If n.Op is ||, apply de Morgan.
	// Negate the internal ops now; we'll negate the top level op at the end.
	// Henceforth assume &&.
	negateResult := n.Op == OOROR
	if negateResult {
		opl = brcom(opl)
		opr = brcom(opr)
	}

	cmpdir := func(o Op) int {
		switch o {
		case OLE, OLT:
			return -1
		case OGE, OGT:
			return +1
		}
		Fatalf("walkinrange cmpdir %v", o)
		return 0
	}
	if cmpdir(opl) != cmpdir(opr) {
		// Not a range check; something like b < a && b < c.
		return n
	}

	switch opl {
	case OGE, OGT:
		// We have something like a > b && b ≥ c.
		// Switch and reverse ops and rename constants,
		// to make it look like a ≤ b && b < c.
		a, c = c, a
		opl, opr = brrev(opr), brrev(opl)
	}

	// We must ensure that c-a is non-negative.
	// For now, require a and c to be constants.
	// In the future, we could also support a == 0 and c == len/cap(...).
	// Unfortunately, by this point, most len/cap expressions have been
	// stored into temporary variables.
	if !Isconst(a, CTINT) || !Isconst(c, CTINT) {
		return n
	}

	if opl == OLT {
		// We have a < b && ...
		// We need a ≤ b && ... to safely use unsigned comparison tricks.
		// If a is not the maximum constant for b's type,
		// we can increment a and switch to ≤.
		if a.Int64() >= maxintval[b.Type.Etype].Int64() {
			return n
		}
		a = nodintconst(a.Int64() + 1)
		opl = OLE
	}

	bound := c.Int64() - a.Int64()
	if bound < 0 {
		// Bad news. Something like 5 <= x && x < 3.
		// Rare in practice, and we still need to generate side-effects,
		// so just leave it alone.
		return n
	}

	// We have a ≤ b && b < c (or a ≤ b && b ≤ c).
	// This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a),
	// which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a),
	// which is equivalent to uint(b-a) < uint(c-a).
	ut := b.Type.ToUnsigned()
	lhs := conv(nod(OSUB, b, a), ut)
	rhs := nodintconst(bound)
	if negateResult {
		// Negate top level.
		opr = brcom(opr)
	}
	cmp := nod(opr, lhs, rhs)
	cmp.Pos = n.Pos
	cmp = addinit(cmp, l.Ninit.Slice())
	cmp = addinit(cmp, r.Ninit.Slice())
	// Typecheck the AST rooted at cmp...
	cmp = typecheck(cmp, Erv)
	// ...but then reset cmp's type to match n's type.
	cmp.Type = n.Type
	cmp = walkexpr(cmp, init)
	return cmp
}

// bounded reports whether the integer expression n is provably
// in the range [0, max). (This value is known to the runtime.)
func bounded(n *Node, max int64) bool {
	if n.Type == nil || !n.Type.IsInteger() {
		return false
	}

	sign := n.Type.IsSigned()
	bits := int32(8 * n.Type.Width)

	if smallintconst(n) {
		v := n.Int64()
		return 0 <= v && v < max
	}

	switch n.Op {
	case OAND:
		// x&c is in [0, c] for a non-negative constant c.
		v := int64(-1)
		if smallintconst(n.Left) {
			v = n.Left.Int64()
		} else if smallintconst(n.Right) {
			v = n.Right.Int64()
		}

		if 0 <= v && v < max {
			return true
		}

	case OMOD:
		// Unsigned x%c is in [0, c-1], so bounded when c <= max.
		if !sign && smallintconst(n.Right) {
			v := n.Right.Int64()
			if 0 <= v && v <= max {
				return true
			}
		}

	case ODIV:
		// Unsigned x/c discards about log2(c) significant bits;
		// narrow the bit count accordingly for the check below.
		if !sign && smallintconst(n.Right) {
			v := n.Right.Int64()
			for bits > 0 && v >= 2 {
				bits--
				v >>= 1
			}
		}

	case ORSH:
		// Unsigned x>>c discards c significant bits.
		if !sign && smallintconst(n.Right) {
			v := n.Right.Int64()
			if v > int64(bits) {
				return true
			}
			bits -= int32(v)
		}
	}

	// An unsigned value with at most "bits" significant bits is < 1<<bits.
	if !sign && bits <= 62 && 1<<uint(bits) <= max {
		return true
	}

	return false
}

// usemethod checks interface method calls for uses of reflect.Type.Method.
func usemethod(n *Node) {
	t := n.Left.Type

	// Looking for either of:
	//	Method(int) reflect.Method
	//	MethodByName(string) (reflect.Method, bool)
	//
	// TODO(crawshaw): improve precision of match by working out
	// how to check the method name.
	if n := t.NumParams(); n != 1 {
		return
	}
	if n := t.NumResults(); n != 1 && n != 2 {
		return
	}
	p0 := t.Params().Field(0)
	res0 := t.Results().Field(0)
	var res1 *types.Field
	if t.NumResults() == 2 {
		res1 = t.Results().Field(1)
	}

	if res1 == nil {
		// One result: must match Method(int)'s int parameter.
		if p0.Type.Etype != TINT {
			return
		}
	} else {
		// Two results: must match MethodByName(string) (..., bool).
		if !p0.Type.IsString() {
			return
		}
		if !res1.Type.IsBoolean() {
			return
		}
	}

	// Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
	// (including global variables such as numImports - was issue #19028).
	if s := res0.Type.Sym; s != nil && s.Name == "Method" && s.Pkg != nil && s.Pkg.Path == "reflect" {
		Curfn.Func.SetReflectMethod(true)
	}
}

// usefield records in Curfn's FieldTrack set a use of the struct field
// selected by n (an ODOT/ODOTPTR) when the field's note requests
// tracking via go:"track". No-op unless field tracking is enabled.
func usefield(n *Node) {
	if objabi.Fieldtrack_enabled == 0 {
		return
	}

	switch n.Op {
	default:
		Fatalf("usefield %v", n.Op)

	case ODOT, ODOTPTR:
		break
	}
	if n.Sym == nil {
		// No field name. This DOTPTR was built by the compiler for access
		// to runtime data structures. Ignore.
		return
	}

	t := n.Left.Type
	if t.IsPtr() {
		t = t.Elem()
	}
	field := dotField[typeSymKey{t.Orig, n.Sym}]
	if field == nil {
		Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
	}
	if !strings.Contains(field.Note, "go:\"track\"") {
		return
	}

	outer := n.Left.Type
	if outer.IsPtr() {
		outer = outer.Elem()
	}
	if outer.Sym == nil {
		yyerror("tracked field must be in named struct type")
	}
	if !exportname(field.Sym.Name) {
		yyerror("tracked field must be exported (upper case)")
	}

	sym := tracksym(outer, field)
	if Curfn.Func.FieldTrack == nil {
		Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
	}
	Curfn.Func.FieldTrack[sym] = struct{}{}
}

// candiscardlist reports whether every node in l can be discarded
// (see candiscard).
func candiscardlist(l Nodes) bool {
	for _, n := range l.Slice() {
		if !candiscard(n) {
			return false
		}
	}
	return true
}

// candiscard reports whether the evaluation of n can be omitted
// entirely: the listed ops have no side effects, and the divide/make
// cases are admitted only when they are known not to fail.
func candiscard(n *Node) bool {
	if n == nil {
		return true
	}

	switch n.Op {
	default:
		return false

	// Discardable as long as the subpieces are.
	case ONAME,
		ONONAME,
		OTYPE,
		OPACK,
		OLITERAL,
		OADD,
		OSUB,
		OOR,
		OXOR,
		OADDSTR,
		OADDR,
		OANDAND,
		OARRAYBYTESTR,
		OARRAYRUNESTR,
		OSTRARRAYBYTE,
		OSTRARRAYRUNE,
		OCAP,
		OCMPIFACE,
		OCMPSTR,
		OCOMPLIT,
		OMAPLIT,
		OSTRUCTLIT,
		OARRAYLIT,
		OSLICELIT,
		OPTRLIT,
		OCONV,
		OCONVIFACE,
		OCONVNOP,
		ODOT,
		OEQ,
		ONE,
		OLT,
		OLE,
		OGT,
		OGE,
		OKEY,
		OSTRUCTKEY,
		OLEN,
		OMUL,
		OLSH,
		ORSH,
		OAND,
		OANDNOT,
		ONEW,
		ONOT,
		OCOM,
		OPLUS,
		OMINUS,
		OOROR,
		OPAREN,
		ORUNESTR,
		OREAL,
		OIMAG,
		OCOMPLEX:
		break

	// Discardable as long as we know it's not division by zero.
	case ODIV, OMOD:
		if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
			break
		}
		if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
			break
		}
		return false

	// Discardable as long as we know it won't fail because of a bad size.
	case OMAKECHAN, OMAKEMAP:
		if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
			break
		}
		return false

	// Difficult to tell what sizes are okay.
	case OMAKESLICE:
		return false
	}

	// The op itself is discardable; recurse into all subtrees.
	if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
		return false
	}

	return true
}

// rewrite
//	print(x, y, z)
// into
//	func(a1, a2, a3) {
//		print(a1, a2, a3)
//	}(x, y, z)
// and same for println.

// walkprintfunc_prgen numbers the generated print wrapper functions.
var walkprintfunc_prgen int

// The result of walkprintfunc MUST be assigned back to n, e.g.
// n.Left = walkprintfunc(n.Left, init)
func walkprintfunc(n *Node, init *Nodes) *Node {
	// Flush any init statements attached to the call into init first.
	if n.Ninit.Len() != 0 {
		walkstmtlist(n.Ninit.Slice())
		init.AppendNodes(&n.Ninit)
	}

	// Build the wrapper's parameter list: one parameter a0, a1, ...
	// per print argument, each with the argument's type.
	t := nod(OTFUNC, nil, nil)
	num := 0
	var printargs []*Node
	for _, n1 := range n.List.Slice() {
		buf := fmt.Sprintf("a%d", num)
		num++
		a := namedfield(buf, n1.Type)
		t.List.Append(a)
		printargs = append(printargs, a.Left)
	}

	// Declare the wrapper at top level, not inside the current function.
	oldfn := Curfn
	Curfn = nil

	walkprintfunc_prgen++
	sym := lookupN("print·%d", walkprintfunc_prgen)
	fn := dclfunc(sym, t)

	// The wrapper's body is the original print/println applied to
	// the wrapper's parameters.
	a := nod(n.Op, nil, nil)
	a.List.Set(printargs)
	a = typecheck(a, Etop)
	a = walkstmt(a)

	fn.Nbody.Set1(a)

	funcbody()

	fn = typecheck(fn, Etop)
	typecheckslice(fn.Nbody.Slice(), Etop)
	xtop = append(xtop, fn)
	Curfn = oldfn

	// Replace the original call with a call of the wrapper,
	// passing along the original arguments.
	a = nod(OCALL, nil, nil)
	a.Left = fn.Func.Nname
	a.List.Set(n.List.Slice())
	a = typecheck(a, Etop)
	a = walkexpr(a, init)
	return a
}

// substArgTypes substitutes the given list of types for
// successive occurrences of the "any" placeholder in the
// type syntax expression n.Type.
// The result of substArgTypes MUST be assigned back to old, e.g.
//	n.Left = substArgTypes(n.Left, t1, t2)
func substArgTypes(old *Node, types_ ...*types.Type) *Node {
	n := *old // make shallow copy

	for _, t := range types_ {
		dowidth(t)
	}
	// SubstAny consumes entries from types_ as it substitutes; any
	// leftovers mean the caller supplied more types than "any" slots.
	n.Type = types.SubstAny(n.Type, &types_)
	if len(types_) > 0 {
		Fatalf("substArgTypes: too many argument types")
	}
	return &n
}